content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from django.shortcuts import render
from django.core.paginator import Paginator
from django.contrib.auth.models import User
from django.views import View
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
7295,
13,
79,
363,
20900,
1330,
31525,
20900,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
33571,
... | 3.604651 | 43 |
"""Tirar espaços que tão sobrando no fim e no início da string"""
cor = " vermelho e azul "
print(cor.strip()) | [
37811,
51,
343,
283,
1658,
8957,
16175,
418,
8358,
256,
28749,
523,
17938,
78,
645,
277,
320,
304,
645,
287,
8836,
66,
952,
12379,
4731,
37811,
198,
198,
10215,
796,
366,
220,
3326,
17694,
8873,
304,
35560,
377,
220,
220,
220,
220,
... | 2.269231 | 52 |
"""
Objects intended to extract and store information about various components.
By changing an import, predefined bundles of variables can be used with the same organization;
see predefined.py in this folder. Helpful for testing without api data.
Organization: specific properties of the spring or
"""
import numpy as np
class Torsion_Bar_Info(object):
"""
Store information about laminated torsion bars as parsed from api part data dict
"""
class Spring_Info(object):
"""
Store information about axial springs as parsed from api part data dict
Organization of this object will likely change as part connectivity files become available-
in particular the methods to determine hatch/body attachment points may be reorganized
"""
class Hatch_Info(object):
"""
Store information about hatches, parsed from API nested dictionary of part info. Also store
associated springs and torsion bars.
"""
def _calc_angle_range(self, ang_incr_deg=1.):
"""
Get the range (min..max) of possible angles that the hatch can be open.
"""
# Default increment is 1 degree
ang_increm = np.pi/180 * ang_incr_deg
return np.arange(self.ang_closed, self.max_ang + ang_increm, ang_increm)
def _trans2d(self, pt_vec):
"""
Transform a 1x4 point vector (for use with 4x4 transformation matrix) to the 2d hatch
plane assumed for all calculations. Rotates, sets z=0, and returns a 1x3 coordinate vector.
"""
new_vec = np.dot(np.linalg.inv(self.hinge_csys), pt_vec)
new_vec[2] = 0.
return new_vec[:3]
| [
37811,
201,
198,
10267,
82,
5292,
284,
7925,
290,
3650,
1321,
546,
2972,
6805,
13,
201,
198,
201,
198,
3886,
5609,
281,
1330,
11,
2747,
18156,
36344,
286,
9633,
460,
307,
973,
351,
262,
976,
4009,
26,
201,
198,
3826,
2747,
18156,
13... | 2.808333 | 600 |
import urllib.request, urllib.error, urllib.parse
import os
| [
11748,
2956,
297,
571,
13,
25927,
11,
2956,
297,
571,
13,
18224,
11,
2956,
297,
571,
13,
29572,
198,
11748,
28686,
628,
198
] | 2.695652 | 23 |
# Starting with a skeleton process which gets imported with the following line
from PhysicsTools.PatAlgos.patTemplate_cfg import *
from PhysicsTools.PatAlgos.tools.coreTools import *
from RecoJets.JetProducers.PFJetParameters_cfi import *
from RecoJets.JetProducers.CaloJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
from RecoJets.JetProducers.GenJetParameters_cfi import *
from BristolAnalysis.NTupleTools.options import CMSSW_MAJOR_VERSION
# this has to run AFTER setup_PF2PAT
def setup_jets(process, cms, options, postfix="PFlow"):
'''
application of residual corrections. Have to be set to True once the 13 TeV
residual corrections are available. False to be kept meanwhile. Can be kept
to False
'''
print '=' * 60
print "Setting up Jets"
print "Overwrites JEC/JER in GT for application in JetUserData"
print "Presently only JER (Please modify for JEC)"
print '=' * 60
import os
# If you want to use other JEC files then put .db file in data/JEC/ and use option useJECFromFile=1 on command line
runOnData = options.isData
era = "Spring16_25nsV10_"
if runOnData: era += 'DATA'
else: era += 'MC'
dBFile = os.path.expandvars("$CMSSW_BASE/src/BristolAnalysis/NTupleTools/data/JEC/" + era + ".db")
print 'Using JEC from DB: {0}'.format(dBFile)
if not runOnData:
# JER
process.load('Configuration.StandardSequences.Services_cff')
process.load("JetMETCorrections.Modules.JetResolutionESProducer_cfi")
from CondCore.DBCommon.CondDBSetup_cfi import CondDBSetup
process.jer = cms.ESSource("PoolDBESSource",
CondDBSetup,
toGet = cms.VPSet(
# Resolution
cms.PSet(
record = cms.string('JetResolutionRcd'),
tag = cms.string('JR_Spring16_25nsV10_MC_PtResolution_AK4PFchs'),
label = cms.untracked.string('AK4PFchs_pt')
),
# Scale factors
cms.PSet(
record = cms.string('JetResolutionScaleFactorRcd'),
tag = cms.string('JR_Spring16_25nsV10_MC_SF_AK4PFchs'),
label = cms.untracked.string('AK4PFchs')
),
),
connect = cms.string('sqlite_file:'+dBFile)
)
process.es_prefer_jer = cms.ESPrefer('PoolDBESSource', 'jer')
# process.jec = cms.ESSource("PoolDBESSource",
# CondDBSetup,
# toGet=cms.VPSet(
# cms.PSet(
# record=cms.string("JetCorrectionsRecord"),
# tag=cms.string("JetCorrectorParametersCollection_" + era + "_AK4PF"),
# label=cms.untracked.string("AK4PF")
# ),
# cms.PSet(
# record=cms.string("JetCorrectionsRecord"),
# tag=cms.string("JetCorrectorParametersCollection_" + era + "_AK4PFchs"),
# label=cms.untracked.string("AK4PFchs")
# ),
# ),
# connect=cms.string("sqlite:" + dBFile),
# )
# process.es_prefer_jec = cms.ESPrefer("PoolDBESSource", 'jec')
| [
2,
17962,
351,
257,
18328,
1429,
543,
3011,
17392,
351,
262,
1708,
1627,
198,
6738,
23123,
33637,
13,
12130,
2348,
70,
418,
13,
8071,
30800,
62,
37581,
1330,
1635,
198,
198,
6738,
23123,
33637,
13,
12130,
2348,
70,
418,
13,
31391,
13,... | 2.066377 | 1,612 |
"""
This module contains all tests
"""
| [
37811,
198,
1212,
8265,
4909,
477,
5254,
198,
37811,
198
] | 3.9 | 10 |
### NOW OBSOLETE: MOVED INTO VARIABLE_STEP.PY
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from numpy import random
from random import random
plt.style.use('seaborn-whitegrid')
class BrownianAnimation():
'''
Animating the 1D, 2D and 3D Brownian motion of one and many paths.
Animation objects are created using the FuncAnimation() method.
Parameters
----------
fig : Figure
Figure object used to animate upon
func : callable
Callable method to call at each frame
frames : int
Number of frames, set to N+1 since range taken
fargs : tuple or None
Additional arguments to pass to each call of func
interval : int, default 200
Delay between frames in ms
repeat : bool, default True
Whether the animation repeats after completion
'''
### The random state can be set to maintain reproducibility:
# np.random.seed(0)
def generate_random_walks(self, dt, N, dimension):
'''
Create a random walk line using scalar Brownian motion equations (as in brownian_prototype.py).
Note that there is variable step size due to using np.random.randn(1,N), a standard normal distribution.
Parameters
----------
dt : float
The square root of the time interval T/(N-1)
N : int
The number of steps
dimension : str
Selects which array is returned based on number of random variables
'''
pos = np.linspace(0, 1000, 1001) # no. of steps, for 1D plot
dx = np.sqrt(dt) * np.random.randn(1, N)
x = np.cumsum(dx, axis=1)
dy = np.sqrt(dt) * np.random.randn(1, N)
y = np.cumsum(dy, axis=1)
dz = np.sqrt(dt) * np.random.randn(1, N)
z = np.cumsum(dz, axis=1)
### Creates nested lists, vertically stacking the random variables into numpy arrays for 2D and 3D
linedata_1D = np.vstack((pos, x))
linedata_2D = np.vstack((x, y))
linedata_3D = np.vstack((x, y, z))
### Returns an array of shape (dimension, N)
if dimension == '1D':
return linedata_1D
elif dimension == '2D':
return linedata_2D
else: # dimension == '3D'
return linedata_3D
def update_random_walks(self, num, walkData, lines, dimension):
'''
Returns the Line2D objects to update each frame of the animaton. The zip() function combines multiple lists in parallel by collecting items at each position into a single tuple.
Parameters
----------
num : int
Number of frames
walkData : list
walkData list passed in
lines : list
lines list passed in
dimension : str
Selects which Line2D object is returned based on number of random variables
'''
for line, data in zip(lines, walkData):
if dimension == '1D':
line.set_data(data[0:2, 0:num])
if dimension == '2D':
line.set_data(data[0:2, 0:num])
elif dimension == '3D': # dimension == '3D'
line.set_data(data[0:2, :num])
line.set_3d_properties(data[2, 0:num])
return lines
def animate(self, dimension, save=False):
'''
Creates animations in 1D, 2D and 3D, using matplotlib.animation.
Parameters
----------
dimension : str
Selects whether the animation is plotted in 1D, 2D or 3D
save : bool
Determines whether animation is saved to specified path
'''
### Initialise parameters as previously
T = 1.0
N = 1001
dt = math.sqrt(T/(N-1))
M = 6 # number of walkers
### Put lineData list into another list (needed for matplotlib.lines.Line2D module). For loop added for multiple walkers.
walkData = [self.generate_random_walks(dt, N, dimension) for i in range(M)]
### Assign lists in walkData to position variables
### Also set limits for graph axes (otherwise goes off graph for multiple walkers)
minmax_x, minmax_y, minmax_z = [], [], []
if dimension == '1D':
for i in range(M):
pos = walkData[i][0]
x = walkData[i][1]
max_x, min_x = max(x), min(x)
minmax_x.append(max_x), minmax_x.append(min_x)
elif dimension == '2D':
for i in range(M):
x = walkData[i][0]
y = walkData[i][1]
max_x, min_x = max(x), min(x)
minmax_x.append(max_x), minmax_x.append(min_x)
max_y, min_y = max(y), min(y)
minmax_y.append(max_y), minmax_y.append(min_y)
else: # dimension == '3D'
for i in range(M):
x = walkData[i][0]
y = walkData[i][1]
z = walkData[i][2]
max_x, min_x = max(x), min(x)
minmax_x.append(max_x), minmax_x.append(min_x)
max_y, min_y = max(y), min(y)
minmax_y.append(max_y), minmax_y.append(min_y)
max_z, min_z = max(z), min(z)
minmax_z.append(max_z), minmax_z.append(min_z)
### Create figure and axis objects with tight axes
fig = plt.figure()
if dimension == '1D':
ax = fig.add_subplot(1, 1, 1)
ax = plt.axes(xlim=(min(pos)+0.1*min(pos), max(pos)+0.1*max(pos)), ylim=(min(minmax_x)+0.1*min(minmax_x), max(minmax_x)+0.1*max(minmax_x)))
ax.set_xlabel('Number of steps')
ax.set_ylabel('X(t)')
ax.set_title('1D Discretised Brownian Path')
elif dimension == '2D':
ax = fig.add_subplot(1, 1, 1)
ax = plt.axes(xlim=(min(minmax_x)+0.1*min(minmax_x), max(minmax_x)+0.1*max(minmax_x)), ylim=(min(minmax_y)+0.1*min(minmax_y), max(minmax_y)+0.1*max(minmax_y)))
ax.set_xlabel('X(t)')
ax.set_ylabel('Y(t)')
ax.set_title('2D Discretised Brownian Path')
else: # dimension == '3D'
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.set_xlabel('X(t)')
ax.set_xlim3d((min(minmax_x)+0.1*min(minmax_x), max(minmax_x)+0.1*max(minmax_x)))
ax.set_ylabel('Y(t)')
ax.set_ylim3d((min(minmax_y)+0.1*min(minmax_y), max(minmax_y)+0.1*max(minmax_y)))
ax.set_zlabel('Z(t)')
ax.set_zlim3d((min(minmax_z)+0.1*min(minmax_z), max(minmax_z)+0.1*max(minmax_z)))
ax.set_title('3D Discretised Brownian Path')
### Use list comprehension to create a list of Line2D Objects
if dimension == '1D' or '2D':
lines = [ax.plot(data[0, 0:1], data[1, 0:1])[0] for data in walkData]
else: # dimension == '3D'
lines = [ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1])[0] for data in walkData]
### Create animation object using the FuncAnimation() method
anim = animation.FuncAnimation(
fig, self.update_random_walks, N+1, fargs=(walkData, lines, dimension), interval=30, repeat=True, blit=False)
if save:
file_name = input("Name of animation file: ")
full_path = rf"C:\Users\Melissa\OneDrive - Lancaster University\University\Third Year\PHYS 389\Results\{file_name}.gif"
anim.save(full_path)
plt.show()
'''Individual methods - decide if you want to keep:'''
if __name__ == '__main__':
### Create instance of class and call relevant method
test = BrownianAnimation()
test.animate('3D')
| [
21017,
20229,
440,
4462,
46,
2538,
9328,
25,
28184,
1961,
39319,
569,
1503,
3539,
19146,
62,
42135,
13,
47,
56,
198,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
... | 2.071901 | 3,783 |
from typing import Final as _Final
__version__: _Final[str] = "0.0.1"
__author__: _Final[str] = "AnimateShadows"
__url__: _Final[str] = "https://github.com/AnimateShadows/dapi"
| [
6738,
19720,
1330,
8125,
355,
4808,
19006,
198,
198,
834,
9641,
834,
25,
4808,
19006,
58,
2536,
60,
796,
366,
15,
13,
15,
13,
16,
1,
198,
834,
9800,
834,
25,
4808,
19006,
58,
2536,
60,
796,
366,
2025,
1920,
2484,
9797,
1,
198,
8... | 2.542857 | 70 |
"""
1047. Remove All Adjacent Duplicates In String
Easy
2997
You are given a string s consisting of lowercase English letters. A duplicate removal consists of choosing two adjacent and equal letters and removing them.
We repeatedly make duplicate removals on s until we no longer can.
Return the final string after all such duplicate removals have been made. It can be proven that the answer is unique.
Example 1:
Input: s = "abbaca"
Output: "ca"
Explanation:
For example, in "abbaca" we could remove "bb" since the letters are adjacent and equal, and this is the only possible move. The result of this move is that the string is "aaca", of which only "aa" is possible, so the final string is "ca".
Example 2:
Input: s = "azxxzy"
Output: "ay"
Constraints:
1 <= s.length <= 105
s consists of lowercase English letters.
"""
# V0
# IDEA : STACK
# V0'
# IDEA : TWO POINTERS
# -> pointers : end, c
# V1
# IDEA : STACK
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string/discuss/294893/JavaC%2B%2BPython-Two-Pointers-and-Stack-Solution
# V1'
# IDEA : TWO POINTERS
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string/discuss/294964/JavaPython-3-three-easy-iterative-codes-w-brief-explanation-analysis-and-follow-up.
# V1''
# IDEA : REPLACE
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string/solution/
from string import ascii_lowercase
# V1'''
# IDEA : STACK
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string/solution/
# V2 | [
37811,
198,
198,
940,
2857,
13,
17220,
1439,
1215,
73,
12643,
49821,
16856,
554,
10903,
198,
28406,
198,
198,
1959,
5607,
198,
198,
1639,
389,
1813,
257,
4731,
264,
17747,
286,
2793,
7442,
3594,
7475,
13,
317,
23418,
9934,
10874,
286,
... | 2.897921 | 529 |
# -*- coding: utf-8 -*-
"""BaselinesTesting.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1JSwCulz7OydRTORHg24A1D-BCmvJfFUQ
"""
!apt install swig cmake libopenmpi-dev zlib1g-dev
!pip install stable-baselines
import sys
import os
import numpy as np
import tensorflow as tf
import gym
import stable_baselines
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common.policies import CnnPolicy
from stable_baselines import PPO2
def pythonVersionString():
"""Current system python version as string major.minor.micro [(alpha|beta|etc)]"""
vstring = "{0}.{1}.{2}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)
if sys.version_info.releaselevel != "final":
vstring += " ({})".format( sys.version_info.releaselevel )
if sys.version_info.serial != 0:
vstring += " (serial: {})".format( sys.version_info.serial )
return vstring
printVersions( ["Python", tf, np, gym, stable_baselines] )
from google.colab import drive
drive.mount('/content/gdrive')
!ls /content/gdrive/My\ Drive/ColabData/baseline_tests
# Initialize environments and model
base_dir = "/content/gdrive/My Drive/ColabData/baseline_tests"
stable_baselines.logger.configure(folder=base_dir, format_strs=['stdout', 'log', 'csv','tensorboard'])
model_path = os.path.join( base_dir, "test_1.pkl" )
num_envs = 2
random_seed = 0
env_names = [ 'DemonAttackNoFrameskip-v4', 'SpaceInvadersNoFrameskip-v4' ]
envs = []
for env_name in env_names:
envs.append( make_atari_env(env_name, num_env=num_envs, seed=random_seed) )
if os.path.exists(model_path):
print( "Loading model from {}".format( model_path ))
model = PPO2.load(model_path)
else:
print( "Creating model" )
model = PPO2(CnnPolicy, envs[0], verbose=0)
epochs = 10
steps_per_epoch = 10000
for epoch in range(epochs):
for idx, env in enumerate(envs):
model.set_env(env)
print( "training {} for {} steps".format( env_names[idx], steps_per_epoch) )
model.learn(total_timesteps=steps_per_epoch)
model.save(model_path)
for idx, env in enumerate(envs):
test_env( model, env, env_names[idx] )
"""Outputs from traiing runs
```
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0175 6
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0165 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0165 8
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0235 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0185 9
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.022 10
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0135 7
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.019 12
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0145 11
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0215 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0235 7
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.02 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.015 11
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.026 10
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0145 3
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.022 14
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0095 5
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0245 7
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0105 11
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.019 13
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0155 7
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0235 7
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.026 9
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0265 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0195 5
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0265 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0215 7
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0305 12
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0165 8
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.027 12
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.016 9
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.033 8
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0175 6
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.027 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.017 17
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0395 7
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0155 7
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0215 14
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0205 11
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0265 8
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.019 9
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0255 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.0145 5
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0255 13
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.023 8
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0205 15
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.019 8
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.022 11
training DemonAttackNoFrameskip-v4 for 10000 steps
training SpaceInvadersNoFrameskip-v4 for 10000 steps
DemonAttackNoFrameskip-v4 mean rewards/episodes: 0.021 7
SpaceInvadersNoFrameskip-v4 mean rewards/episodes: 0.0245 13
```
""" | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
15522,
20655,
44154,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
... | 2.88562 | 2,719 |
import unittest
from pyproxypattern import Proxy
from aiounittest import async_test
| [
11748,
555,
715,
395,
198,
6738,
12972,
1676,
87,
4464,
265,
759,
1330,
38027,
198,
6738,
257,
72,
977,
715,
395,
1330,
30351,
62,
9288,
628,
628
] | 3.222222 | 27 |
import RPi.GPIO as GPIO2
import time | [
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
17,
198,
11748,
640
] | 3 | 12 |
from .load_ur5_parallel import load_ur5_parallel
| [
6738,
764,
2220,
62,
333,
20,
62,
1845,
29363,
1330,
3440,
62,
333,
20,
62,
1845,
29363,
198
] | 2.722222 | 18 |
from contextlib import contextmanager
| [
6738,
4732,
8019,
1330,
4732,
37153,
628
] | 5.571429 | 7 |
# coding: utf-8
"""Test parsing of COUNTER BR2 book report."""
from __future__ import absolute_import
import os
import unittest
import warnings
import pycounter.report
class ParseExample(unittest.TestCase):
"""Tests for parsing C3 BR2"""
class ParseCounter4Example(unittest.TestCase):
"""Tests for parsing C4 BR2"""
class ParseLatin1(unittest.TestCase):
"""Tests for parsing BR2 in latin-1 encoding"""
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
14402,
32096,
286,
31404,
5781,
11177,
17,
1492,
989,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
146... | 3 | 141 |
import math
from python.src.neurons.activation_functions.activation_function import ActivationFunction
| [
171,
119,
123,
11748,
10688,
198,
6738,
21015,
13,
10677,
13,
710,
333,
684,
13,
48545,
62,
12543,
2733,
13,
48545,
62,
8818,
1330,
13144,
341,
22203,
628,
198
] | 3.655172 | 29 |
from django.http import JsonResponse, StreamingHttpResponse
from worker.bases import get_config, rand_sig, get_user_folder_size
from django.core.paginator import EmptyPage, PageNotAnInteger
import os
def get_maintenance_protocols():
"""
Get maintenance protocols
:return: list, module names
"""
protocols = []
protocols_path = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'maintenance_protocols')
for model_name in os.listdir(protocols_path):
if not model_name.endswith('.py') or model_name.startswith('_') or model_name.startswith('maintenance'):
continue
protocols.append(model_name.replace('.py', ''))
return protocols
| [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
11,
43124,
43481,
31077,
198,
6738,
8383,
13,
65,
1386,
1330,
651,
62,
11250,
11,
43720,
62,
82,
328,
11,
651,
62,
7220,
62,
43551,
62,
7857,
198,
6738,
42625,
14208,
13,
7295,
1... | 2.730769 | 260 |
import pandas as pd
import csv
import nltk
from nltk.corpus import stopwords
if __name__ == '__main__':
main()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
269,
21370,
198,
11748,
299,
2528,
74,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
... | 2.543478 | 46 |
from functools import wraps
from flask import g, request, redirect, url_for, session, render_template
import sqlite3 as lite
import sys
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/
"""
@wraps(f)
return decorated_function
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
42903,
1330,
308,
11,
2581,
11,
18941,
11,
19016,
62,
1640,
11,
6246,
11,
8543,
62,
28243,
198,
11748,
44161,
578,
18,
355,
300,
578,
198,
11748,
25064,
628,
198,
4299,
17594,
62,
35827,... | 2.833333 | 114 |
import numpy as np
import os
import cv2 as cv
from constants import constants
if __name__ == "__main__":
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
6738,
38491,
1330,
38491,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.926829 | 41 |
from eth.beacon.types.blocks import BaseBeaconBlock
from eth.beacon.types.pending_attestation_records import PendingAttestationRecord
from eth.beacon.types.states import BeaconState
from eth.beacon.state_machines.configs import BeaconConfig
from .validation import (
validate_serenity_attestation,
)
def process_attestations(state: BeaconState,
block: BaseBeaconBlock,
config: BeaconConfig) -> BeaconState:
"""
Implements 'per-block-processing.operations.attestations' portion of Phase 0 spec:
https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1
Validate the ``attestations`` contained within the ``block`` in the context of ``state``.
If any invalid, throw ``ValidationError``.
Otherwise, append an ``PendingAttestationRecords`` for each to ``latest_attestations``.
Return resulting ``state``.
"""
for attestation in block.body.attestations:
validate_serenity_attestation(
state,
attestation,
config.EPOCH_LENGTH,
config.MIN_ATTESTATION_INCLUSION_DELAY,
config.LATEST_BLOCK_ROOTS_LENGTH,
)
# update_latest_attestations
additional_pending_attestations = tuple(
PendingAttestationRecord(
data=attestation.data,
participation_bitfield=attestation.participation_bitfield,
custody_bitfield=attestation.custody_bitfield,
slot_included=state.slot,
)
for attestation in block.body.attestations
)
state = state.copy(
latest_attestations=state.latest_attestations + additional_pending_attestations,
)
return state
| [
6738,
4555,
13,
1350,
7807,
13,
19199,
13,
27372,
1330,
7308,
3856,
7807,
12235,
198,
6738,
4555,
13,
1350,
7807,
13,
19199,
13,
79,
1571,
62,
1078,
27364,
62,
8344,
3669,
1330,
350,
1571,
8086,
27364,
23739,
198,
6738,
4555,
13,
1350... | 2.467811 | 699 |
import math
import pyglet
from pyglet.gl import *
| [
11748,
10688,
198,
198,
11748,
12972,
70,
1616,
198,
6738,
12972,
70,
1616,
13,
4743,
1330,
1635,
628,
198
] | 2.789474 | 19 |
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data class objects that works with tf.function."""
from typing import Type, Any
import attr
__all__ = ['dataclass']
def dataclass(cls: Type[Any]) -> Type[Any]:
"""Creates a data class object compatible with `tf.function`.
Modifies dunder methods of an input class with typed attributes to work as an
input/output to `tf.function`, as well as a loop variable of
`tf.while_loop`.
An intended use case for this decorator is on top of a simple class definition
with type annotated arguments like in the example below. It is not guaranteed
that this decorator works with an arbitrary class.
#### Examples
```python
import tensorflow as tf
import tf_quant_finance as tff
@tff.utils.dataclass
class Coords:
x: tf.Tensor
y: tf.Tensor
@tf.function
def fn(start_coords: Coords) -> Coords:
def cond(it, _):
return it < 10
def body(it, coords):
return it + 1, Coords(x=coords.x + 1, y=coords.y + 2)
return tf.while_loop(cond, body, loop_vars=(0, start_coords))[1]
start_coords = Coords(x=tf.constant(0), y=tf.constant(0))
fn(start_coords)
# Expected Coords(a=10, b=20)
```
Args:
cls: Input class object with type annotated arguments. The class should not
have an init method defined. Class fields are treated as ordered in the
same order as they appear in the class definition.
Returns:
Modified class that can be used as a `tf.function` input/output as well
as a loop variable of `tf.function`. All typed arguments of the original
class are treated as ordered in the same order as they appear in the class
definition. All untyped arguments are ignored. Modified class modifies
`len` and `iter` methods defined for the class instances such that `len`
returns the number of arguments, and `iter` creates an iterator for the
ordered argument values.
"""
# Wrap the class with attr.s to ensure that the class can be an input/output
# to a `tf.function`
cls = attr.s(cls, auto_attribs=True)
# Define __iter__ and __len__ method to ensure tf.while_loop compatibility
cls.__len__ = __len__
cls.__iter__ = __iter__
return cls
| [
2,
406,
600,
355,
25,
21015,
18,
198,
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,... | 3.190972 | 864 |
import sys
import re
lines = open(sys.argv[1], 'r')
for line in lines:
line = line.replace('\n', '').replace('\r', '')
if len(line) > 0:
chars = [match.start() for match in re.finditer('[a-zA-Z]', line)]
isupper = True
for i in chars:
c = line[i].upper() if isupper else line[i].lower()
line = line[0:i] + c + line[i + 1::]
isupper = isupper == False
print(line)
lines.close()
| [
11748,
25064,
201,
198,
11748,
302,
201,
198,
201,
198,
6615,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
4357,
705,
81,
11537,
201,
198,
1640,
1627,
287,
3951,
25,
201,
198,
220,
220,
220,
1627,
796,
1627,
13,
33491,
10786,
59,
77,... | 2.012821 | 234 |
from flask import request
from zeus.models import Build
from zeus.api.utils.upserts import upsert_job
from .base import BaseHook
| [
6738,
42903,
1330,
2581,
198,
198,
6738,
1976,
27650,
13,
27530,
1330,
10934,
198,
6738,
1976,
27650,
13,
15042,
13,
26791,
13,
4739,
861,
82,
1330,
19649,
861,
62,
21858,
198,
198,
6738,
764,
8692,
1330,
7308,
39,
566,
628
] | 3.3 | 40 |
from django.shortcuts import render
from core.models import *
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.db.models import Count
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.forms import modelform_factory
from django.contrib.auth.decorators import login_required
import json
from django.http import JsonResponse
from django.views.decorators.cache import cache_page
from django.utils import timezone
import pytz
from django.db.models import Q
from core.mocfunctions import *
#from folium import Map
import folium
# To check if NaN
import math
# To send mail
from django.core.mail import EmailMultiAlternatives
THIS_PROJECT = PROJECT_ID["library"]
@login_required
# Control panel sections
# The main control panel views are in the core/views file, but these are library-specific
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
4755,
13,
27530,
1330,
1635,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
198,
6738,
42625,
14208,
13,
6371,
82,
... | 3.360759 | 316 |
from .bert import BERT, CustomBERT
from .language_model import BERTLM, CustomBERTLM
from .classifier import GoodBad
| [
6738,
764,
4835,
1330,
347,
17395,
11,
8562,
13246,
51,
198,
6738,
764,
16129,
62,
19849,
1330,
347,
1137,
14990,
44,
11,
8562,
13246,
14990,
44,
198,
6738,
764,
4871,
7483,
1330,
4599,
22069,
198
] | 3.314286 | 35 |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_int_field 1'] = '''{
int_field: [Int]
}'''
snapshots['test_int_fails 1'] = '''{
int_field: [Int]
}'''
snapshots['test_default_arg 1'] = '''{
int_field?: [Int] default=2
}'''
snapshots['test_print_schema 1'] = '''{
nested: {
int_field: [Int]
}
}'''
snapshots['test_print_schema 2'] = '''{
nested?: {
int_field?: [Int] default=3
} default={'int_field': 3}
}'''
snapshots['test_print_schema 3'] = '''{
nested?: {
int_field?: [Int]
} default={}
}'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11495,
1477,
24879,
25,
410,
16,
532,
3740,
1378,
42469,
13,
4743,
14,
89,
34,
19,
88,
52,
66,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
19... | 2.271186 | 295 |
"""
generate_plots_JCAP_2021.py is a Python routine that can be used
to generate the plots of A. Roper Pol, S. Mandal, A. Brandenburg, and
T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
It reads the pickle run variables that can be generated by the routines
initialize_JCAP_2021.py, initialize_PRR_2021.py, and initialize_PRD_2020.py
The function run() executes the code.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
# get working directory, where the runs and routines should be stored
dir0 = os.getcwd() + '/'
HOME = dir0 + '..'
os.chdir(HOME)
from dirs import read_dirs as rd
import plot_sets
import run as r
import interferometry as inte
import cosmoGW
import spectra
os.chdir(dir0)
def generate_table(runs, save=True, print_tex=False):
"""
Function that generates the Table I of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356, containing the
parameters of the simulations and the characteristic values of magnetic and
GW results.
Arguments:
runs -- variable that contains the memory project runs with the
stored spectra
save -- option to save the table in tableI.csv
(default True)
"""
import pandas as pd
types = []
sigmas = []
EMmaxs = []
OmGWstat = []
PMs = []
PGWs = []
ks = []
ns = []
etas = []
for i in runs:
run = runs.get(i)
types.append(run.type)
sigmas.append(run.sig)
EMmaxs.append(run.EMmax)
OmGWstat.append(run.GWstat)
PMs.append(run.PM)
PGWs.append(run.PGW)
ks.append(run.k)
ns.append(run.n)
etas.append(run.eta)
types = np.array(types)
sigmas = np.array(sigmas)
EMmaxs = np.array(EMmaxs)
OmGWstat = np.array(OmGWstat)
PMs = np.array(PMs)
PGWs = np.array(PGWs)
ks = np.array(ks)
ns = np.array(ns)
etas = np.array(etas)
df = pd.DataFrame({'Type': types, 'sigma': sigmas, 'EEM^max': EMmaxs,
'Omega_GW^stat': OmGWstat, 'PPM': PMs,
'PPGW': PGWs, 'kf': ks, 'n': ns, 'eta': etas})
if save: df.to_csv('tableI.csv')
if print_tex:
for i in range(0, len(types)):
exp_EM = np.floor(np.log10(EMmaxs[i]))
bas_EM = EMmaxs[i]/10**exp_EM
exp_EGW = np.floor(np.log10(OmGWstat[i]))
bas_EGW = OmGWstat[i]/10**exp_EGW
exp_eta = np.floor(np.log10(etas[i]))
bas_eta = etas[i]/10**exp_eta
if sigmas[i] == '-0.01' or sigmas[i] == '-1':
PM_s = '$\!\!\!%.3f$'%PMs[i]
sig_s = '$\!\!\!%s$'%sigmas[i]
else:
PM_s = '\ $%.2f$'%PMs[i]
sig_s = '\ $%s$'%sigmas[i]
if sigmas[i] == '-1': PGW_s = '$\!\!%.2f$'%PGWs[i]
else: PGW_s = '\ $%.2f$'%PGWs[i]
print(types[i], '&', sig_s, '&',
"$%.2f \\times 10^{%i}$"%(bas_EM, exp_EM), '&',
"$%.2f \\times 10^{%i}$"%(bas_EGW, exp_EGW), '&',
PM_s, '&', PGW_s, '&', ks[i], '&', ns[i], '&',
"$%.0f \\times 10^{%i}$"%(bas_eta, exp_eta), '\\\\')
return df
def plot_st(k, sp, hel=False, yks=False, N=5, Ay=1., Ay2=[],
Ax=[], inds_show=[], inds_show_txt=[], fb=False, min_sp=[],
max_sp=[], min_sp_pos=[], max_sp_pos=[], min_sp_neg=[],
max_sp_neg=[], yv=False, color='black', marg=1.02, diff0 = 0.1):
"""
Function to plot the spectrum selected, with options to plot positive
and negative values in different colors for helical spectra.
Arguments:
k -- array of wave numbers
sp -- array of spectral values
hel -- option to separate positive and negative values for spectral
functions (red shows positive and blue shows negative)
(default False)
yks -- option to plot power law fits above (or below) spectra
(default True)
N -- number of power law fits to be used (default 5)
Ay -- factor multiplied to the power law fits for visualization
(default 1)
Ax, Ay2 -- factors multiplied to the x, y positions of the text k^{a/b}
of the power law fits. It should be given as an array of
length N (default values are 1s)
inds_show -- indices of the discretized spectra in power law fits where
we show the power law fit (default all of them)
inds_show_txt -- indices of the discretized spectra in power law fits
where we show the text k^{a/b} (default all of them)
fb -- option to use filled plot between maximum and minimum of
spectra (default False)
min_sp, max_sp -- if fb is selected we need to provide the minimum
and maximum of the spectral functions
min_sp_pos, min_sp_neg, max_sp_pos, max_sp_neg -- if fb and hel are
selected, we need to provide the maximum and minimum of
the spectral functions (both for the positive and negative
values)
yv -- option to plot vertical lines over the oscillations at the
data points (default False)
color -- color of the spectra lines of the plot (default 'black'),
this option does not change the color of positive and negative
values of helical spectra
marg -- factor that indicates the length of the power law fits shown,
in the interval (k0*marg, k1/marg) to show that the power laws
are discretized
diff0 -- margin allowed to approximate the power law exponent to
a fraction for the text shown k^{a/b} (default 0.1)
"""
if len(Ay2) == 0: Ay2 = [1.]*N
if len(Ax) == 0: Ax = [1.]*N
if hel:
k_pos, k_neg, sp_pos, sp_neg, color = spectra.red_blue_func(k, sp)
plt.plot(k_pos, sp_pos, '.', color='red')
plt.plot(k_neg, abs(sp_neg), '.', color='blue')
plt.plot(k, abs(sp), color='black', ls='dotted')
# fb option to plot shaded regions between minimum and maximum
# of oscillations of the helical GW energy density spectra
if fb or yv:
if len(min_sp_pos)*len(max_sp_pos)*len(min_sp_neg)* \
len(max_sp_neg) == 0:
print('\n with the arguments hel=True and fb=True or yv=True',
' you need to provide min_sp_pos, max_sp_pos',
' min_sp_neg, and max_sp_neg')
else:
if fb:
plt.fill_between(k, min_sp_pos, max_sp_pos, alpha=0.1,
color='red')
plt.fill_between(k, min_sp_neg, max_sp_neg, alpha=0.1,
color='blue')
if yv:
for i in range(0, len(k)):
plt.vlines(k[i], min_sp_neg[i], max_sp_neg[i],
color='blue', ls='dashed', lw=0.6)
plt.vlines(k[i], min_sp_pos[i], max_sp_pos[i],
color='red', ls='dashed', lw=0.6)
else:
plt.plot(k, sp, color=color, lw=2)
# fb option to plot shaded regions between minimum and maximum
# of oscillations of the helical GW energy density spectra
if fb or yv:
if len(min_sp)*len(max_sp) == 0:
print('\n with the arguments hel=False and fb=True or yv=True',
' you need to provide min_sp and max_sp_pos')
else:
if fb:
plt.fill_between(k, min_sp, max_sp, alpha=0.1, color=color)
if yv:
for i in range(0, len(k)):
plt.vlines(k[i], min_sp[i], max_sp[i], color=color,
ls='dashed', lw=0.6)
if yks:
ks, sp_ks, aks, km, sp_m, kps, c = spectra.compute_yks(k, abs(sp), N)
show = np.zeros(N)
show_txt = np.zeros(N)
if len(inds_show) > N:
print('the indices selected in inds_show cannot be more than',
' the number of discretized power laws N')
inds_show = np.array(range(0, N))
if len(inds_show) == 0: show += 1
else: show[inds_show] = 1
if len(inds_show_txt) > N: inds_show = np.array(range(0, N))
else: show_txt[inds_show_txt] = 1
kps = np.logspace(np.log10(ks[0]), np.log10(ks[-1]), N + 1)
for i in range(0, N):
str_exp = '$k^{%.0f}$'%aks[i]
for j in range(0, 6):
str_exp, diff = spectra.str_exp(str_exp, aks[i],
j + 1, diff=diff0)
if diff < diff0: break
else: diff0 = diff
if show[i]:
kss = np.logspace(np.log10(kps[i]*marg),
np.log10(kps[i + 1]/marg))
plt.plot(kss, kss**aks[i]*10**c[i]*Ay,
color='black', lw=.6)
if show_txt[i]:
txt = plt.text(km[i]*Ax[i], sp_m[i]*Ay*Ay2[i],
str_exp, size=30)
def plot_EM_EGW(run, save=True):
"""
Function that plots the magnetic energy and helicity spectra at the time
of maximum magnetic energy density.
It also plots the GW energy density and helicity spectra, averaging over
times after the GW energy has entered a stationary oscillatory stage (this
needs to be previously computed and stored in the run variable, see
initialize_JCAP_2021.py).
It corresponds to figures 1-3 of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
run -- variable run with spectral information
save -- option to save the figure in plots/'name_run'EM_EGW.pdf'
(default True)
"""
# auxiliary functions used in plot_EM_EGW
# chose indices to show power law fits and text with k^(a/b) for magnetic
# spectrum EM(k)
# chose indices to show power law fits and text with k^(a/b) for magnetic
# helicity spectrum HM(k)
# chose indices to show power law fits and text with k^(a/b) for GW
# spectrum EGW(k)
# chose indices to show power law fits and text with k^(a/b) for GW
# helicity spectrum HGW(k)
plt.rcParams.update({'xtick.labelsize': 'xx-large',
'ytick.labelsize': 'xx-large',
'axes.labelsize': 'xx-large'})
name = run.name_run
k = run.spectra.get('k')[1:]
t = run.spectra.get('t_helmag')
# read magnetic spectra
if run.turb == 'k':
EM = np.array(run.spectra.get('kin')[:, 1:], dtype='float')
HkM = np.array(run.spectra.get('helkin_comp')[:, 1:], dtype='float')
if run.turb == 'm':
EM = np.array(run.spectra.get('mag')[:, 1:], dtype='float')
HkM = np.array(run.spectra.get('helmag_comp')[:, 1:], dtype='float')
# read GW spectra
min_GWs = np.array(run.spectra.get('EGW_min_sp'), dtype='float')
max_GWs = np.array(run.spectra.get('EGW_max_sp'), dtype='float')
mean_GWs = np.array(run.spectra.get('EGW_stat_sp'), dtype='float')
min_pos_HGWs = abs(np.array(run.spectra.get('helEGW_pos_min_sp'),
dtype='float'))
min_neg_HGWs = abs(np.array(run.spectra.get('helEGW_neg_min_sp'),
dtype='float'))
max_pos_HGWs = abs(np.array(run.spectra.get('helEGW_pos_max_sp'),
dtype='float'))
max_neg_HGWs = abs(np.array(run.spectra.get('helEGW_neg_max_sp'),
dtype='float'))
mean_HGWs = np.array(run.spectra.get('helEGW_stat_sp'), dtype='float')
# get time that corresponds to maximum of magnetic energy density tini
dtk = t[1] - t[0]
indt = np.where(abs(t - run.tini) <= dtk/2)[0][0]
fig, ax = plt.subplots(figsize=(12,10))
plt.xscale('log')
plt.yscale('log')
#plt.text(3e4, 4e-23, '$k$', fontsize=30)
plt.xlabel('$k$')
TT = 'M'
if run.turb == 'k': TT = 'K'
plt.title(r'$E_{\rm %s, GW} (k)$ and $H_{\rm %s, GW} (k)$'%(TT, TT),
fontsize=30, pad=15)
# specific options for the plots
N_EM, Ay2_EM, Ax_EM = init_Ay2_Ax(12, 2., 1.)
N_HM, Ay2_HM, Ax_HM = init_Ay2_Ax(10, .04, 1.)
N_EGW, Ay2_EGW, Ax_EGW = init_Ay2_Ax(12, 2.5, 1.)
N_HGW, Ay2_HGW, Ax_HGW = init_Ay2_Ax(10, .08, .8)
Ay_HGW = .2
Ay_HM = .2
if 'i' in name:
xleg = 1.5e4
yleg = 5e-13
else:
xleg = 8e2
yleg = 5e-20
if '001' in name: yks_H = False
else: yks_H = True
# choose indices for which to show the power law fits and the power law
# text above the fit for each of the runs
inds_show_EM, inds_show_txt_EM, Ax_EM, Ay2_EM = \
indices_EM(name, Ax_EM, Ay2_EM)
inds_show_HM, inds_show_txt_HM, Ax_HM, Ay2_HM = \
indices_HM(name, Ax_HM, Ay2_HM)
inds_show_EGW, inds_show_txt_EGW, Ax_EGW, Ay2_EGW = \
indices_EGW(name, Ax_EGW, Ay2_EGW)
inds_show_HGW, inds_show_txt_HGW, Ax_HGW, Ay2_HGW = \
indices_HGW(name, Ax_HGW, Ay2_HGW)
if 'i' in name: str_typ = 'ini'
else: str_typ = 'forc'
if name == 'M0': sig_val = '0'
if '01' in name and 'i' in name: sig_val = '0.1'
if '01' in name and 'M' in name: sig_val = '0.1'
if '01' in name and 'K' in name: sig_val = '0.1'
if '3' in name: sig_val = '0.3'
if '5' in name: sig_val = '0.5'
if '7' in name: sig_val = '0.7'
if name == 'i_s1': sig_val = '1'
if name == 'M1' or name == 'K1': sig_val = '1'
if '001' in name and 'f' in name:
if 'neg' in name: sig_val = '-0.01'
else: sig_val = '0.01'
if name == 'f_s1_neg': sig_val = '-1'
str_leg = r'$\sigma^{\rm %s}_{\rm M}=%s$'%(str_typ, sig_val)
plot_st(k, EM[indt, :], yks=True, N=N_EM, inds_show=inds_show_EM,
inds_show_txt=inds_show_txt_EM, Ay=4, Ay2=Ay2_EM, Ax=Ax_EM)
plot_st(k, HkM[indt, :], hel=True, yks=yks_H, N=N_HM,
inds_show=inds_show_HM, inds_show_txt=inds_show_txt_HM,
Ay=Ay_HM, Ay2=Ay2_HM, Ax=Ax_HM)
plot_st(k, mean_GWs, fb=True, yv=True, min_sp=min_GWs, max_sp=max_GWs,
yks=True, N=N_EGW, inds_show=inds_show_EGW, Ax=Ax_EGW,
inds_show_txt=inds_show_txt_EGW, Ay=4, Ay2=Ay2_EGW)
plot_st(k, mean_HGWs, hel=True, fb=True, yv=True, min_sp_neg=min_neg_HGWs,
max_sp_neg=max_neg_HGWs, min_sp_pos=min_pos_HGWs,
max_sp_pos=max_pos_HGWs, yks=yks_H, N=N_HGW,
inds_show=inds_show_HGW, inds_show_txt=inds_show_txt_HGW,
Ay=Ay_HGW, Ay2=Ay2_HGW, Ax=Ax_HGW)
handles = []
line_mag, = ax.plot([], [], '-', color='black', label=r'$E_{\rm %s}(k)$'%TT)
line_helpos, = ax.plot([], [], '.',
label=r'$+\frac{1}{2} k H_{\rm %s}(k)$'%TT,
color='red')
line_helneg, = ax.plot([], [], '.',
label=r'$-\frac{1}{2} k H_{\rm %s}(k)$'%TT,
color='blue')
line_GW, = ax.plot([], [], color='black', label=r'$E_{\rm GW}(k)$')
line_HGWpos, = ax.plot([], [], '.', label=r'$+H_{\rm GW} (k)$',
color='red')
line_HGWneg, = ax.plot([], [], '.', label=r'$-H_{\rm GW}(k)$',
color='blue')
handles = [line_mag, line_helpos, line_helneg]
lgd1 = ax.legend(handles=handles, loc='upper right', fontsize=23,
frameon=False)
handles2 = [line_GW, line_HGWpos, line_HGWneg]
lgd2 = plt.legend(handles=handles2, loc='lower left', fontsize=23,
frameon=False)
ax = plt.gca()
ax.add_artist(lgd1)
plt.yticks([1e-21, 1e-19, 1e-17, 1e-15, 1e-13, 1e-11, 1e-9,
1e-7, 1e-5, 1e-3])
plot_sets.axes_lines()
plt.tick_params(axis='y', direction='out', length=10)
plt.text(xleg, yleg, str_leg, fontsize=30,
bbox=dict(facecolor='white', edgecolor='black',
boxstyle='round,pad=.5'))
plt.xlim(100, 8e4)
plt.ylim(1e-21, 1e-3)
if save: plt.savefig('plots/' + run.name_run + 'EM_EGW.pdf',
bbox_inches='tight')
def plot_helicity_vs_t(runs, type='ini', save=True):
"""
Function that generates the plot of the total magnetic or kinetic helicity
as a function of time.
It corresponds to figure 4 of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary that includes the run variables
type -- selects the types of runs to be plotted (default 'ini', other
option is 'forc'), i.e., runs with an initial magnetic field
('ini') or runs in which the magnetic field is initially driven
during the simulation ('forc')
save -- option to save the figure in plots/sigmaM_vs_t_'type'.pdf'
(default True)
"""
if type == 'ini': RR = ['i_s01', 'i_s03', 'i_s05', 'i_s07', 'i_s1']
elif type == 'forc':
RR = ['f_s001', 'f_s001_neg', 'f_s03', 'f_s05', 'f_s07', 'f_s1_neg']
elif type == 'kin': RR = ['K0', 'K01_c', 'K03', 'K05', 'K1']
elif type == 'mag': RR = ['M0', 'M01_c', 'M03', 'M05', 'M1']
plt.figure(figsize=(12,8))
for i in RR:
run = runs.get(i)
col = assign_col(i)
if run.turb == 'm': sp = 'mag'
if run.turb == 'k': sp = 'kin'
t = np.array(run.spectra.get('t_hel' + sp), dtype='float')[:, 0]
t2 = run.spectra.get('t_' + sp)
EM = np.array(run.spectra.get(sp), dtype='float')
HkM = run.spectra.get('hel' + sp + '_comp')
k = run.spectra.get('k')
EMs_mean = np.trapz(EM, k, axis=1)
HMs_mean = np.trapz(HkM, k, axis=1)
EMs_mean = np.interp(t, t2, EMs_mean)
eps = abs(HMs_mean)/EMs_mean
plt.plot(t - 1, eps, '.', color=col)
if col == 'black': sig = 0
if col == 'green': sig = 0.1
if col == 'darkgreen': sig = 0.3
if col == 'orange': sig = 0.5
if col == 'darkorange': sig = 0.7
if col == 'red' or col == 'blue': sig = 1.
eps = 2*sig/(1 + sig**2)
plt.hlines(eps, 1e-5, 5, color=col, ls='dashed', lw=0.5)
if type == 'ini': tp = 'ini'
else: tp = 'forc'
#line_s0, line_s001, line_s01, line_s03, line_s05, line_s07, \
# line_s1, line_s1_neg, = get_lines_sig(tp)
#hdls1 = [line_s001, line_s01, line_s03, line_s05, line_s07, line_s1,]
#plt.legend(handles=hdls1, fontsize=24, loc='center left')
MM = 'M'
if type == 'kin': MM = 'K'
sig_s = r'$\sigma_{\rm %s}^{\rm %s} = $'%(MM, tp)
if type != 'forc':
plt.text(2.5e-3, .12, sig_s + ' 0.1', color='green', fontsize=24)
if type != 'ini':
plt.text(2.5e-3, -.08, sig_s + ' 0', color='black', fontsize=24)
else:
plt.text(2.5e-3, .04, sig_s + ' $\pm 0.01$', color='black', fontsize=24)
plt.text(2.5e-3, .47, sig_s + ' 0.3', color='darkgreen', fontsize=24)
plt.text(2.5e-3, .72, sig_s + ' 0.5', color='orange', fontsize=24)
if type == 'ini' or type == 'forc':
plt.text(2.5e-3, .86, sig_s + ' 0.7', color='darkorange', fontsize=24)
if type != 'forc':
plt.text(2.5e-3, 1.04, sig_s + ' $1$', color='red', fontsize=24)
else:
plt.text(2.5e-3, 1.04, sig_s + ' $-1$', color='blue', fontsize=24)
plot_sets.axes_lines()
plt.xscale('log')
plt.xlim(2e-3, 5e-1)
plt.ylim(-.1, 1.13)
if type == 'mag' or type == 'kin':
plt.ylim(-.15, 1.13)
plt.xlim(2e-3, 1.5e0)
plt.xlabel('$\delta t=t-1$')
plt.ylabel(r'$|{\cal P}_{\rm M}(t)|$')
plt.yticks(np.linspace(0, 1, 5))
if save: plt.savefig('plots/sigmaM_vs_t_' + type + '.pdf',
bbox_inches='tight')
def plot_PGW(runs, PPh='GW', type='ini', save=True):
"""
Function that plots the GW polarization spectra, averaging over
times after the GW energy and helicity have entered stationary oscillatory
stages (this needs to be previously computed and stored in the run variable,
see initialize_JCAP_2021.py).
It corresponds to figure 5 of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary of variables run with spectral information
type -- selects the types of runs to be plotted (default 'ini', other
option is 'forc'), i.e., runs with an initial magnetic field
('ini') or runs in which the magnetic field is initially driven
during the simulation ('forc')
save -- option to save the figure in plots/PGW_'type'_sigma.pdf'
(default True)
"""
plt.rcParams.update({'xtick.labelsize': 'xx-large',
'ytick.labelsize': 'xx-large',
'axes.labelsize': 'xx-large'})
if type == 'ini': RR = ['i_s01', 'i_s03', 'i_s05', 'i_s07', 'i_s1']
elif type == 'forc':
RR = ['f_s001', 'f_s001_neg', 'f_s03', 'f_s05', 'f_s07', 'f_s1_neg']
elif type == 'kin': RR = ['K0', 'K01_c', 'K03', 'K05', 'K1']
elif type == 'mag': RR = ['M0', 'M01_c', 'M03', 'M05', 'M1']
if PPh == 'GW': PP = 'GW'
if PPh == 'h': PP = 'h'
fig, ax = plt.subplots(figsize=(12,10))
for i in RR:
# select colors
col = assign_col(i)
run = runs.get(i)
k = run.spectra.get('k')[1:]
PGW = run.spectra.get('P' + PP + '_stat_sp')
PGW_min = run.spectra.get('P' + PP + '_min_sp')
PGW_max = run.spectra.get('P' + PP + '_max_sp')
plt.plot(k, PGW, color=col, lw=2)
plt.fill_between(k, PGW_min, PGW_max, alpha=0.3, color=col)
for i in range(0, len(k)):
plt.vlines(k[i], PGW_min[i], PGW_max[i], color=col, lw=0.6,
ls='dashed')
plot_sets.axes_lines()
plt.xscale('log')
plt.xlabel('$k$')
plt.ylabel(r'${\cal P}_{\rm %s} (k)$'%PP)
sigs = []
plt.xlim(120, 5e4)
tp = 'forc'
if type == 'forc':
sigs = ['0.01', '-0.01', '0.3', '0.5', '0.7', '-1']
cols = ['black', 'black', 'darkgreen', 'orange', 'darkorange', 'blue']
plt.ylim(-1.15, 1.15)
plt.yticks(np.linspace(-1, 1, 9))
if PPh == 'GW':
xxs = [7e2, 7e2, 7e2, 8e3, 2.5e3, 7e2]
yys = [.2, -.25, .4, .55, .9, -.9, -.9]
plt.text(3e4, -0.9, '(b)', fontsize=30)
else:
xxs = [5e2, 5e2, 5e2, 4e3, 3e3, 5e2]
yys = [.25, -.25, .5, .5, .7, -.8]
plt.text(3e4, -0.9, '(d)', fontsize=30)
else:
if type == 'ini':
tp = 'ini'
sigs = ['0.1', '0.3', '0.5', '0.7', '1']
cols = ['green', 'darkgreen', 'orange', 'darkorange', 'red']
if PPh == 'GW':
xxs = [1e4, 1e4, 1e4, 1e4, 1e3]
yys = [.3, .5, .75, 1.05, 1.05]
plt.text(3e4, -0.35, '(a)', fontsize=30)
else:
xxs = [1.5e4, 1.5e4, 1.5e4, 1.5e4, 1e3]
yys = [0.05, 0.35, 0.62, 1.05, 1.05]
plt.text(3e4, -0.35, '(c)', fontsize=30)
else:
line_s0, line_s001, line_s01, line_s03, line_s05, line_s07, \
line_s1, line_s1_neg, = get_lines_sig(tp)
hdls = [line_s1, line_s05, line_s03, line_s01, line_s0,]
plt.legend(handles=hdls, fontsize=24, loc='upper right',
frameon=False)
plt.xlim(120, 3e4)
plt.ylim(-.5, 1.2)
for i in range(0, len(sigs)):
plt.text(xxs[i], yys[i],
r'$\sigma_{\rm M}^{\rm %s}=%s$'%(tp, sigs[i]),
fontsize=30, color=cols[i])
if save: plt.savefig('plots/P' + PPh + '_' + type + '_sigma.pdf',
bbox_inches='tight')
def plot_PGW_vs_PM(runs, save=True):
"""
Function that generates the plot of the total GW polarization PGW as a
function of the total fractional helicity of the sourcing magnetic PM or
velocity PK field.
It corresponds to figure 6 of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary that includes the run variables
save -- option to save the figure in plots/PGW_vs_PM.pdf'
(default True)
"""
plt.figure(figsize=(12,8))
for i in runs:
run = runs.get(i)
k = run.spectra.get('k')
EGW = run.spectra.get('EGW_stat_sp')
HGW = run.spectra.get('helEGW_stat_sp')
t = run.spectra.get('t_mag')
indt = 0
EM = run.spectra.get('mag')[indt, :]
HM = run.spectra.get('helmag_comp')[indt, :]
PM = np.trapz(HM, k)/np.trapz(EM, k)
PGW = np.trapz(HGW, k[1:])/np.trapz(EGW, k[1:])
if 'i' in i: plt.plot(abs(PM), abs(PGW), 'o', color='blue')
else: plt.plot(abs(PM), abs(PGW), 'x', color='red')
col = assign_col(i)
if col == 'black': sig = 0
if col == 'green': sig = 0.1
if col == 'darkgreen': sig = 0.3
if col == 'orange': sig = 0.5
if col == 'darkorange': sig = 0.7
if col == 'red' or col == 'blue': sig = 1.
if col == 'blue': col = 'red'
eps = 2*sig/(1 + sig**2)
plt.vlines(eps, 0, 2*eps/(1 + eps**2), color=col, ls='dashed', lw=0.5)
plot_sets.axes_lines()
plt.xlim(0, 1.05)
plt.ylim(0, 1.1)
plt.xlabel(r'$|{\cal P}_{\rm M}|$')
plt.ylabel(r'$|{\cal P}_{\rm GW}|$')
xx = np.linspace(0, 1.1)
plt.plot(xx, xx, lw=.5, ls='dashed', color='black')
plt.plot(xx, 2*xx/(1 + xx**2), lw=.5, ls='dashed', color='black')
plt.text(.06, .75, r'${\cal P}_{\rm GW}=2 {\cal P}_{\rm M}' + \
r'/\bigl(1 + {\cal P}_{\rm M}^2\bigr)$',
fontsize=24, bbox=dict(facecolor='white', edgecolor='black',
boxstyle='round,pad=.2'))
plt.text(.3, .2, r'${\cal P}_{\rm GW}={\cal P}_{\rm M} = 2' + \
r'\sigma_{\rm M}/\bigl(1 + \sigma_{\rm M}^2\bigr)$',
fontsize=24, bbox=dict(facecolor='white', edgecolor='black',
boxstyle='round,pad=.2'))
#plt.legend(fontsize=14)
line_ini, = plt.plot([], [], 'o', color='blue', label='initial')
line_forc, = plt.plot([], [], 'x', color='red', label='forcing (short)')
hdls = [line_ini, line_forc,]
plt.legend(handles=hdls, fontsize=24, loc='upper left', frameon=False)
ax = plt.gca()
ax.tick_params(axis='x', pad=20)
ax.tick_params(axis='y', pad=10)
if save: plt.savefig('plots/PGW_vs_PM.pdf', bbox_inches='tight')
def plot_efficiency(runs, save=True):
"""
Function that generates the plot of the GW production efficiency for
different runs as a function of time.
It corresponds to figure 10 of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary that includes the run variables
save -- option to save the figure in plots/OmGW_efficiency_vs_t.pdf'
(default True)
"""
plt.figure(figsize=(12,8))
RR = ['i_s01', 'i_s03', 'i_s05', 'i_s07', 'i_s1', 'f_s001', 'f_s001_neg',
'f_s03', 'f_s05', 'f_s07', 'f_s1_neg', 'M0', 'M01_c', 'M03', 'M05',
'M1', 'ini1', 'ini2', 'ini3', 'hel1', 'hel2', 'hel3', 'hel4',
'noh1', 'noh2', 'ac1', 'ac2', 'ac3']
for i in RR:
# select colors
col = assign_col(i)
run = runs.get(i)
t = run.ts.get('t')
EGW = run.ts.get('EEGW')
if run.turb=='m':
Om = run.OmMmax
kf = run.kfM
if run.turb=='k':
Om = run.OmKmax
kf = run.kfK
if abs(kf - 510.9) < 10 or 'M' in i: kf = 708
if abs(kf - 5999) < 100: kf=7000
lww = 1.
ls = 'solid'
if 'M' in i: lww = .6
if 'ac' in i or 'hel' in i:
lww = .8; ls = 'dotted'
if 'ini' in i or 'noh' in i:
lww = .8; ls = 'dotted'
plt.plot(t-1, np.sqrt(EGW)/Om*kf, color=col, ls=ls, lw=lww)
line_s0, line_s001, line_s01, line_s03, line_s05, line_s07, \
line_s1, line_s1_neg, = get_lines_sig('')
hdls1 = [line_s001, line_s01, line_s03, line_s05, line_s07, line_s1,]
plt.text(.2, np.sqrt(.4), 'initial')
plt.text(.08, 3.1, 'forcing (short)')
plt.text(.007, 6, 'forcing (short, $k_*\sim 60$)')
plt.text(1.2, np.sqrt(30), 'forcing (long)')
plt.text(2e-1, 10, 'acoustic')
plt.legend(handles=hdls1, fontsize=24, framealpha=1)
plot_sets.axes_lines()
plt.xlim(6e-3, 4)
plt.ylim(1e-1, 2e1)
plt.xlabel('$\delta t=t-1$')
plt.ylabel(r'$k_*\, \Omega_{\rm GW}^{1/2} (t)/{\cal E}_{\rm M,K}^{\rm max}$')
plt.yscale('log')
plt.xscale('log')
if save: plt.savefig('plots/OmGW_efficiency_vs_t.pdf', bbox_inches='tight')
def plot_OmGW_vs_f(runs, type='ini', T=1e5*u.MeV, g=100, SNR=10, Td=4, OmM=.1,
Xi=False, save=True):
"""
Function that generates the plot of the GW energy density frequency
spectra at present time compared to the LISA, Taiji, BBO, and DECIGO
sensitivities and power law sensitivities (PLS).
It produces figure 11-13 of A. Roper Pol, S. Mandal, A. Brandenburg,
and T. Kahniashvili, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary that includes the run variables
type -- selects the types of runs to be plotted (default 'ini', other
option is 'forc'), i.e., runs with an initial magnetic field
('ini') or runs in which the magnetic field is initially driven
during the simulation ('forc')
T -- temperature scale (in natural units) at the time of turbulence
generation (default 100 GeV, i.e., electroweak scale)
g -- number of relativistic degrees of freedom at the time of
turbulence generation (default 100, i.e., electroweak scale)
SNR -- signal-to-noise ratio (SNR) of the resulting PLS (default 10)
Td -- duration of the mission (in years) of the resulting PLS
(default 4)
save -- option to save the resulting figure as
plots/'OmGW'_'type'_detectors_'Om'.pdf (default True)
where 'OmGW' = OmGW or XiGW (for Xi False and True,
respectively), 'Om' = OmM005 or OmM01, and 'type' is 'ini'
or 'forc'
"""
# read LISA and Taiji sensitivities
CWD = os.getcwd()
os.chdir('..')
if Xi:
fs, LISA_Om, LISA_OmPLS, LISA_Xi, LISA_XiPLS = \
inte.read_sens(SNR=SNR, T=Td, Xi=True)
fs_Tai, Taiji_Om, Taiji_OmPLS, Taiji_Xi, Taiji_XiPLS = \
inte.read_sens(SNR=SNR, T=Td, interf='Taiji', Xi=True)
fs_comb, LISA_Taiji_Xi, LISA_Taiji_XiPLS = \
inte.read_sens(SNR=SNR, T=Td, interf='comb')
fs_comb = fs_comb*u.Hz
else:
fs, LISA_Om, LISA_OmPLS = inte.read_sens(SNR=SNR, T=Td)
fs_Tai, Taiji_Om, Taiji_OmPLS = inte.read_sens(SNR=SNR, T=Td,
interf='Taiji')
dir = 'detector_sensitivity'
f_DECIGO, DECIGO_OmPLS = inte.read_csv(dir, 'DECIGO_PLS_SNR10')
ff_D = np.logspace(np.log10(f_DECIGO[0]), np.log10(f_DECIGO[-1]), 100)
DECIGO_OmPLS = 10**np.interp(ff_D, f_DECIGO, np.log10(DECIGO_OmPLS))
f_DECIGO = ff_D*u.Hz
f_BBO, BBO_OmPLS = inte.read_detector_PLIS_Schmitz(det='BBO', SNR=SNR,
T=Td)
f_BBO = f_BBO*u.Hz
fs = fs*u.Hz
fs_Tai = fs_Tai*u.Hz
os.chdir(CWD)
# internal function to compute f, OmGW as present time observables
plt.rcParams.update({'xtick.labelsize': 'xx-large',
'ytick.labelsize': 'xx-large',
'axes.labelsize': 'xx-large'})
if type == 'ini': RR = ['i_s01', 'i_s03', 'i_s05', 'i_s07', 'i_s1']
elif type == 'forc':
RR = ['f_s001', 'f_s001_neg', 'f_s03', 'f_s05', 'f_s07', 'f_s1_neg']
elif type == 'kin': RR = ['K0', 'K01_c', 'K03', 'K05', 'K1']
elif type == 'mag': RR = ['M0', 'M01_c', 'M03', 'M05', 'M1']
fig, ax = plt.subplots(figsize=(12,10))
for i in RR:
# select colors
col = assign_col(i)
run = runs.get(i)
ratio = OmM/run.Ommax
f, OmGW = shift_Omega(run, T, g, ratio, Xi, sp='stat')
_, OmGW_min = shift_Omega(run, T, g, ratio, Xi, sp='min')
_, OmGW_max = shift_Omega(run, T, g, ratio, Xi, sp='max')
plt.plot(f, abs(OmGW), color=col, lw=3)
#if 'i_s1' not in i:
#if type == 'ini' or type == 'forc':
plt.fill_between(f, OmGW_min, OmGW_max, alpha=0.1, color=col)
for i in range(0, len(f)):
plt.vlines(f.value[i], OmGW_min[i], OmGW_max[i], color=col,
lw=0.6, ls='dashed')
if type == 'ini': tp = 'ini'
else: tp = 'forc'
line_s0, line_s001, line_s01, line_s03, line_s05, line_s07, \
line_s1, line_s1_neg, = get_lines_sig(tp)
if type == 'ini': hdls = [line_s01, line_s03, line_s05, line_s07, line_s1,]
elif type == 'forc':
hdls = [line_s001, line_s03, line_s05, line_s07, line_s1_neg,]
else:
hdls = [line_s0, line_s01, line_s03, line_s05, line_s1,]
plt.legend(handles=hdls, fontsize=24, loc='upper right', framealpha=1)
Om = '0.1'
if OmM == .05: Om = '0.05'
xb = 2e-3
yb = 8e-11
if Xi:
xb = 1e-3
yb = 1e-17
MM = 'M'
if type == 'kin': MM = 'K'
plt.text(xb, yb, r'${\cal E}^{\rm max}_{\rm %s} = %s$'%(MM, Om),
fontsize=30, bbox=dict(facecolor='white', edgecolor='black',
boxstyle='round,pad=.5'))
if type == 'ini':
xB = 1.2e-2; yB = 1e-16
xD = 2e-3; yD = 6e-16
if OmM == 0.1: pan = '(a)'
else: pan = '(c)'
else:
xB = 3e-2; yB = 3e-17
xD = 2.5e-2; yD = 1e-15
if OmM == 0.1: pan = '(b)'
else: pan = '(d)'
if type == 'kin': pan = '(a)'
if type == 'mag': pan = '(b)'
if Xi:
plt.plot(fs, LISA_XiPLS, color='lime', ls='-.', lw=1)
plt.text(1.1e-3, 5e-11, 'LISA', fontsize=30, color='lime')
plt.plot(fs_Tai, Taiji_XiPLS, color='crimson', ls='-.', lw=1)
plt.text(1e-2, 4e-11, 'Taiji', fontsize=30, color='crimson')
plt.plot(fs_comb, LISA_Taiji_XiPLS, color='navy', ls='-.', lw=1)
plt.text(1e-2, 2e-13, r'LISA--Taiji', fontsize=30, color='navy')
else:
plt.plot(fs, LISA_OmPLS, color='lime', ls='-.', lw=1)
plt.text(7e-3, 4e-12, 'LISA', fontsize=30, color='lime')
plt.plot(fs_Tai, Taiji_OmPLS, color='crimson', ls='-.', lw=1)
plt.text(1.3e-2, 1e-13, 'Taiji', fontsize=30, color='crimson')
plt.plot(f_BBO, BBO_OmPLS, color='navy', ls='-.', lw=1)
plt.text(xB, yB, 'BBO', fontsize=30, color='navy')
plt.plot(f_DECIGO, DECIGO_OmPLS, color='royalblue', ls='-.', lw=1)
plt.text(xD, yD, 'DECIGO', fontsize=30, color='royalblue')
plt.text(4e-4, 4e-18, pan, fontsize=30)
plot_sets.axes_lines()
plt.xscale('log')
plt.yscale('log')
plt.xlim(3e-4, 2e-1)
plt.ylim(1e-18, 1e-8)
plt.xlabel('$f$ [Hz]')
if Xi: plt.ylabel(r'$h_0^2 |\Xi_{\rm GW} (f)|$')
else: plt.ylabel(r'$h_0^2 \Omega_{\rm GW} (f)$')
Om_save = 'OmM01'
if OmM == 0.05: Om_save = 'OmM005'
OmGW_s = 'OmGW'
if Xi: OmGW_s = 'XiGW'
if save:
plt.savefig('plots/%s_%s_detectors_%s.pdf'%(OmGW_s, type, Om_save),
bbox_inches='tight')
def plot_PM_beltrami(save=True):

    """
    Function that plots the analytical relations between the GW polarization P
    and the source helicity PM (for magnetic) for the Beltrami field model,
    compared to the empirical fit obtained in the numerical simulations.

    It produces figure 14 (appendix A) of A. Roper Pol, S. Mandal,
    A. Brandenburg, and T. Kahniashvili, "Polarization of gravitational waves
    from helical MHD turbulent sources," https://arxiv.org/abs/2107.05356.

    Arguments:
        save -- option to save the figure in plots/PM_beltrami.pdf'
                (default True)
    """

    plt.figure(figsize=(12,8))
    sigma = np.linspace(0, 1, 100)
    # Magnetic polarization of the Beltrami field as a function of sigma.
    Pm_sig = 2*sigma/(1+sigma**2)
    # Shift factor (1 + P_M^2)/2; algebraically equal to the expanded form
    # .5 + 2 sigma^2/(1 + sigma^2)^2.  The previous duplicate assignment of
    # Sh_sig (same value, computed twice) has been removed.
    Sh_sig = .5*(1 + Pm_sig**2)
    # GW polarization P = 2 P_M/(1 + P_M^2).
    Ph_sig = Pm_sig/Sh_sig
    plt.plot(sigma, Pm_sig, ls='dotted', color='red',
             label=r'${\cal P}_{\rm M}$')
    plt.plot(sigma, Sh_sig, ls='-.', color='blue',
             label=r'$\left(1 + {\cal P}_{\rm M}^2\right)/2$')
    plt.plot(sigma, Ph_sig, color='black', lw=.8,
             label=r'$2 {\cal P}_{\rm M}/\left(1 + {\cal P}_{\rm M}^2\right)$')
    plot_sets.axes_lines()
    plt.xlim(0, 1)
    plt.ylim(0, 1.1)
    plt.legend(fontsize=28, loc='lower right', frameon=False)
    # Raw string avoids the invalid "\s" escape warning of '$\sigma$'.
    plt.xlabel(r'$\sigma$')
    plt.ylabel(r'$\cal P$')
    ax = plt.gca()
    ax.tick_params(axis='x', pad=20)
    plt.yticks(np.linspace(0, 1, 5))
    if save: plt.savefig('plots/PM_beltrami.pdf', bbox_inches='tight')
| [
37811,
198,
8612,
378,
62,
489,
1747,
62,
41,
33177,
62,
1238,
2481,
13,
9078,
318,
257,
11361,
8027,
326,
460,
307,
973,
198,
1462,
7716,
262,
21528,
286,
317,
13,
371,
3575,
2165,
11,
311,
13,
41715,
11,
317,
13,
13512,
37036,
1... | 1.89384 | 20,177 |
class CNNAnswer(object):
    """Marker class for CNN answers.

    Answers are sent in this format.  (Translated from the original Russian
    docstring.)
    """

    pass
| [
4871,
8100,
33706,
7,
15252,
2599,
198,
197,
37811,
198,
197,
15166,
20375,
38857,
16843,
20375,
45035,
12466,
122,
20375,
140,
123,
21169,
16142,
38857,
30143,
40623,
141,
236,
20375,
21727,
40623,
12466,
110,
220,
20375,
16142,
31583,
254... | 1.392857 | 56 |
"""
@author: Pranay Pradhananga, Damian Katsigiannis
@filename: Goto.py
@title: Goto
@description: Goto is a type of Expression and implement Expression abstract class.
Goto simply refers to the operation specified by the line number and operates the operation
"""
import abc
from Operation import Operation
class Goto(Operation):
    """Operation that delegates to the operation at a referenced line number.

    Per the module description above, Goto refers to the operation specified
    by a line number, operates it, and returns its result.
    """
| [
37811,
198,
31,
9800,
25,
1736,
272,
323,
1736,
324,
7637,
16484,
11,
42574,
47244,
328,
666,
21361,
198,
31,
34345,
25,
402,
2069,
13,
9078,
198,
31,
7839,
25,
402,
2069,
198,
31,
11213,
25,
402,
2069,
318,
257,
2099,
286,
41986,
... | 3.45082 | 122 |
from typing import List
from pathlib import Path
from ..io import log
class Resource:
    """Thin wrapper around :class:`pathlib.Path` used to manage configuration
    files and directories."""
class ConfigDir(Resource):
    """Directory containing config files (directory-flavored :class:`Resource`)."""
class ConfigFile(Resource):
    """Class used to manage config files that may have many possible extensions"""
    # Recognized YAML filename extensions (without the leading dot).
    YAML = ['yml', 'yaml']
| [
6738,
19720,
1330,
7343,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
11485,
952,
1330,
2604,
628,
198,
4871,
20857,
25,
198,
220,
220,
220,
37227,
36918,
2848,
1088,
10644,
973,
329,
11149,
4566,
3696,
290,
29196,
37811,
628,
198,
48... | 3.828283 | 99 |
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "d01"
__email__ = "jungflor@gmail.com"
__copyright__ = "Copyright (C) 2019, Florian JUNG"
__license__ = "MIT"
__version__ = "0.1.0"
__date__ = "2019-03-26"
# Created: 2019-03-21 16:02
import datetime
import flotils
# Module-level logger supplied by flotils.
logger = flotils.get_logger()
if __name__ == "__main__":
    # Script entry point: install flotils' default logging config at DEBUG
    # level, then run the JSON save/load round-trip test.
    import logging.config
    from flotils.logable import default_logging_config
    logging.config.dictConfig(default_logging_config)
    logging.getLogger().setLevel(logging.DEBUG)
    # NOTE(review): test_save_load_json is not defined in this view —
    # presumably defined earlier in this module; confirm.
    test_save_load_json()
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
13... | 2.718876 | 249 |
"""Generated class for state_discovery.json"""
from .state_discovery_family import FamilyDiscoveryState
from .state_discovery_family import FamilyDiscoveryState
class DiscoveryState:
"""Generated schema class"""
@staticmethod
@staticmethod
@staticmethod
| [
37811,
8645,
515,
1398,
329,
1181,
62,
67,
40821,
13,
17752,
37811,
198,
6738,
764,
5219,
62,
67,
40821,
62,
17989,
1330,
7884,
35,
40821,
9012,
198,
6738,
764,
5219,
62,
67,
40821,
62,
17989,
1330,
7884,
35,
40821,
9012,
628,
198,
... | 3.814286 | 70 |
"""
Classes to provide lazy players that are treatable as an entity ID but
do not have to receive one immediately.
"""
from typing import Dict, List, Optional, Union
from hearthstone.enums import GameType
from .exceptions import MissingPlayerData
from .tokens import UNKNOWN_HUMAN_PLAYER
from .utils import is_mercenaries_game_type
| [
37811,
198,
9487,
274,
284,
2148,
16931,
1938,
326,
389,
2190,
540,
355,
281,
9312,
4522,
475,
198,
4598,
407,
423,
284,
3328,
530,
3393,
13,
198,
37811,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
4479,
198,
198,
673... | 3.568421 | 95 |
import tkinter as tk
from tkinter import ttk
import pandas as pd
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
11748,
19798,
292,
355,
279,
67,
628
] | 2.869565 | 23 |
# pylint: disable=no-self-use,invalid-name
from __future__ import with_statement
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from io import open
# Skip this one, it's an expensive test.
TestOpenaiTransformerEmbedderLarge = Skip this one, it's an expensive test.
@pytest.mark.skip()(TestOpenaiTransformerEmbedderLarge)
def create_small_test_fixture(output_dir = u'/tmp') :
    u"""
    Build a tiny OpenAI-transformer model fixture under ``output_dir``/model.

    This is how I created the transformer_model.tar.gz.
    After running this, go to the specified output dir and run

        tar -czvf transformer_model.tar.gz model/

    In case you need to regenerate the fixture for some reason.
    """
    import json
    import pathlib
    from allennlp.modules.openai_transformer import OpenaiTransformer

    model_dir = pathlib.Path(output_dir) / u'model'
    model_dir.mkdir(exist_ok=True)  # pylint: disable=no-member

    symbols = [u"e", u"w", u"o", u"wo", u"."]
    byte_pairs = [(sym1, sym2 + end)
                  for sym1 in symbols  # prefer earlier first symbol
                  for sym2 in symbols  # if tie, prefer earlier second symbol
                  for end in (u'</w>', u'')]  # if tie, prefer ending a word
    # BUG FIX: the symbols must actually be interpolated into the key.  The
    # previous code used the literal string "{sym1}{sym2}" for every pair,
    # which collapsed the whole encoding dict to a single key.
    encoding = dict((u'{}{}'.format(sym1, sym2), idx)
                    for idx, (sym1, sym2) in enumerate(byte_pairs))
    encoding[u"<unk>"] = 0
    with open(model_dir / u'encoder_bpe.json', u'w') as encoder_file:
        json.dump(encoding, encoder_file)
    with open(model_dir / u'vocab.bpe', u'w') as bpe_file:
        bpe_file.write(u"#version 0.0\n")
        for sym1, sym2 in byte_pairs:
            # BUG FIX: interpolate (was the literal "{sym1} {sym2}").
            bpe_file.write(u'{} {}\n'.format(sym1, sym2))
        bpe_file.write(u"\n")
    transformer = OpenaiTransformer(embedding_dim=10, num_heads=2, num_layers=2, vocab_size=(50 + 50), n_ctx=50)
    transformer.dump_weights(output_dir, num_pieces=2)
| [
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
944,
12,
1904,
11,
259,
12102,
12,
3672,
198,
6738,
11593,
37443,
834,
1330,
351,
62,
26090,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
... | 2.44557 | 790 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from art_gallery_web.core.cleaned_up_files import cleaned_up_files
from art_gallery_web.core.decorators import user_is_entry_author
from art_gallery_web.gallery_app.forms import CreateArtForm, PostForm
from art_gallery_web.gallery_app.models import Arts
@login_required
@login_required
@user_is_entry_author
@login_required
@user_is_entry_author
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
628,
198,
6738,
1242,
62,
24460,
62,
12384,
13,
7295,
13,
2375,
22739,
... | 3.171233 | 146 |
# Autogenerated file for Kivy configuration.
# NOTE(review): generated at build time — manual edits are presumably
# overwritten on the next build; confirm before editing by hand.
PY3 = 0  # Python 3 build flag (0 = not a Python 3 build)
# Supported Cython version window and known-broken releases.
CYTHON_MIN = '0.20'
CYTHON_MAX = '0.21.2'
CYTHON_BAD = '0.22, 0.22.beta0, 0.22.alpha0'
# Backend/feature switches baked in at build time (1 = enabled).
USE_RPI = 0
USE_OPENGL_ES2 = 0
USE_OPENGL_DEBUG = 0
USE_GLEW = 1
USE_SDL2 = 1
USE_IOS = 0
USE_MESAGL = 0
USE_X11 = 0
USE_GSTREAMER = 0
USE_AVFOUNDATION = 0
USE_OSX_FRAMEWORKS = 0
DEBUG = False
| [
2,
5231,
519,
877,
515,
2393,
329,
509,
452,
88,
8398,
201,
198,
47,
56,
18,
796,
657,
201,
198,
34,
56,
4221,
1340,
62,
23678,
796,
705,
15,
13,
1238,
6,
201,
198,
34,
56,
4221,
1340,
62,
22921,
796,
705,
15,
13,
2481,
13,
... | 1.80829 | 193 |
from flask import Flask
# WSGI application; configuration is loaded from the top-level settings module.
app = Flask(__name__)
app.config.from_object('settings')
# Imported for its side effects (presumably registers the app's routes);
# noqa suppresses the unused-import warning.
import govhack2014.routes  # noqa
| [
6738,
42903,
1330,
46947,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
11250,
13,
6738,
62,
15252,
10786,
33692,
11537,
198,
198,
11748,
467,
85,
31153,
4967,
13,
81,
448,
274,
220,
220,
1303,
645,
20402,
198
] | 2.809524 | 42 |
"""Global constants controlling appearance"""
font_family = "Shree Devanagari 714"
main_color = "#333333"
accent_color = "#30009C"
background_color = "#ffffff"
background_color_accent = "#f2f3f4"
highlight_color = "#FB3552"
| [
37811,
22289,
38491,
12755,
5585,
37811,
198,
198,
10331,
62,
17989,
796,
366,
2484,
631,
6245,
272,
363,
2743,
767,
1415,
1,
198,
12417,
62,
8043,
796,
25113,
24840,
2091,
1,
198,
330,
1087,
62,
8043,
796,
25113,
23924,
24,
34,
1,
... | 2.848101 | 79 |
from kii import results as rs
from kii.helpers import RequestHelper, AuthRequestHelper
| [
6738,
479,
4178,
1330,
2482,
355,
44608,
198,
6738,
479,
4178,
13,
16794,
364,
1330,
19390,
47429,
11,
26828,
18453,
47429,
628,
628,
628
] | 3.833333 | 24 |
import debug
from ruler import Ruler
import input
from output import NaturalDeductionTree, Step, StepType
from premise import Premise
from fsm import MasterPool, ProveFSM, Pool
CONCLUDE = ":-"
| [
11748,
14257,
198,
6738,
22740,
1330,
31808,
198,
11748,
5128,
198,
6738,
5072,
1330,
12068,
35,
276,
8110,
27660,
11,
5012,
11,
5012,
6030,
198,
6738,
18659,
1330,
6929,
786,
198,
6738,
277,
5796,
1330,
5599,
27201,
11,
1041,
303,
1065... | 3.203125 | 64 |
from setuptools import setup, find_packages
# Packaging metadata for the dehydrated_vae distribution.
setup(name='dehydrated_vae',
  version='0.0.0',
  install_requires = [
    'keras',
    'numpy',
  ],
  packages=find_packages())
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
3672,
11639,
2934,
15511,
4111,
62,
33353,
3256,
198,
220,
2196,
11639,
15,
13,
15,
13,
15,
3256,
198,
220,
2721,
62,
47911,
796,
685,
198,
220,
220,
220,... | 2.61194 | 67 |
# -------------------------------------------
# Created by: jasper
# as part of the project: Bachelorarbeit
# Date: 4/29/20
# -------------------------------------------
# Packaging script for the 2D finite-difference beam-propagation package.
import setuptools
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setuptools.setup(
    name="beampropagtor.py-m4rtins",
    version="0.7",
    author="Jasper Martins",
    author_email="m.jasper.martins@gmail.com",
    description="This package provides methods to setup and perform 2D FD-Beampropagation",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: Other/Proprietary License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3',
)
2,
20368,
32284,
198,
198,
2,
15622,
416,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
474,
32981,
198,
2,
355,
636,
286,
262,
1628,
25,
220,
220,
33399,
283,
15357,
198,
2,
7536,
25,
220,
220,
220,
... | 2.773936 | 376 |
#%%
import warnings
from sklearn.metrics import mean_squared_error
from sklearn.model_selection._split import train_test_split
import traceback
from unsupervised_learning.src import plot_clusters
import copy
warnings.filterwarnings("ignore")
import os
os.chdir(os.path.dirname(__file__))
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.decomposition import PCA, FastICA
from sklearn.ensemble import RandomForestClassifier
import datetime
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import kurtosis
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score, homogeneity_completeness_v_measure
from sklearn.random_projection import SparseRandomProjection
from get_data import *
from utils import *
from utils2 import *
from plot_clusters import *
#%%
def main():
    '''
    Run the full unsupervised-learning experiment end to end:
    cluster both datasets, pick the best dimensionality reductions and
    feature selections, re-cluster on the reduced data, then train neural
    nets on the reduced pima data (plus a benchmark) and on data augmented
    with cluster labels, and finally tabulate all scores.

    Original plan sketch:
    kmeans, gmm = cluster()
    reduced_data1, reduced_data2 = dimensionality_reduction() #input 2 data, output 8 dataset
    cluster11, cluster22 = cluster_on_reduced_data(reduced_data) #output 16 dataset
    compare_clusters(clusters1, clusters2, cluster11, cluster22)
    nn_dim_red = neural_net(reduced_data1)
    nn_cluster = neural_net(clusters1, clusters2)
    compare_neural_nets()
    '''
    # Load both datasets; NOTE(review): the False argument's meaning is not
    # visible here — presumably disables plotting in get_data; confirm.
    x1, y1 = get_dataset1(False)
    x2, y2 = get_dataset2(False)
    # Hold out a test split, then split the remaining train data in half:
    # one half for clustering experiments, one half for the neural nets.
    X1_train, X1_test, y1_train, y1_test = train_test_split(x1, y1, test_size=0.33, random_state=42)
    X2_train, X2_test, y2_train, y2_test = train_test_split(x2, y2, test_size=0.33, random_state=42)
    X1_train_clustr, X1_train_nn, y1_train_clustr, y1_train_nn = train_test_split(
        X1_train, y1_train, test_size=0.5, random_state=42)
    X2_train_clustr, X2_train_nn, y2_train_clustr, y2_train_nn = train_test_split(
        X2_train, y2_train, test_size=0.5, random_state=42)
    # Scalers are fit on the clustering halves and reused for the NN halves
    # and the test sets (no refitting, to avoid leakage).
    X1_scaler = MinMaxScaler()
    X2_scaler = MinMaxScaler()
    X1_train_clustr = X1_scaler.fit_transform(X1_train_clustr)
    X2_train_clustr = X2_scaler.fit_transform(X2_train_clustr)
    X1_train_nn = X1_scaler.transform(X1_train_nn)
    X2_train_nn = X2_scaler.transform(X2_train_nn)
    X1_test = X1_scaler.transform(X1_test)
    X2_test = X2_scaler.transform(X2_test)
    #===============================================================================================
    # STEP1: CLUSTER the raw (scaled) data with k-means and GMM.
    #===============================================================================================
    print("\n=========\n","STEP#1", "\n=========")
    run_cluster(X1_train_clustr,y1_train_clustr,X2_train_clustr,y2_train_clustr, True)
    clusters = get_best_clusters(X1_train_clustr,y1_train_clustr,X2_train_clustr,y2_train_clustr)
    #===============================================================================================
    # STEP2: DIMENSIONALITY REDUCTION AND FEATURE SELECTION
    # Find the best projections/features for each reduction algorithm.
    #===============================================================================================
    print("\n=========\n","STEP#2", "\n=========")
    best_features = dimensionality_reduction(X1_train_clustr,y1_train_clustr,X2_train_clustr,y2_train_clustr)
    #best_features = run_RFC(X1_train_clustr,y1_train_clustr,X2_train_clustr,y2_train_clustr)
    print("best RFC features", best_features)
    #best_features = {'wine': [10, 7, 1, 4, 6, 2], 'pima': [1, 5, 7, 6, 3, 2]}
    best_reducers = get_best_dimensionality_reductions(X1_train_clustr, X2_train_clustr, best_features)
    #===============================================================================================
    # STEP3: CLUSTER (k-means and GMM) on each of the 4 reductions x 2 datasets.
    #===============================================================================================
    print("\n=========\n","STEP#3", "\n=========")
    for d in best_reducers:
        if d == 'rfc':
            # RFC stores selected column indices rather than a transformer.
            x1_train_clustr_reduced = X1_train_clustr[:, best_reducers[d]['wine']]
            x2_train_clustr_reduced = X2_train_clustr[:, best_reducers[d]['pima']]
        else:
            reducer_wine = best_reducers[d]['wine']
            reducer_pima = best_reducers[d]['pima']
            x1_train_clustr_reduced = reducer_wine.transform(X1_train_clustr)
            x2_train_clustr_reduced = reducer_pima.transform(X2_train_clustr)
        run_cluster(x1_train_clustr_reduced, y1_train_clustr,
                    x2_train_clustr_reduced, y2_train_clustr, plot=True, title=d+'-'+d.upper())
    #===============================================================================================
    # STEP4: Build one neural net per reduction of the pima data, plus a
    # benchmark trained on the unreduced features.
    #===============================================================================================
    # Hyperparameter grid shared by all tune_nn calls.
    activation = ['logistic', 'tanh']
    alpha = np.logspace(-2, 4, 15)
    hidden_layer_sizes = [(12,6,4,2)]
    result_data = []
    models = {}
    for m in ['pca', 'ica', 'rp', 'rfc', 'benchmark', 'kmeans', 'gmm']:
        k = models.setdefault(m, {})
        k['model'] = pipe = Pipeline([('cfr', MLPClassifier((6, 4, 2), random_state=42, activation='logistic',
                                                            max_iter=100, tol=0.001, n_iter_no_change=80, learning_rate='adaptive'))])
    print("\n=========\n","STEP#4", "\n=========")
    #X2_train_nn = X2_scaler.transform(X2_train_nn)
    #X2_test = X2_scaler.transform(X2_test)
    for d in best_reducers:
        print(d.upper(), "\n=========")
        if d == 'rfc':
            # REDUCE THE TRAINING SET SAVED FOR NEURAL NET USING THE BEST DIM. RED. ALGOS FROM STEP#2
            x2_train_nn_reduced = X2_train_nn[:, best_reducers[d]['pima']]
            x2_test_reduced = X2_test[:, best_reducers[d]['pima']]
        else:
            reducer_pima = best_reducers[d]['pima']
            print('reducer=', reducer_pima)
            x2_train_nn_reduced = reducer_pima.transform(X2_train_nn)
            x2_test_reduced = reducer_pima.transform(X2_test)
        #pipe = Pipeline([('cfr', MLPClassifier((6, 4, 2), random_state=42, activation='logistic',
        #                                       max_iter=100, tol=0.001))])
        #models[d]['model'] = models[d]['model'].fit(x2_train_nn_reduced, y2_train_nn)
        models[d]['model'] = tune_nn(activation, alpha, hidden_layer_sizes, x2_train_nn_reduced, y2_train_nn, models[d]['model'])
        #tuned_model = pipe
        train_score = models[d]['model'].score(x2_train_nn_reduced, y2_train_nn)
        test_score = models[d]['model'].score(x2_test_reduced, y2_test)
        result_data.append([d.upper(), x2_train_nn_reduced.shape[1],
                            train_score, test_score,
                            models[d]['model'].best_estimator_['cfr'].loss_curve_, models[d]['model'].refit_time_])
    print('BENCHMARK', "\n=========")
    models['benchmark']['model'].fit(X2_train_nn, y2_train_nn)
    models['benchmark']['model'] = tune_nn(activation, alpha, hidden_layer_sizes, X2_train_nn, y2_train_nn, models['benchmark']['model'])
    #tuned_model = pipe
    train_score = models['benchmark']['model'].score(X2_train_nn, y2_train_nn)
    test_score = models['benchmark']['model'].score(X2_test, y2_test)
    result_data.append(['BENCHMARK', X2_train_nn.shape[1],
                        train_score, test_score, models['benchmark']['model'].best_estimator_['cfr'].loss_curve_, models['benchmark']['model'].refit_time_])
    #===============================================================================================
    # STEP 5: Append STEP1's cluster assignments as an extra feature and
    # retrain the neural network (k-means first, then GMM).
    #===============================================================================================
    print("\n=========\n","STEP#5", "\n=========")
    d = 'pima'
    c = 'kmeans'
    cluster_algo = clusters[d][c]['obj']
    #print(cluster_algo)
    X2_train_nn1 = X2_train_nn
    #print('X2_train_nn', X2_train_nn)
    cluster_train_pred = cluster_algo.predict(X2_train_nn)
    #print("cluster_train_pred", cluster_train_pred)
    cluster_test_pred = cluster_algo.predict(X2_test)
    #add the clusters as a new feature
    enhanced_X2_train_nn1 = np.append(X2_train_nn, cluster_train_pred.reshape(-1,1), axis=1)
    enhanced_X2_test1 = np.append(X2_test, cluster_test_pred.reshape(-1,1), axis=1)
    #pipe = Pipeline([('cfr', MLPClassifier((6, 4, 2), random_state=42, activation='logistic', max_iter=100, tol=0.001))])
    models[c]['model'] = models[c]['model'].fit(enhanced_X2_train_nn1, y2_train_nn)
    #tuned_model = tune_nn(activation, alpha, hidden_layer_sizes, enhanced_X2_train_nn, y2_train_nn, pipe)
    models[c]['model'] = tune_nn(activation, alpha, hidden_layer_sizes, enhanced_X2_train_nn1, y2_train_nn, models[c]['model'])
    train_score = models[c]['model'].score(enhanced_X2_train_nn1, y2_train_nn)
    print(train_score)
    test_score = models[c]['model'].score(enhanced_X2_test1, y2_test)
    #print("models[c]['model']", models[c]['model'])
    #print([c.upper(), enhanced_X2_train_nn1.shape[1],
    #       train_score, test_score, models[c]['model'].best_estimator_['cfr'].loss_curve_,
    #       ])
    result_data.append([c.upper(), enhanced_X2_train_nn1.shape[1],
                        train_score, test_score, models[c]['model'].best_estimator_['cfr'].loss_curve_,
                        models[c]['model'].refit_time_
                        ])
    #print(result_data)
    c = 'gmm'
    cluster_algo = clusters[d][c]['obj']
    #print(cluster_algo)
    #print('X2_train_nn', X2_train_nn)
    cluster_train_pred = cluster_algo.predict(X2_train_nn)
    #print("cluster_train_pred", cluster_train_pred)
    cluster_test_pred = cluster_algo.predict(X2_test)
    #add the clusters as a new feature
    enhanced_X2_train_nn = np.append(X2_train_nn, cluster_train_pred.reshape(-1,1), axis=1)
    enhanced_X2_test = np.append(X2_test, cluster_test_pred.reshape(-1,1), axis=1)
    #pipe = Pipeline([('cfr', MLPClassifier((6, 4, 2), random_state=42, activation='logistic', max_iter=100, tol=0.001))])
    #models[c]['model'] = models[c]['model'].fit(enhanced_X2_train_nn, y2_train_nn)
    models[c]['model'] = tune_nn(activation, alpha, hidden_layer_sizes, enhanced_X2_train_nn, y2_train_nn, models[c]['model'])
    train_score = models[c]['model'].score(enhanced_X2_train_nn, y2_train_nn)
    #print(train_score)
    test_score = models[c]['model'].score(enhanced_X2_test, y2_test)
    result_data.append([c.upper(), enhanced_X2_train_nn.shape[1],
                        train_score, test_score, models[c]['model'].best_estimator_['cfr'].loss_curve_,
                        models[c]['model'].refit_time_
                        ])
    #print(result_data)
    # Collect every run's scores/loss curves into one table and report.
    result_df = pd.DataFrame(result_data, columns=['Model', 'Dimension',
                                                   'Training F1 Score', 'Testing F1 Score', 'Loss', 'Training Time(sec)'])
    test_score_fun(result_df, 'pima-reduced dimension', 'step4_and_5')
    #clusters1, clusters2 = cluster()
if __name__=="__main__":
    main()
| [
2,
16626,
201,
198,
11748,
14601,
201,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
1612,
62,
16485,
1144,
62,
18224,
201,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
13557,
35312,
1330,
4512,
62,
9288,
62,
35312,
201,
198,
11748,
... | 2.121348 | 5,818 |
from dataclasses import dataclass, field
from math import ceil, floor
from typing import Dict, Iterable, List, Optional, Set, Tuple, Union, cast
import itertools
import dsd.search as ds # type: ignore
import dsd.constraints as dc
# TODO: Go over each constraint, using NUPACK
# - check pfunc for each strand
# - check every pair of signal strands not in same complex (use binding4)
# - check every complex is well-formed (print base-pair probabilties and MFE)
# Constants
# Constants -- domain lengths (in bases) for the seesaw motif.
SIGNAL_DOMAIN_LENGTH = 15
EXTENDED_TOEHOLD_LENGTH = 2
TOEHOLD_LENGTH = 5
# Constants -- base runs disallowed anywhere in a designed sequence.
ILLEGAL_SUBSTRINGS_FOUR = ['G' * 4, 'C' * 4]
ILLEGAL_SUBSTRINGS_FIVE = ['A' * 5, 'T' * 5]
ILLEGAL_SUBSTRINGS = ILLEGAL_SUBSTRINGS_FOUR + ILLEGAL_SUBSTRINGS_FIVE
# NumpyConstraints applied to the domain pools below.
three_letter_code_constraint = dc.RestrictBasesConstraint(('A', 'C', 'T'))
no_gggg_constraint = dc.ForbiddenSubstringConstraint(ILLEGAL_SUBSTRINGS_FOUR)
no_aaaaa_constraint = dc.ForbiddenSubstringConstraint(ILLEGAL_SUBSTRINGS_FIVE)
# NOTE(review): floor(0.7 * 15) = 10 and ceil(0.3 * 15) = 5, so the larger
# value is passed first — confirm BaseCountConstraint's expected argument
# order (high/low) against the dsd API.
c_content_constraint = dc.BaseCountConstraint('C', floor(0.7 * SIGNAL_DOMAIN_LENGTH),
                                              ceil(0.3 * SIGNAL_DOMAIN_LENGTH))
# Domain pools: subdomain pools sized so ss + s = full signal length.
SUBDOMAIN_SS_POOL: dc.DomainPool = dc.DomainPool(f'SUBDOMAIN_SS_POOL',
                                                 SIGNAL_DOMAIN_LENGTH - EXTENDED_TOEHOLD_LENGTH)
SUBDOMAIN_S_POOL: dc.DomainPool = dc.DomainPool(f'SUBDOMAIN_S_POOL', EXTENDED_TOEHOLD_LENGTH)
TOEHOLD_DOMAIN_POOL: dc.DomainPool = dc.DomainPool(
    'TOEHOLD_DOMAIN_POOL', TOEHOLD_LENGTH, [three_letter_code_constraint])
SIGNAL_DOMAIN_POOL: dc.DomainPool = dc.DomainPool(
    'SIGNAL_DOMAIN_POOL', SIGNAL_DOMAIN_LENGTH,
    [three_letter_code_constraint, c_content_constraint, no_aaaaa_constraint, no_gggg_constraint])
# Alias for the NUPACK complex base-pair-probability constraint factory.
dc_complex_constraint = dc.nupack_complex_base_pair_probability_constraint
# Shared toehold domain and the fixed-sequence fuel domain.
TOEHOLD_DOMAIN: dc.Domain = dc.Domain('T', pool=TOEHOLD_DOMAIN_POOL)
FUEL_DOMAIN: dc.Domain = dc.Domain('fuel', sequence='CATTTTTTTTTTTCA', fixed=True)
# Registries of every recognition (signal) domain/subdomain created so far.
recognition_domains_and_subdomains: Dict[str, dc.Domain] = {}
recognition_domains: Set[dc.Domain] = set()
def get_signal_domain(gate: Union[int, str]) -> dc.Domain:
    """Return the signal domain ``S{gate}``, creating and caching it on first use.

    On the first request for a given gate this creates ``S{gate}`` together
    with its two subdomains ``ss{gate}`` and ``s{gate}``, registers all three
    in ``recognition_domains_and_subdomains``, and records ``S{gate}`` in
    ``recognition_domains``.  Subsequent calls return the cached domain.

    :param gate: Gate identifier.
    :type gate: Union[int, str]
    :return: The (possibly newly created) signal domain.
    :rtype: Domain
    """
    if f'S{gate}' not in recognition_domains_and_subdomains:
        # Subdomains are marked dependent; the parent S{gate} lists them via
        # subdomains=[...] below.
        d_13: dc.Domain = dc.Domain(f'ss{gate}', pool=SUBDOMAIN_SS_POOL, dependent=True)
        d_2: dc.Domain = dc.Domain(f's{gate}', pool=SUBDOMAIN_S_POOL, dependent=True)
        d: dc.Domain = dc.Domain(f'S{gate}', pool=SIGNAL_DOMAIN_POOL, dependent=False, subdomains=[d_2, d_13])
        recognition_domains_and_subdomains[f'ss{gate}'] = d_13
        recognition_domains_and_subdomains[f's{gate}'] = d_2
        recognition_domains_and_subdomains[f'S{gate}'] = d
        # Each full signal domain must be registered exactly once.
        assert d not in recognition_domains
        recognition_domains.add(d)
    return recognition_domains_and_subdomains[f'S{gate}']
def set_domain_pool(domain: dc.Domain, domain_pool: dc.DomainPool) -> None:
    """Assigns domain_pool to domain. If domain already has a domain pool, this
    function asserts that the existing pool is the same object as domain_pool.

    :param domain: Domain to be assigned a pool
    :type domain: dc.Domain
    :param domain_pool: Pool to assign to Domain
    :type domain_pool: dc.DomainPool
    :raises AssertionError: if domain already has a different pool assigned.
    """
    if domain._pool:
        if domain.pool is not domain_pool:
            # BUG FIX: the message previously printed the *new* pool twice;
            # it now reports the pool the domain actually already has.
            raise AssertionError(f'Assigning pool {domain_pool} to domain '
                                 f'{domain} but {domain} already has domain '
                                 f'pool {domain.pool}')
    else:
        domain.pool = domain_pool
def signal_strand(
        gate3p: Union[int, str],
        gate5p: Union[int, str]) -> dc.Strand:
    """Build the signal strand connecting two seesaw gates.

    The strand carries recognition domain ``S{gate3p}`` on its 3' end and
    ``S{gate5p}`` on its 5' end, joined by the toehold domain T:

    .. code-block:: none

              S{g3p}             S{g5p}
         ss{g3p}    s{g3p}  T   ss{g5p}    s{g5p}
            |         |     |      |         |
        <=============--==--=====--=============--==]

    :param gate3p: gate identified by the 3'-end recognition domain
    :type gate3p: Union[int, str]
    :param gate5p: gate identified by the 5'-end recognition domain
    :type gate5p: Union[int, str]
    :return: the signal strand
    :rtype: dc.Strand
    """
    # Create/look up the 3' domain first to keep the original registration
    # order of domains (get_signal_domain has caching side effects).
    domain_3p = get_signal_domain(gate3p)
    domain_5p = get_signal_domain(gate5p)
    strand_name = f'signal_{gate3p}_{gate5p}'
    return dc.Strand(domains=[domain_5p, TOEHOLD_DOMAIN, domain_3p],
                     starred_domain_indices=[], name=strand_name)
def fuel_strand(gate: int) -> dc.Strand:
    """Build the fuel strand that fuels seesaw gate ``gate``.

    .. code-block:: none

         ss{gate}    s{gate}  T       ssf      sf
            |           |     |        |       |
        <=============--==--=====--=============--==]

    :param gate: The name of the gate that this fuel strand will fuel.
    :type gate: int
    :return: Fuel strand
    :rtype: dc.Strand
    """
    recognition = get_signal_domain(gate)
    return dc.Strand(domains=[FUEL_DOMAIN, TOEHOLD_DOMAIN, recognition],
                     starred_domain_indices=[], name=f'fuel_{gate}')
def gate_base_strand(gate: int) -> dc.Strand:
    """Build the gate base strand for seesaw gate ``gate``.

    All three domains (toehold, recognition, toehold) are starred:

    .. code-block:: none

                  S{gate}*
          T*   ss{gate}* s{gate}*  T*
          |        |        |      |
        [=====--=============--==--=====>

    :param gate: Gate to be identified by the recognition domain
    :type gate: int
    :return: Gate base strand
    :rtype: dc.Strand
    """
    recognition = get_signal_domain(gate)
    return dc.Strand(
        domains=[TOEHOLD_DOMAIN, recognition, TOEHOLD_DOMAIN],
        starred_domain_indices=[0, 1, 2],
        name=f'gate_base_{gate}')
def threshold_bottom_strand(input_: int, gate: int) -> dc.Strand:
    """Build the threshold bottom strand for seesaw gate ``gate`` that
    thresholds signal ``input_``.

    .. code-block:: none

        s{input}*  T*  ss{gate}* s{gate}*
           |       |       |        |
          [==--=====--=============--==>

    :param input_: Name of input that is being thresholded
    :type input_: int
    :param gate: Name of gate
    :type gate: int
    :return: Threshold bottom strand
    :rtype: dc.Strand
    """
    # The s{input_} subdomain must already exist, i.e. the input's signal
    # domain was previously created via get_signal_domain.
    input_subdomain = recognition_domains_and_subdomains[f's{input_}']
    gate_domain = get_signal_domain(gate)
    return dc.Strand(
        domains=[input_subdomain, TOEHOLD_DOMAIN, gate_domain],
        starred_domain_indices=[0, 1, 2],
        name=f'threshold_bottom_{input_}_{gate}')
def threshold_top_strand(gate: int) -> dc.Strand:
    """Build the waste (top) strand for a thresholding reaction involving the
    seesaw gate labeled ``gate``.

    .. code-block:: none

         ss{gate}    s{gate}
            |           |
        <=============--==]

    :param gate: Name of gate
    :type gate: int
    :return: Waste strand
    :rtype: dc.Strand
    """
    return dc.Strand(
        domains=[get_signal_domain(gate)],
        starred_domain_indices=[],
        name=f'threshold_top_{gate}')
def reporter_top_strand(gate: int) -> dc.Strand:
    """Build the waste (top) strand for a reporting reaction involving the
    seesaw gate labeled ``gate``.

    .. code-block:: none

         ss{gate}    s{gate}
            |           |
        <=============--==]

    :param gate: Name of gate
    :type gate: int
    :return: Waste strand
    :rtype: dc.Strand
    """
    return dc.Strand(
        domains=[get_signal_domain(gate)],
        starred_domain_indices=[],
        name=f'reporter_top_{gate}')
def reporter_bottom_strand(gate) -> dc.Strand:
    """Build the reporter bottom strand for seesaw gate labeled ``gate``.

    .. code-block:: none

          T*   ss{gate}* s{gate}*
          |        |        |
        [=====--=============--==>

    :param gate: Name of gate
    :type gate: [type]
    :return: Reporter bottom strand
    :rtype: dc.Strand
    """
    return dc.Strand(
        domains=[TOEHOLD_DOMAIN, get_signal_domain(gate)],
        starred_domain_indices=[0, 1],
        name=f'reporter_bottom_{gate}')
def input_gate_complex_constraint(
        input_gate_complexes: List[Tuple[dc.Strand, dc.Strand]]) -> dc.ComplexConstraint:
    """Returns a input:gate complex constraint

    .. code-block:: none

             S{input}  s{input} T    S{gate}   s{gate}
                 |         |    |       |         |
        <=============--==--=====--=============--==]
                            |||||  |||||||||||||  ||
                           [=====--=============--==--=====>
                              |          |         |    |
                              T*      S{gate}*  s{gate}* T*

    :param input_gate_complexes: List of input:gate complexes
    :type input_gate_complexes: List[Tuple[dc.Strand, ...]]
    :return: A complex constraint on the base-pairing probabilities
    :rtype: dc.ComplexConstraint
    """
    # All complexes share the same domain layout, so the toehold addresses
    # are taken from the first (template) complex.
    assert input_gate_complexes
    template_complex = input_gate_complexes[0]
    assert len(template_complex) == 2
    template_top_strand = template_complex[0]
    template_bot_strand = template_complex[1]
    # T on the top strand binds T* on the bottom strand; this pairing is not
    # implied by the domain names, so it is declared explicitly below.
    addr_t = template_top_strand.address_of_first_domain_occurence('T')
    addr_t_star = template_bot_strand.address_of_first_domain_occurence('T*')
    return dc_complex_constraint(
        strand_complexes=cast(
            List[Tuple[dc.Strand, ...]],
            input_gate_complexes),
        nonimplicit_base_pairs=[(addr_t, addr_t_star)],
        description="input:gate Complex",
        short_description="input:gate")
def gate_output_complex_constraint(
        gate_output_complexes: List[Tuple[dc.Strand, ...]],
        base_pair_prob_by_type: Optional[Dict[dc.BasePairType, float]] = None,
        description: str = 'gate:output') -> dc.ComplexConstraint:
    """Returns a gate:output complex constraint

    .. code-block:: none

             S{gate}   s{gate}  T   S{output} s{output}
                 |         |    |       |         |
        <=============--==--=====--=============--==]
         |||||||||||||  ||         |||||
        [=====--=============--==--=====>
           |          |         |    |
           T*      S{gate}*  s{gate}* T*

    :param gate_output_complexes: List of gate:output complexes
    :type gate_output_complexes: List[Tuple[dc.Strand, ...]]
    :param base_pair_prob_by_type: probabilities to assign to each type of base pair
    :type base_pair_prob_by_type: Optional[Dict[dc.BasePairType, float]]
    :param description: description of complex
    :type description: str
    :return: A complex constraint on the base-pairing probabilities
    :rtype: dc.ComplexConstraint
    """
    # All complexes share the same domain layout, so the toehold addresses
    # are taken from the first (template) complex.
    assert gate_output_complexes
    template_complex = gate_output_complexes[0]
    assert len(template_complex) == 2
    template_top_strand = template_complex[0]
    template_bot_strand = template_complex[1]
    # Unlike input_gate_complex_constraint, here the *last* T* on the bottom
    # strand pairs with the top strand's T.
    addr_t = template_top_strand.address_of_first_domain_occurence('T')
    addr_t_star = template_bot_strand.address_of_last_domain_occurence('T*')
    return dc_complex_constraint(
        strand_complexes=gate_output_complexes,
        nonimplicit_base_pairs=[(addr_t, addr_t_star)],
        base_pair_prob_by_type=base_pair_prob_by_type, description=f"{description} Complex",
        short_description=f"{description}"
    )
def base_difference_constraint(domains: Iterable[dc.Domain]) -> dc.DomainPairConstraint:
    """
    For any two sequences in the pool, we require at least 30% of bases are
    different and the longest run of matches is at most 35% of the domain length

    :param domains: Domains to compare
    :type domains: Iterable[dc.Domain]
    :return: DomainPairConstraint
    :rtype: dc.DomainPairConstraint
    """
    # All unordered pairs of distinct domains; each pair is evaluated independently.
    pairs = itertools.combinations(domains, 2)
    # NOTE(review): ``evaluate`` is not defined in this block -- presumably a
    # module-level callable implementing the 30%/35% rule above; confirm.
    return dc.DomainPairConstraint(pairs=tuple(pairs), evaluate=evaluate,
                                   description='base difference constraint',
                                   short_description='base difference constraint')
def strand_substring_constraint(
        strands: List[dc.Strand],
        substrings: List[str]) -> dc.StrandConstraint:
    """Returns a strand constraint that restricts the substrings in the strand
    sequence

    :param strands: Strands to apply constraint on
    :type strands: List[dc.Strand]
    :param substrings: Substrings to disallow
    :type substrings: List[str]
    :return: Constraint rejecting strands whose sequence contains a disallowed substring
    :rtype: dc.StrandConstraint
    """
    # NOTE(review): ``evaluate`` and ``summary`` are not defined in this block --
    # presumably module-level callables that check each strand's sequence
    # against ``substrings``; confirm their definitions elsewhere in the file.
    return dc.StrandConstraint(description="Strand Substring Constraint",
                               short_description="Strand Substring Constraint",
                               evaluate=evaluate,
                               strands=tuple(strands),
                               summary=summary)
@dataclass
class SeesawCircuit:
    """Class for keeping track of a seesaw circuit and its DNA representation.

    Populate ``seesaw_gates``, then call ``_set_strands`` followed by
    ``_set_constraints`` to derive the strand dictionaries and the dsd
    constraints.
    """
    seesaw_gates: List['SeesawGate']
    strands: List[dc.Strand] = field(init=False, default_factory=list)
    constraints: List[dc.ComplexConstraint] = field(
        init=False, default_factory=list)
    # Derived strands, keyed by gate name or by (input, gate) pair.
    signal_strands: Dict[Tuple[int, int], dc.Strand] = field(
        init=False, default_factory=dict)
    fuel_strands: Dict[int, dc.Strand] = field(
        init=False, default_factory=dict)
    gate_base_strands: Dict[int, dc.Strand] = field(
        init=False, default_factory=dict)
    threshold_top_strands: Dict[int, dc.Strand] = field(
        init=False, default_factory=dict)
    threshold_bottom_strands: Dict[Tuple[int, int], dc.Strand] = field(
        init=False, default_factory=dict)
    reporter_top_strands: Dict[int, dc.Strand] = field(
        init=False, default_factory=dict)
    reporter_bottom_strands: Dict[Tuple[int, int], dc.Strand] = field(
        init=False, default_factory=dict)

    @staticmethod
    def _check_unique_gate_name(gate_name: int, seen: Set[int]) -> None:
        """Raise ValueError if gate_name is already in seen.

        Shared duplicate-gate check. Fixes two defects of the previous inline
        checks: the second string literal of the message was missing its
        f-prefix (so ``{gate_name}`` was printed literally), and several
        methods compared an int gate name against a set of (input, gate)
        tuples, so the check could never fire.

        :param gate_name: Name of the gate to check
        :param seen: Gate names already encountered (not modified here)
        :raises ValueError: If duplicate gate name found
        """
        if gate_name in seen:
            raise ValueError(f'Invalid seesaw circuit: '
                             f'Multiple gates labeled {gate_name} found')

    def _set_gate_base_strands(self) -> None:
        """Sets self.gate_base_strands (one base strand per non-reporter gate).

        :raises ValueError: If duplicate gate name found
        """
        # Set of all gates
        gates: Set[int] = set()
        for seesaw_gate in self.seesaw_gates:
            gate_name = seesaw_gate.gate_name
            self._check_unique_gate_name(gate_name, gates)
            if not seesaw_gate.is_reporter:
                gates.add(gate_name)
        self.gate_base_strands = {gate: gate_base_strand(gate)
                                  for gate in gates}

    def _set_signal_strands(self) -> None:
        """Sets self.signal_strands (one per (input, gate) pair).

        :raises ValueError: If duplicate gate name found
        """
        # fix: gate names were previously tested against the set of
        # (input, gate) tuples, so duplicates were never detected
        seen_gates: Set[int] = set()
        # Set of all input, gate pairs
        input_gate_pairs: Set[Tuple[int, int]] = set()
        for seesaw_gate in self.seesaw_gates:
            gate_name = seesaw_gate.gate_name
            self._check_unique_gate_name(gate_name, seen_gates)
            seen_gates.add(gate_name)
            for input_ in seesaw_gate.inputs:
                assert (input_, gate_name) not in input_gate_pairs
                input_gate_pairs.add((input_, gate_name))
        self.signal_strands = {(input_, gate): signal_strand(input_, gate)
                               for input_, gate in input_gate_pairs}

    def _set_fuel_strands(self) -> None:
        """Sets self.fuel_strands (one per gate that has fuel).

        :raises ValueError: If duplicate gate name found
        """
        # Set of all gates with fuel
        gates_with_fuel: Set[int] = set()
        for seesaw_gate in self.seesaw_gates:
            if seesaw_gate.has_fuel:
                gate_name = seesaw_gate.gate_name
                self._check_unique_gate_name(gate_name, gates_with_fuel)
                gates_with_fuel.add(gate_name)
        self.fuel_strands = {gate: fuel_strand(gate)
                             for gate in gates_with_fuel}

    def _set_threshold_bottom_strands(self) -> None:
        """Sets self.threshold_bottom_strands

        :raises ValueError: If duplicate gate name found
        """
        # fix: previously compared an int against the tuple set, never firing
        seen_gates: Set[int] = set()
        # Set of all input, gate pairs with threshold
        input_gate_pairs_with_threshold: Set[Tuple[int, int]] = set()
        for seesaw_gate in self.seesaw_gates:
            if seesaw_gate.has_threshold and not seesaw_gate.is_reporter:
                gate_name = seesaw_gate.gate_name
                self._check_unique_gate_name(gate_name, seen_gates)
                seen_gates.add(gate_name)
                for input_ in seesaw_gate.inputs:
                    assert (input_, gate_name) not in input_gate_pairs_with_threshold
                    input_gate_pairs_with_threshold.add((input_, gate_name))
        self.threshold_bottom_strands = {
            (input_, gate): threshold_bottom_strand(input_, gate)
            for input_, gate in input_gate_pairs_with_threshold}

    def _set_threshold_top_strands(self) -> None:
        """Sets self.threshold_top_strands

        :raises ValueError: If duplicate gate name found
        """
        # Set of all gates with threshold
        gates_with_threshold_but_not_reporter: Set[int] = set()
        for seesaw_gate in self.seesaw_gates:
            if seesaw_gate.has_threshold and not seesaw_gate.is_reporter:
                gate_name = seesaw_gate.gate_name
                self._check_unique_gate_name(
                    gate_name, gates_with_threshold_but_not_reporter)
                gates_with_threshold_but_not_reporter.add(gate_name)
        self.threshold_top_strands = {
            gate: threshold_top_strand(gate)
            for gate in gates_with_threshold_but_not_reporter}

    def _set_reporter_top_strands(self) -> None:
        """Sets self.reporter_top_strands

        :raises ValueError: If duplicate gate name found
        """
        # Set of all gates that are reporter
        gates_that_are_reporter: Set[int] = set()
        for seesaw_gate in self.seesaw_gates:
            if seesaw_gate.is_reporter:
                gate_name = seesaw_gate.gate_name
                self._check_unique_gate_name(gate_name, gates_that_are_reporter)
                gates_that_are_reporter.add(gate_name)
        self.reporter_top_strands = {gate: reporter_top_strand(gate)
                                     for gate in gates_that_are_reporter}

    def _set_reporter_bottom_strands(self) -> None:
        """Sets self.reporter_bottom_strands

        :raises ValueError: If duplicate gate name found
        """
        # fix: previously compared an int against the tuple set, never firing
        seen_gates: Set[int] = set()
        # Set of all reporter gates
        reporter_gates: Set[Tuple[int, int]] = set()
        for seesaw_gate in self.seesaw_gates:
            if seesaw_gate.is_reporter:
                gate_name = seesaw_gate.gate_name
                self._check_unique_gate_name(gate_name, seen_gates)
                seen_gates.add(gate_name)
                inputs = seesaw_gate.inputs
                # A reporter reads exactly one upstream gate
                assert len(inputs) == 1
                reporter_gates.add((inputs[0], gate_name))
        self.reporter_bottom_strands = {
            (input_, gate): reporter_bottom_strand(gate)
            for input_, gate in reporter_gates}

    def _set_strands(self) -> None:
        """Sets self.strands by deriving every strand collection from
        self.seesaw_gates and concatenating them."""
        self._set_gate_base_strands()
        self._set_signal_strands()
        self._set_fuel_strands()
        self._set_threshold_bottom_strands()
        self._set_threshold_top_strands()
        self._set_reporter_bottom_strands()
        self._set_reporter_top_strands()
        self.strands = (list(self.signal_strands.values())
                        + list(self.fuel_strands.values())
                        + list(self.gate_base_strands.values())
                        + list(self.threshold_bottom_strands.values())
                        + list(self.threshold_top_strands.values())
                        + list(self.reporter_bottom_strands.values())
                        + list(self.reporter_top_strands.values()))

    def _add_input_gate_complex_constraint(self) -> None:
        """Adds input:gate complexes to self.constraints"""
        input_gate_complexes = []
        for (input_, gate), s in self.signal_strands.items():
            if gate in self.gate_base_strands:
                g = self.gate_base_strands[gate]
                input_gate_complexes.append((s, g))
        self.constraints.append(
            input_gate_complex_constraint(
                input_gate_complexes))

    def _add_gate_output_complex_constraint(self) -> None:
        """Adds gate:output complexes to self.constraints"""
        gate_output_complexes: List[Tuple[dc.Strand, ...]] = []
        # The first element of a signal-strand key is the gate producing the
        # signal, so signal_strand(gate, _) is an output of ``gate``.
        for (gate, _), s in self.signal_strands.items():
            if gate in self.gate_base_strands:
                g = self.gate_base_strands[gate]
                gate_output_complexes.append((s, g))
        self.constraints.append(
            gate_output_complex_constraint(
                gate_output_complexes
            )
        )

    def _add_gate_fuel_complex_constraint(self) -> None:
        """Adds gate:fuel complexes to self.constraints"""
        gate_output_complexes: List[Tuple[dc.Strand, ...]] = []
        for gate, f in self.fuel_strands.items():
            # fix: original tested ``gate in self.fuel_strands`` (always true)
            # and could KeyError on a fuel gate without a base strand
            if gate in self.gate_base_strands:
                g = self.gate_base_strands[gate]
                gate_output_complexes.append((f, g))
        # TODO: Make it so that only specific base pairs have lower threshold (such as base index 1)
        # which is an A that can bind to any T but it doesn't matter which.
        self.constraints.append(
            gate_output_complex_constraint(
                gate_output_complexes,
                base_pair_prob_by_type={dc.BasePairType.UNPAIRED: 0.8},
                description='gate:fuel'
            )
        )

    def _add_threshold_complex_constraint(self) -> None:
        """Adds threshold complexes to self.constraints

        .. code-block:: none

                    S5       s5
              14        2 10
               |        | ||
        <=============--==]
         |||||||||||||  ||
        [==--=====--=============--==>
         ||  |   |  |          |  ||
         15| 17 21  22        34 |36
          16                      35
         s2*  T*       S5*      s5*
        """
        threshold_complexes: List[Tuple[dc.Strand, ...]] = []
        for (_, gate), thres_bottom_strand in self.threshold_bottom_strands.items():
            waste_strand = self.threshold_top_strands[gate]
            threshold_complexes.append((waste_strand, thres_bottom_strand))
        self.constraints.append(
            dc_complex_constraint(
                threshold_complexes,
                description="Threshold Complex",
                short_description="threshold"))

    def _add_threshold_waste_complex_constraint(self) -> None:
        """Adds threshold waste complexes to self.constraints

        .. code-block:: none

               S2      s2 T        S5       s5
                        21
              34      22|20 19 15  14        2 10
               |       | ||  | |    |        | ||
        <=============-==--=====--=============--==]
                       ||  |||||  |||||||||||||  ||
                      [==--=====--=============--==>
                       ||  |   |  |          |  ||
                      35| 37  41  42        54 |56
                        36                      55
                       s2*  T*       S5*      s5*
        """
        threshold_waste_complexes: List[Tuple[dc.Strand, ...]] = []
        for (input_, gate), thres_bottom_strand in self.threshold_bottom_strands.items():
            sig_strand = self.signal_strands[(input_, gate)]
            threshold_waste_complexes.append(
                (sig_strand, thres_bottom_strand))
        self.constraints.append(
            dc_complex_constraint(
                threshold_waste_complexes,
                description="Threshold Waste Complex",
                short_description="threshold waste"))

    def _add_reporter_complex_constraint(self) -> None:
        """Adds reporter complexes to self.constraints

        .. code-block:: none

                    S6       s6
              14        2 10
               |        | ||
        <=============--==]
         |||||||||||||  ||
        [=====--=============--==>
         |   |  |          |  ||
         15 19  20        32 |34
                              33
         T*       S6*      s6*
        """
        reporter_complexes: List[Tuple[dc.Strand, ...]] = []
        for (_, gate), reporter_bottom_strand_ in self.reporter_bottom_strands.items():
            waste_strand = self.reporter_top_strands[gate]
            reporter_complexes.append((waste_strand, reporter_bottom_strand_))
        self.constraints.append(
            dc_complex_constraint(
                reporter_complexes,
                description="Reporter Complex",
                short_description="reporter"))

    def _add_reporter_waste_complex_constraint(self) -> None:
        """Adds reporter waste complexes to self.constraints

        .. code-block:: none

                   S5        s5  T        S6       s6
                            21
              34         22 |20   19 15  14        2 10
               |          | ||     | |    |        | ||
        <=============--==--=====--=============--==]
                            |||||  |||||||||||||  ||
                           [=====--=============--==>
                            |   |  |          |  ||
                            35 39  40        52 |54
                                                 53
                            T*        S6*      s6*
        """
        reporter_waste_complexes: List[Tuple[dc.Strand, ...]] = []
        for (input_, gate), reporter_bottom_strand_ in self.reporter_bottom_strands.items():
            signal_strand_ = self.signal_strands[(input_, gate)]
            reporter_waste_complexes.append(
                (signal_strand_, reporter_bottom_strand_))
        self.constraints.append(
            dc_complex_constraint(
                reporter_waste_complexes,
                description="Reporter Waste Complex",
                short_description="reporter waste"))

    def _set_constraints(self) -> None:
        """Sets self.constraints (self.strands must be set first)"""
        self._add_input_gate_complex_constraint()
        self._add_gate_output_complex_constraint()
        self._add_gate_fuel_complex_constraint()
        self._add_threshold_complex_constraint()
        self._add_threshold_waste_complex_constraint()
        self._add_reporter_complex_constraint()
        self._add_reporter_waste_complex_constraint()
@dataclass(frozen=True)
class SeesawGate:
    """Class for keeping track of seesaw gate and its input."""
    # Integer label identifying this gate within the circuit
    gate_name: int
    # Names of the gates (or circuit inputs) feeding into this gate
    inputs: List[int]
    # Whether a threshold complex should be generated for this gate
    has_threshold: bool
    # Whether a fuel strand should be generated for this gate
    has_fuel: bool
    # True if this gate is a reporter (terminal readout) gate
    is_reporter: bool = False
def and_or_gate(integrating_gate_name: int, amplifying_gate_name: int,
                inputs: List[int]) -> Tuple[SeesawGate, SeesawGate]:
    """Build the two-gate (integrating + amplifying) pair realizing AND/OR.

    :param integrating_gate_name: Name for integrating gate
    :type integrating_gate_name: int
    :param amplifying_gate_name: Name for amplifying gate
    :type amplifying_gate_name: int
    :param inputs: Inputs into the AND or OR gate
    :type inputs: List[int]
    :return: An integrating gate and an amplifying gate
    :rtype: Tuple[SeesawGate, SeesawGate]
    """
    # The integrating gate sums its inputs (no threshold/fuel); the
    # amplifying gate thresholds and amplifies that sum.
    return (
        SeesawGate(gate_name=integrating_gate_name, inputs=inputs,
                   has_threshold=False, has_fuel=False),
        SeesawGate(gate_name=amplifying_gate_name,
                   inputs=[integrating_gate_name],
                   has_threshold=True, has_fuel=True),
    )
def reporter_gate(gate_name: int, input_: int) -> SeesawGate:
    """Construct the SeesawGate describing a reporter.

    :param gate_name: Name of the reporter
    :type gate_name: int
    :param input_: Name of the gate feeding the reporter
    :type input_: int
    :return: A reporter SeesawGate (has a threshold, no fuel)
    :rtype: SeesawGate
    """
    return SeesawGate(gate_name=gate_name,
                      inputs=[input_],
                      has_threshold=True,
                      has_fuel=False,
                      is_reporter=True)
def input_gate(gate_name: int, input_: int) -> SeesawGate:
    """Construct the SeesawGate describing a circuit input gate.

    :param gate_name: Name of the gate
    :type gate_name: int
    :param input_: Name of the circuit input feeding the gate
    :type input_: int
    :return: An input SeesawGate (has both a threshold and fuel)
    :rtype: SeesawGate
    """
    return SeesawGate(gate_name=gate_name,
                      inputs=[input_],
                      has_threshold=True,
                      has_fuel=True)
if __name__ == '__main__':
main()
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
10688,
1330,
2906,
346,
11,
4314,
198,
6738,
19720,
1330,
360,
713,
11,
40806,
540,
11,
7343,
11,
32233,
11,
5345,
11,
309,
29291,
11,
4479,
11,
3350,
198,
11748,
... | 2.032191 | 15,346 |
#!/usr/bin/env python
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras import optimizers
from tensorflow.keras import backend as K
import tensorflow as tf
import sklearn
import matplotlib.pyplot as plt
from utils import Sample
# Global variable
OUT_SHAPE = 5  # number of output values the network predicts -- TODO confirm against create_model()
INPUT_SHAPE = (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D)  # (height, width, depth) of one input image
if __name__ == '__main__':
    # Load Training Data
    x_train = np.load("data/X.npy")  # input images
    y_train = np.load("data/y.npy")  # target vectors
    #x_train, y_train = sklearn.utils.shuffle(x_train, y_train)
    print(x_train.shape[0], 'train samples')
    # Training loop variables
    epochs = 150
    batch_size = 90
    learning_rate=0.00001
    # NOTE(review): create_model is defined elsewhere in this project;
    # presumably it builds the CNN from INPUT_SHAPE/OUT_SHAPE -- confirm.
    model = create_model()
    model.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=learning_rate))
    # Stop early if validation loss has not improved in the last 100 epochs
    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=100)
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, validation_split=0.1, callbacks=[callback])
    # summarize history for accuracy
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
    print ("Epochs : " + str(epochs))
    print ("Batch : " + str(batch_size))
    print ("Learning rate : " + str(learning_rate))
    # Saves weights only; the architecture must be rebuilt via create_model() to reload.
    model.save_weights('model_weights.h5')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
360,
107... | 2.613065 | 597 |
# Simple interactive menu loop: show the 1-based options, read a selection,
# exit on 0.
print("Enter a selection from the below menu. Press '0' to exit.")
menu_items = ["Bake a loaf of bread", "Bake a pound cake", "Prepare Roast Chicken", "Make Curry", \
    "Put a rack of ribs in the smoker", "Buy dinner out", "Have ice cream", "Sandwiches - again"]
while True:
    # Menu is displayed 1-based; the list itself is 0-based.
    for i in range(len(menu_items)):
        print(f"{i+1}:\t{menu_items[i]}")
    try:
        # fix: int() previously crashed the program with ValueError on
        # non-numeric input
        select = int(input("\nMake a selection: "))
    except ValueError:
        print("That's not a number!\n")
        continue
    if select == 0:
        print("Shutting down!\n")
        break
    # fix: also reject negative selections, which previously indexed from the
    # end of the list
    if select < 0 or select > len(menu_items):
        print("I don't have that many options today!\n")
        continue
    select -= 1
    print(f"\nYou selected: {menu_items[select]}\n")
4798,
7203,
17469,
257,
6356,
422,
262,
2174,
6859,
13,
220,
4332,
705,
15,
6,
284,
8420,
19570,
198,
26272,
62,
23814,
796,
14631,
33,
539,
257,
40134,
286,
8509,
1600,
366,
33,
539,
257,
14896,
12187,
1600,
366,
37534,
533,
5564,
... | 2.55 | 260 |
"""
Functional Principal Component Analysis
=======================================
Explores the two possible ways to do functional principal component analysis.
"""
# Author: Yujian Hong
# License: MIT
import skfda
from skfda.datasets import fetch_growth
from skfda.exploratory.visualization import plot_fpca_perturbation_graphs
from skfda.preprocessing.dim_reduction.projection import FPCA
from skfda.representation.basis import BSpline, Fourier, Monomial
import matplotlib.pyplot as plt
import numpy as np
##############################################################################
# In this example we are going to use functional principal component analysis to
# explore datasets and obtain conclusions about said dataset using this
# technique.
#
# First we are going to fetch the Berkeley Growth Study data. This dataset
# correspond to the height of several boys and girls measured from birth to
# when they are 18 years old. The number and time of the measurements are the
# same for each individual. To better understand the data we plot it.
# Use the imported ``fetch_growth`` directly, consistent with the later cells
# of this example (it was imported at the top of the file).
dataset = fetch_growth()
fd = dataset['data']
y = dataset['target']
fd.plot()
##############################################################################
# FPCA can be done in two ways. The first way is to operate directly with the
# raw data. We call it discretized FPCA as the functional data in this case
# consists in finite values dispersed over points in a domain range.
# We initialize and setup the FPCADiscretized object and run the fit method to
# obtain the first two components. By default, if we do not specify the number
# of components, it's 3. Other parameters are weights and centering. For more
# information please visit the documentation.
fpca_discretized = FPCA(n_components=2)  # discretized FPCA on the raw grid values
fpca_discretized.fit(fd)
fpca_discretized.components_.plot()  # plot the two fitted principal components
##############################################################################
# In the second case, the data is first converted to use a basis representation
# and the FPCA is done with the basis representation of the original data.
# We obtain the same dataset again and transform the data to a basis
# representation. This is because the FPCA module modifies the original data.
# We also plot the data for better visual representation.
dataset = fetch_growth()  # re-fetch: the FPCA module modifies the original data
fd = dataset['data']
basis = skfda.representation.basis.BSpline(n_basis=7)
basis_fd = fd.to_basis(basis)  # project the raw curves onto a 7-element B-spline basis
basis_fd.plot()
##############################################################################
# We initialize the FPCABasis object and run the fit function to obtain the
# first 2 principal components. By default the principal components are
# expressed in the same basis as the data. We can see that the obtained result
# is similar to the discretized case.
fpca = FPCA(n_components=2)  # basis-representation FPCA
fpca.fit(basis_fd)
fpca.components_.plot()  # components are expressed in the same basis as the data
##############################################################################
# To better illustrate the effects of the obtained two principal components,
# we add and subtract a multiple of the components to the mean function.
# We can then observe now that this principal component represents the
# variation in the mean growth between the children.
# The second component is more interesting. The most appropriate explanation is
# that it represents the differences between girls and boys. Girls tend to grow
# faster at an early age and boys tend to start puberty later, therefore, their
# growth is more significant later. Girls also stop growing early
plot_fpca_perturbation_graphs(basis_fd.mean(),
                              fpca.components_,
                              30,  # multiple of each component added/subtracted from the mean
                              fig=plt.figure(figsize=(6, 2 * 4)))
##############################################################################
# We can also specify another basis for the principal components as argument
# when creating the FPCABasis object. For example, if we use the Fourier basis
# for the obtained principal components we can see that the components are
# periodic. This example is only to illustrate the effect. In this dataset, as
# the functions are not periodic it does not make sense to use the Fourier
# basis
dataset = fetch_growth()
fd = dataset['data']
basis_fd = fd.to_basis(BSpline(n_basis=7))
fpca = FPCA(n_components=2, components_basis=Fourier(n_basis=7))  # components in a Fourier basis
fpca.fit(basis_fd)
fpca.components_.plot()  # periodic components -- illustration only, see comment above
##############################################################################
# We can observe that if we switch to the Monomial basis, we also lose the
# key features of the first principal components because it distorts the
# principal components, adding extra maximums and minimums. Therefore, in this
# case the best option is to use the BSpline basis as the basis for the
# principal components
dataset = fetch_growth()
fd = dataset['data']
basis_fd = fd.to_basis(BSpline(n_basis=7))
fpca = FPCA(n_components=2, components_basis=Monomial(n_basis=4))  # components in a monomial basis
fpca.fit(basis_fd)
fpca.components_.plot()  # distorted components -- illustration only, see comment above
| [
37811,
198,
22203,
282,
32641,
35100,
14691,
198,
10052,
1421,
18604,
198,
198,
18438,
2850,
262,
734,
1744,
2842,
284,
466,
10345,
10033,
7515,
3781,
13,
198,
37811,
198,
198,
2,
6434,
25,
10605,
73,
666,
9764,
198,
2,
13789,
25,
171... | 3.732532 | 1,331 |
from discord import role
from redbot.core import Config, commands, bank, checks
import discord
import random
from redbot.core.utils.chat_formatting import humanize_number
credit = "By: king slayer | Legend Gaming"
| [
6738,
36446,
1330,
2597,
201,
198,
6738,
2266,
13645,
13,
7295,
1330,
17056,
11,
9729,
11,
3331,
11,
8794,
201,
198,
11748,
36446,
201,
198,
11748,
4738,
201,
198,
6738,
2266,
13645,
13,
7295,
13,
26791,
13,
17006,
62,
18982,
889,
133... | 3.46875 | 64 |
# -*- encoding: utf-8 -*-
"""
Ending Module
ReST endpoints
"""
from __future__ import generator_stop
import sys
import os
from collections import OrderedDict as ODict, deque
import enum
try:
import simplejson as json
except ImportError:
import json
import datetime
import mimetypes
import arrow
import falcon
import libnacl
from ioflo.aid.sixing import *
from ioflo.aid import lodict
from ioflo.aid import timing
from ioflo.aid import classing
from ioflo.aio.http import httping
from ioflo.aid import getConsole
from .. import bluepeaing
from ..bluepeaing import SEPARATOR, ANON_EXPIRATION_DELAY, ValidationError
from ..help.helping import (parseSignatureHeader, verify64u, extractDidParts,
extractDatSignerParts, extractDidSignerParts,
validateSignedAgentReg, validateSignedThingReg,
validateSignedResource, validateSignedAgentWrite,
validateSignedThingWrite, keyToKey64u,
validateMessageData, verifySignedMessageWrite,
validateSignedOfferData, buildSignedServerOffer,
validateSignedThingTransfer, validateAnon,
validateIssuerDomainGen, )
from ..db import dbing
from ..keep import keeping
console = getConsole()
STATIC_BASE_PATH = "/static"
DEFAULT_STATIC_BASE_PATH = "/"
AGENT_BASE_PATH = "/agent"
SERVER_BASE_PATH = "/server"
THING_BASE_PATH = "/thing"
ANON_MSG_BASE_PATH = "/anon"
DEMO_BASE_PATH = "/demo"
class StaticSink(object):
    """
    Class that provided Falcon sink endpoint for serving static files.

    NOTE(review): the class body is only this docstring -- the WSGI-style
    reference implementation below was left inside the docstring and never
    converted into a ``__call__``/responder method; confirm whether this
    class is still used anywhere before implementing or removing it.

    # Geterating the full path of the static resource
    path = os.path.abspath(
        os.path.join(
            static_path,
            self.static_dir,
            environ['PATH_INFO'].lstrip('/')
        )
    )
    if not path.startswith(static_path) or not os.path.exists(path):
        return self.app(environ, start_response)
    else:
        filetype = mimetypes.guess_type(path, strict=True)[0]
        if not filetype:
            filetype = 'text/plain'
        start_response("200 OK", [('Content-type', filetype)])
        return environ['wsgi.file_wrapper'](open(path, 'rb'), 4096)
    # project directory
    PROJECT_DIR_PATH = os.path.dirname(os.path.abspath(__file__))
    # Web application specific static files
    STATIC_APP_PATH = os.path.join(PROJECT_DIR_PATH, 'app')
    """
class ServerResource:
    """
    Server Agent Resource

    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # fix: was ``super(**kwa)``, which only built a super object and
        # never invoked the base class initializer
        super().__init__(**kwa)
        self.store = store

    def on_get(self, req, rep):
        """
        Handles GET request for the Server Agent

        Responds with the self-signed server resource and its signature
        in the Signature header.
        """
        did = keeping.gKeeper.did
        # read from database
        try:
            dat, ser, sig = dbing.getSelfSigned(did)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying resource. {}'.format(ex))
        rep.set_header("Signature", 'signer="{}"'.format(sig))
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class AgentResource:
    """
    Agent Resource

    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # fix: was ``super(**kwa)``, which only built a super object and
        # never invoked the base class initializer
        super().__init__(**kwa)
        self.store = store

    @classing.attributize
    def onPostGen(self, skin, req, rep):
        """
        Generator to perform Agent post with support for backend request
        to validate issuant (HID)

        attributes:
            skin._status
            skin._headers
        are special and if assigned inside generator used by WSGI server
        to update status and headers upon first non-empty write.

        Does not use self. because only one instance of resource is used
        to process all requests.
        """
        skin._status = None  # used to update status in iterator if not None
        skin._headers = lodict()  # used to update headers in iterator if not empty
        yield b''  # ensure its a generator
        signature = req.get_header("Signature")
        sigs = parseSignatureHeader(signature)
        sig = sigs.get('signer')  # str not bytes
        if not sig:
            # fix: was ``fhttping.HTTPError`` -- a typo that raised NameError
            # instead of the intended HTTP 400
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing Signature header.')
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Read Error',
                                    'Could not read the request body.')
        ser = serb.decode("utf-8")
        try:
            dat = validateSignedAgentReg(sig, ser)
        except ValidationError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Error validating the request body. {}'.format(ex))
        did = dat['did']  # unicode version
        if "issuants" in dat:  # validate hid control here
            for issuant in dat["issuants"]:
                try:
                    # raises error if validation fails
                    yield from validateIssuerDomainGen(self.store,
                                                       dat,
                                                       issuant,
                                                       timeout=0.5)
                except ValidationError as ex:
                    raise httping.HTTPError(httping.BAD_REQUEST,
                                            'Validation Error',
                                            'Error validating issuant. {}'.format(ex))
        # no error so save to database
        try:
            dbing.putSigned(key=did, ser=ser, sig=sig, clobber=False)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                    'Database Error',
                                    '{}'.format(ex.args[0]))
        skin._status = httping.CREATED
        didURI = falcon.uri.encode_value(did)
        skin._headers["Location"] = "{}?did={}".format(AGENT_BASE_PATH, didURI)
        # normally picks of content-type from type of request but set anyway to ensure
        skin._headers["Content-Type"] = "application/json; charset=UTF-8"
        body = json.dumps(dat, indent=2).encode()
        # inside rep.stream generator, body is yielded or returned, not assigned to rep.body
        return body

    def on_post(self, req, rep):
        """
        Handles POST requests
        """
        rep.stream = self.onPostGen(req, rep)  # iterate on stream generator

    def on_get(self, req, rep):
        """
        Handles GET request for an AgentResources given by query parameter
        with did

        Query parameters:
            all: "true" to list all agents instead of a single one
            did: DID of the single agent to fetch (required when all != true)
            issuer: "true" to restrict the listing to issuers
        """
        all_ = req.get_param("all")  # returns url-decoded query parameter value
        all_ = all_ is not None and all_.lower() == "true"
        did = req.get_param("did")  # already has url-decoded query parameter value
        issuer = req.get_param("issuer")  # returns url-decoded query parameter value
        issuer = issuer is not None and issuer.lower() == "true"
        if all_:
            try:  # read from database
                entries = dbing.getAgents(issuer=issuer)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Lookup Error',
                                       'Error retrieving resource. {}'.format(ex))
            ser = json.dumps(entries, indent=2)
        else:
            if not did:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Query Parameter Error',
                                       'Missing query did.')
            # read from database
            try:
                dat, ser, sig = dbing.getSelfSigned(did)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Verification Error',
                                       'Error verifying resource. {}'.format(ex))
            # fix: set the Signature header only on this branch -- ``sig`` is
            # undefined on the listing (all_) branch
            rep.set_header("Signature", 'signer="{}"'.format(sig))
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class AgentDidResource:
    """
    Agent Did Resource
    Access agent by DID

    /agent/{adid}

    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # fix: was ``super(**kwa)``, which only built a super object and
        # never invoked the base class initializer
        super().__init__(**kwa)
        self.store = store

    @classing.attributize
    def onPutGen(self, skin, req, rep, did):
        """
        Generator to perform Agent put with support for backend request
        to validate issuant (HID)

        attributes:
            skin._status
            skin._headers
        are special and if assigned inside generator used by WSGI server
        to update status and headers upon first non-empty write.

        Does not use self. because only one instance of resource is used
        to process all requests.
        """
        skin._status = None  # used to update status in iterator if not None
        skin._headers = lodict()  # used to update headers in iterator if not empty
        yield b''  # ensure its a generator
        signature = req.get_header("Signature")
        sigs = parseSignatureHeader(signature)
        sig = sigs.get('signer')  # str not bytes
        if not sig:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing Signature header.')
        csig = sigs.get('current')  # str not bytes
        if not csig:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing Signature header.')
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Read Error',
                                    'Could not read the request body.')
        ser = serb.decode("utf-8")
        # Get validated current resource from database
        try:
            rdat, rser, rsig = dbing.getSelfSigned(did)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Error verifying signer resource. {}'.format(ex))
        # validate request
        try:
            dat = validateSignedAgentWrite(cdat=rdat, csig=csig, sig=sig, ser=ser)
        except ValidationError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Error validating the request body. {}'.format(ex))
        if "issuants" in dat:  # validate hid control here
            for issuant in dat["issuants"]:
                try:
                    # raises error if validation fails
                    yield from validateIssuerDomainGen(self.store,
                                                       dat,
                                                       issuant,
                                                       timeout=0.5)
                except ValidationError as ex:
                    raise httping.HTTPError(httping.BAD_REQUEST,
                                            'Validation Error',
                                            'Error validating issuant. {}'.format(ex))
        # save to database
        try:
            dbing.putSigned(key=did, ser=ser, sig=sig, clobber=True)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                    'Database Error',
                                    '{}'.format(ex.args[0]))
        # normally picks of content-type from type of request but set anyway to ensure
        skin._headers["Content-Type"] = "application/json; charset=UTF-8"
        skin._headers["Signature"] = 'signer="{}"'.format(sig)
        skin._status = httping.OK
        # inside rep.stream generator, body is yielded or returned, not assigned to rep.body
        return ser.encode()

    def on_put(self, req, rep, did):
        """
        Handles PUT requests

        /agent/{did}

        Falcon url decodes path parameters such as {did}
        """
        rep.stream = self.onPutGen(req, rep, did)  # iterate on stream generator

    def on_get(self, req, rep, did):
        """
        Handles GET request for an Agent Resource by did

        /agent/{did}

        Falcon url decodes path parameters such as {did}
        """
        # read from database
        try:
            dat, ser, sig = dbing.getSelfSigned(did)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying resource. {}'.format(ex))
        rep.set_header("Signature", 'signer="{}"'.format(sig))
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class AgentDidDropResource:
    """
    Agent Did Drop Resource
    Drop message in inbox of Agent
    /agent/{did}/drop
    did is receiver agent did
    Attributes:
        .store is reference to ioflo data store
    Example message body:
    {
        "uid": "m_00035d2976e6a000_26ace93",
        "kind": "found",
        "signer": "did:igo:Qt27fThWoNZsa88VrTkep6H-4HA8tr54sHON1vWl6FE=#0",
        "date": "2000-01-03T00:00:00+00:00",
        "to": "did:igo:dZ74MLZXD-1QHoa73w9pQ9GroAvxqFi2RTZWlkC0raY=",
        "from": "did:igo:Qt27fThWoNZsa88VrTkep6H-4HA8tr54sHON1vWl6FE=",
        "thing": "did:igo:4JCM8dJWw_O57vM4kAtTt0yWqSgBuwiHpVgd55BioCM=",
        "subject": "Lose something?",
        "content": "Look what I found"
    }
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # NOTE(review): super(**kwa) builds a super proxy without calling
        # __init__ and raises TypeError if kwa is non-empty -- confirm the
        # intent was cooperative init i.e. super().__init__(**kwa).
        super(**kwa)
        self.store = store

    def on_post(self, req, rep, did):
        """
        Handles POST requests

        Verifies the message signature against the sender's (from field)
        agent keys, checks that the url did matches the message "to" field
        and that the signer matches the "from" field, then stores the
        message under key "{did}/drop/{sdid}/{muid}" (duplicates rejected).
        """
        # extract the sender's signature from the Signature header
        signature = req.get_header("Signature")
        sigs = parseSignatureHeader(signature)
        msig = sigs.get('signer')  # str not bytes
        if not msig:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Invalid or missing Signature header.')
        try:
            mserb = req.stream.read()  # bytes
        except Exception:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Read Error',
                                   'Could not read the request body.')
        mser = mserb.decode("utf-8")
        try:
            mdat = validateMessageData(mser)
        except ValidationError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Invalid message data. {}'.format(ex))
        if did != mdat['to']:  # destination to did and did in url not same
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Mismatch message to and url DIDs.')
        # extract sdid and keystr from signer field in message
        try:
            (sdid, index, akey) = extractDatSignerParts(mdat)
        except ValueError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Missing or Invalid signer field. {}'.format(ex))
        # Get validated signer resource from database
        try:
            sdat, sser, ssig = dbing.getSelfSigned(sdid)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying signer resource. {}'.format(ex))
        # verify request signature against the signer's indexed key
        try:
            result = verifySignedMessageWrite(sdat=sdat, index=index, sig=msig, ser=mser)
        except ValidationError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Error validating the request body. {}'.format(ex))
        if sdid != mdat['from']:  # signer did and message from field not same
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Mismatch message from and signer DIDs.')
        # Get validated destination agent resource from database
        # (result unused; lookup only confirms the receiver agent exists)
        try:
            ddat, dser, dsig = dbing.getSelfSigned(did)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying destination resource. {}'.format(ex))
        # Build key for message from (to, from, uid) (did, sdid, muid)
        muid = mdat['uid']
        key = "{}/drop/{}/{}".format(did, sdid, muid)
        # save message to database error if duplicate
        try:
            dbing.putSigned(key=key, ser=mser, sig=msig, clobber=False)  # no clobber so error
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_412,
                                   'Database Error',
                                   '{}'.format(ex.args[0]))
        didUri = falcon.uri.encode_value(did)
        sdidUri = falcon.uri.encode_value(sdid)
        rep.status = falcon.HTTP_201  # post response status with location header
        rep.location = "{}/{}/drop?from={}&uid={}".format(AGENT_BASE_PATH,
                                                          didUri,
                                                          sdidUri,
                                                          muid)
        rep.body = json.dumps(mdat, indent=2)

    def on_get(self, req, rep, did):
        """
        Handles GET request for an AgentResources given by query parameter
        with did

        ?all=true returns every drop entry for the agent; otherwise both
        uid and from query parameters are required to select one message.
        """
        all_ = req.get_param("all")  # returns url-decoded query parameter value
        if all_ and all_.lower() == "true":
            all_ = True
        else:
            all_ = False
        muid = req.get_param("uid")  # returns url-decoded query parameter value
        sdid = req.get_param("from")  # returns url-decoded query parameter value
        if all_:
            try:  # read from database
                entries = dbing.getDrops(did)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Lookup Error',
                                       'Error retrieving resource. {}'.format(ex))
            ser = json.dumps(entries, indent=2)
        else:
            if not muid or not sdid:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Query Parameter Error',
                                       'Missing query parameters uid and from.')
            key = "{}/drop/{}/{}".format(did, sdid, muid)  # (to, from, uid) (did, sdid, muid)
            # read from database
            try:
                dat, ser, sig = dbing.getSigned(key)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Verification Error',
                                       'Error verifying resource. {}'.format(ex))
            # Signature header only meaningful for a single signed message
            rep.set_header("Signature", 'signer="{}"'.format(sig))
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class ThingResource:
    """
    Thing Resource
    Registration (POST) and query (GET) of Thing resources.
    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        super(**kwa)
        self.store = store

    @classing.attributize
    def onPostGen(self, skin, req, rep):
        """
        Generator to perform Thing post with support for backend request
        to validate issuant (HID)
        attributes:
            skin._status
            skin._headers
        are special and if assigned inside generator used by WSGI server
        to update status and headers upon first non-empty write.
        Does not use self. because only one instance of resource is used
        to process all requests.
        """
        skin._status = None  # used to update status in iterator if not None
        skin._headers = lodict()  # used to update headers in iterator if not empty
        yield b''  # ensure its a generator
        # both the thing's did signature and its signer's signature are required
        sigs = parseSignatureHeader(req.get_header("Signature"))
        dsig = sigs.get('did')  # str not bytes thing's did signature
        if not dsig:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing Signature header.')
        tsig = sigs.get('signer')  # str not bytes thing's signer signature
        if not tsig:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing Signature header.')
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Read Error',
                                    'Could not read the request body.')
        ser = serb.decode("utf-8")
        # validate thing resource and verify did signature
        try:
            dat = validateSignedThingReg(dsig, ser)
        except ValidationError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Could not validate the request body. {}'.format(ex))
        # verify signer signature by looking up signer data resource in database
        try:
            sdid, index = dat["signer"].rsplit("#", maxsplit=1)
            index = int(index)  # get index and sdid from signer field
        except (AttributeError, ValueError) as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing did key index.')  # missing sdid or index
        # read and verify signer agent from database
        try:
            sdat, sser, ssig = dbing.getSelfSigned(sdid)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Error verifying signer resource. {}'.format(ex))
        # now use signer agents key indexed for thing signer to verify thing resource
        try:
            tkey = sdat['keys'][index]['key']
        except (TypeError, IndexError, KeyError) as ex:
            raise httping.HTTPError(httping.FAILED_DEPENDENCY,
                                    'Data Resource Error',
                                    'Missing signing key')
        try:
            validateSignedResource(tsig, ser, tkey)
        except ValidationError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Could not validate the request body. {}'.format(ex))
        tdid = dat['did']  # unicode version
        if "hid" in dat and dat["hid"]:  # non-empty hid
            # validate hid control here: the controlling agent must have an
            # issuant whose issuer prefixes the hid's issue part
            found = False
            for issuant in sdat.get("issuants", []):
                issuer = issuant.get("issuer")
                try:
                    prefix, kind, issue = dat['hid'].split(":", maxsplit=2)
                except ValueError as ex:
                    raise httping.HTTPError(httping.BAD_REQUEST,
                                            'Validation Error',
                                            'Invalid hid format. {}'.format(ex))
                if issue.startswith(issuer):
                    found = True
                    try:
                        result = yield from validateIssuerDomainGen(self.store,
                                                                    sdat,
                                                                    issuant,
                                                                    timeout=0.5)  # raises error if fails
                    except ValidationError as ex:
                        raise httping.HTTPError(httping.BAD_REQUEST,
                                                'Validation Error',
                                                'Error validating issuant. {}'.format(ex))
                    try:  # add entry to hids table to lookup did by hid
                        dbing.putHid(dat['hid'], tdid)
                    except DatabaseError as ex:
                        raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                                'Database Error',
                                                '{}'.format(ex.args[0]))
            if not found:
                # FIX: message was missing the word "control"; now consistent
                # with the equivalent checks in the other thing resources
                raise httping.HTTPError(httping.FAILED_DEPENDENCY,
                                        'Validation Error',
                                        'Controlling Agent does not control corresponding issuant')
        # save to database core
        try:
            dbing.putSigned(key=tdid, ser=ser, sig=tsig, clobber=False)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                    'Database Error',
                                    '{}'.format(ex.args[0]))
        skin._status = httping.CREATED
        didURI = falcon.uri.encode_value(tdid)
        skin._headers["Location"] = "{}?did={}".format(THING_BASE_PATH, didURI)
        # normally picks of content-type from type of request but set anyway to ensure
        skin._headers["Content-Type"] = "application/json; charset=UTF-8"
        body = json.dumps(dat, indent=2).encode()
        # inside rep.stream generator, body is yielded or returned, not assigned to rep.body
        return body

    def on_post(self, req, rep):
        """
        Handles POST requests
        """
        rep.stream = self.onPostGen(req, rep)  # iterate on stream generator

    def on_get(self, req, rep):
        """
        Handles GET request for an ThingResources given by query parameter
        with did

        ?all=true returns all things; ?hid= resolves a hid to its did;
        otherwise ?did= selects one thing resource directly.
        """
        all_ = req.get_param("all")  # returns url-decoded query parameter value
        if all_ and all_.lower() == "true":
            all_ = True
        else:
            all_ = False
        hid = req.get_param("hid")  # already has url-decoded query parameter value
        did = req.get_param("did")  # already has url-decoded query parameter value
        if all_:
            try:  # read from database
                entries = dbing.getThings()
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Lookup Error',
                                       'Error retrieving resource. {}'.format(ex))
            ser = json.dumps(entries, indent=2)
        else:
            if hid:
                try:  # read from database, hid lookup overrides did parameter
                    did = dbing.getHid(hid)
                except dbing.DatabaseError as ex:
                    raise falcon.HTTPError(falcon.HTTP_400,
                                           'Resource Verification Error',
                                           'Error verifying resource. {}'.format(ex))
                if not did:  # empty entry
                    raise falcon.HTTPError(falcon.HTTP_NOT_FOUND,
                                           'Not Found Error',
                                           'DID for HID no longer exists.')
            if not did:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Query Parameter Error',
                                       'Missing query parameters.')
            try:  # read from database
                dat, ser, sig = dbing.getSigned(did)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Verification Error',
                                       'Error verifying resource. {}'.format(ex))
            if dat is None:
                raise falcon.HTTPError(falcon.HTTP_NOT_FOUND,
                                       'Not Found Error',
                                       'DID "{}" resource does not exist'.format(did))
            rep.set_header("Signature", 'signer="{}"'.format(sig))
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class ThingDidResource:
    """
    Thing Did Resource
    Access Thing resource by DID
    /thing/{did}
    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # NOTE(review): super(**kwa) builds a super proxy without calling
        # __init__ and raises TypeError if kwa is non-empty -- confirm the
        # intent was cooperative init i.e. super().__init__(**kwa).
        super(**kwa)
        self.store = store

    @classing.attributize
    def onPutGen(self, skin, req, rep, did):
        """
        Generator to perform Agent put with support for backend request
        to validate issuant (HID)
        attributes:
            skin._status
            skin._headers
        are special and if assigned inside generator used by WSGI server
        to update status and headers upon first non-empty write.
        Does not use self. because only one instance of resource is used
        to process all requests.
        """
        skin._status = None  # used to update status in iterator if not None
        skin._headers = lodict()  # used to update headers in iterator if not empty
        yield b''  # ensure its a generator
        # a put requires both the new signer signature and the current
        # (previous) signer signature in the Signature header
        signature = req.get_header("Signature")
        sigs = parseSignatureHeader(signature)
        sig = sigs.get('signer')  # str not bytes
        if not sig:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Invalid or missing Signature header.')
        csig = sigs.get('current')  # str not bytes
        if not csig:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Invalid or missing Signature header.')
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Read Error',
                                   'Could not read the request body.')
        ser = serb.decode("utf-8")
        try:  # validate did
            ckey = extractDidParts(did)
        except ValueError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Invalid did field. {}'.format(ex))
        try:  # Get validated existing resource from database
            cdat, cser, psig = dbing.getSigned(did)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying current thing resource. {}'.format(ex))
        # extract sdid and keystr from signer field
        try:
            (sdid, index, akey) = extractDatSignerParts(cdat)
        except ValueError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Missing or Invalid signer field. {}'.format(ex))
        # Get validated signer resource from database
        try:
            sdat, sser, ssig = dbing.getSelfSigned(sdid)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying signer resource. {}'.format(ex))
        # validate request
        try:
            dat = validateSignedThingWrite(sdat=sdat, cdat=cdat, csig=csig, sig=sig, ser=ser)
        except ValidationError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Error validating the request body. {}'.format(ex))
        if "hid" in dat:  # new or changed hid
            if ((dat["hid"] and not "hid" in cdat) or
                    (dat["hid"] and dat["hid"] != cdat["hid"])):
                # validate hid control here: controlling agent must have an
                # issuant whose issuer prefixes the hid's issue part
                found = False
                for issuant in sdat.get("issuants", []):
                    issuer = issuant.get("issuer")
                    try:
                        prefix, kind, issue = dat['hid'].split(":", maxsplit=2)
                    except ValueError as ex:
                        raise httping.HTTPError(httping.BAD_REQUEST,
                                                'Validation Error',
                                                'Invalid hid format. {}'.format(ex))
                    if issue.startswith(issuer):
                        found = True
                        try:
                            result = yield from validateIssuerDomainGen(self.store,
                                                                        sdat,
                                                                        issuant,
                                                                        timeout=0.5)  # raises error if fails
                        except ValidationError as ex:
                            raise httping.HTTPError(httping.BAD_REQUEST,
                                                    'Validation Error',
                                                    'Error validating issuant. {}'.format(ex))
                        try:  # add entry to hids table to lookup did by hid
                            dbing.putHid(dat['hid'], did)
                        except DatabaseError as ex:
                            raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                                    'Database Error',
                                                    '{}'.format(ex.args[0]))
                if not found:
                    raise httping.HTTPError(httping.FAILED_DEPENDENCY,
                                            'Validation Error',
                                            'Controlling Agent does not control corresponding issuant')
        # if the hid was removed or changed, blank the old hid lookup entry
        if ("hid" in cdat and cdat["hid"] and
                (not "hid" in dat or dat["hid"] != cdat["hid"])):
            try:  # put empty in old cdat hid entry
                dbing.putHid(cdat['hid'], "")
            except DatabaseError as ex:
                raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                        'Database Error',
                                        '{}'.format(ex.args[0]))
        try:  # save to database
            dbing.putSigned(key=did, ser=ser, sig=sig, clobber=True)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_412,
                                   'Database Error',
                                   '{}'.format(ex.args[0]))
        # normally picks of content-type from type of request but set anyway to ensure
        skin._headers["Content-Type"] = "application/json; charset=UTF-8"
        skin._headers["Signature"] = 'signer="{}"'.format(sig)
        skin._status = httping.OK
        # inside rep.stream generator, body is yielded or returned, not assigned to rep.body
        return ser.encode()

    def on_put(self, req, rep, did):
        """
        Handles PUT requests
        /thing/{did}
        Falcon url decodes path parameters such as {did}
        """
        rep.stream = self.onPutGen(req, rep, did)  # iterate on stream generator

    def on_get(self, req, rep, did):
        """
        Handles GET request for an Thing Resource by did
        /thing/{did}
        Falcon url decodes path parameters such as {did}
        """
        # read from database
        try:
            dat, ser, sig = dbing.getSigned(did)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying resource. {}'.format(ex))
        if dat is None:
            raise falcon.HTTPError(falcon.HTTP_NOT_FOUND,
                                   'Not Found Error',
                                   'DID resource does not exist')
        rep.set_header("Signature", 'signer="{}"'.format(sig))
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class ThingDidOfferResource:
    """
    Thing Did Offer Resource
    Create offer to transfer title to Thing at DID message
    /thing/{did}/offer
    did is thing did
    offer request fields
    {
        "uid": offeruniqueid,
        "thing": thingDID,
        "aspirant": AgentDID,
        "duration": timeinsecondsofferisopen,
    }
    offer response fields
    {
        "uid": offeruniqueid,
        "thing": thingDID,
        "aspirant": AgentDID,
        "duration": timeinsecondsofferisopen,
        "expiration": datetimeofexpiration,
        "signer": serverkeydid,
        "offerer": ownerkeydid,
        "offer": Base64serrequest
    }
    The value of the did to offer expires entry
    {
        "offer": "{did}/offer/{ouid}",  # key of offer entry in core database
        "expire": "2000-01-01T00:36:00+00:00",  # ISO-8601 expiration date of offer
    }
    Database key is
    did/offer/ouid
    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        super(**kwa)
        self.store = store

    def on_post(self, req, rep, did):
        """
        Handles POST requests

        Verifies the offer request against the thing's holder agent,
        builds a server-signed offer with an expiration, rejects it if an
        unexpired offer already prevails, then stores the offer and its
        expiration entry.
        """
        signature = req.get_header("Signature")
        sigs = parseSignatureHeader(signature)
        sig = sigs.get('signer')  # str not bytes
        if not sig:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Invalid or missing Signature header.')
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Read Error',
                                   'Could not read the request body.')
        try:  # validate did
            tkey = extractDidParts(did)
        except ValueError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Invalid did field. {}'.format(ex))
        try:  # Get validated thing resource from database
            tdat, tser, tsig = dbing.getSigned(did)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying signer resource. {}'.format(ex))
        try:  # validate signer field
            (adid, index, akey) = extractDatSignerParts(tdat)
        except ValueError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Missing or Invalid signer field. {}'.format(ex))
        try:  # Get validated holder agent resource from database
            adat, aser, asig = dbing.getSigned(adid)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying signer resource. {}'.format(ex))
        # Get validated server resource from database
        sdid = keeping.gKeeper.did
        try:
            sdat, sser, ssig = dbing.getSigned(sdid)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Resource Verification Error',
                                   'Error verifying signer resource. {}'.format(ex))
        ser = serb.decode("utf-8")
        try:
            dat = validateSignedOfferData(adat, ser, sig, tdat)
        except ValidationError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Invalid offer data. {}'.format(ex))
        dt = datetime.datetime.now(tz=datetime.timezone.utc)
        # build signed offer
        odat, oser, osig = buildSignedServerOffer(dat, ser, sig, tdat, sdat, dt,
                                                  sk=keeping.gKeeper.sigkey)
        # validate that no unexpired offers
        entries = dbing.getOfferExpires(did)
        if entries:
            entry = entries[-1]
            edt = arrow.get(entry["expire"])
            if dt <= edt:  # not yet expired
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Validation Error',
                                       'Unexpired prevailing offer.')
        # Build database key for offer
        key = "{}/offer/{}".format(did, odat["uid"])
        # save offer to database, raise error if duplicate
        try:
            dbing.putSigned(key=key, ser=oser, sig=osig, clobber=False)  # no clobber so error
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_412,
                                   'Database Error',
                                   '{}'.format(ex.args[0]))
        # save entry to offer expires database
        # (FIX: removed dead local odt = arrow.get(odat["expiration"]) which
        # was computed but never used)
        result = dbing.putDidOfferExpire(did=did,
                                         ouid=odat["uid"],
                                         expire=odat["expiration"])
        if not result:  # should never happen
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Database Table Error',
                                   'Failure making entry.')
        didUri = falcon.uri.encode_value(did)
        rep.status = falcon.HTTP_201  # post response status with location header
        rep.location = "{}/{}/offer?uid={}".format(THING_BASE_PATH,
                                                   didUri,
                                                   odat["uid"])
        rep.body = json.dumps(odat, indent=2)

    def on_get(self, req, rep, did):
        """
        Handles GET request for Thing offer resource with did
        and uid in query params

        ?all=true returns every offering, ?latest=true only the last one;
        otherwise ?uid= selects a single signed offer.
        """
        all_ = req.get_param("all")  # returns url-decoded query parameter value
        if all_ and all_.lower() == "true":
            all_ = True
        else:
            all_ = False
        latest = req.get_param("latest")  # returns url-decoded query parameter value
        if latest and latest.lower() == "true":
            latest = True
        else:
            latest = False
        ouid = req.get_param("uid")  # returns url-decoded query parameter value
        if all_ or latest:
            lastOnly = False if all_ else True
            try:  # read from database
                offerings = dbing.getOfferExpires(did, lastOnly=lastOnly)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Lookup Error',
                                       'Error retrieving resource. {}'.format(ex))
            for offering in offerings:
                # offer key has form "{did}/offer/{ouid}"; expose only the ouid
                tdid, offer, ouid = offering['offer'].split("/")
                offering['uid'] = ouid
                del offering['offer']
            ser = json.dumps(offerings, indent=2)
        elif ouid:
            key = "{}/offer/{}".format(did, ouid)
            try:  # read from database
                dat, ser, sig = dbing.getSigned(key)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Verification Error',
                                       'Error verifying resource. {}'.format(ex))
            rep.set_header("Signature", 'signer="{}"'.format(sig))
        else:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Query Parameter Error',
                                   'Missing query parameters.')
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class ThingDidAcceptResource:
    """
    Thing Did Accept Resource
    Accept offer to transfer title to Thing at DID message
    /thing/{did}/accept?uid=ouid
    did is thing did
    offer request fields
    {
        "uid": offeruniqueid,
        "thing": thingDID,
        "aspirant": AgentDID,
        "duration": timeinsecondsofferisopen,
    }
    offer response fields
    {
        "uid": offeruniqueid,
        "thing": thingDID,
        "aspirant": AgentDID,
        "duration": timeinsecondsofferisopen,
        "expiration": datetimeofexpiration,
        "signer": serverkeydid,
        "offerer": ownerkeydid,
        "offer": Base64serrequest
    }
    The value of the did to offer expires entry
    {
        "offer": "{did}/offer/{ouid}",  # key of offer entry in core database
        "expire": "2000-01-01T00:36:00+00:00",  # ISO-8601 expiration date of offer
    }
    Database key is
    did/offer/ouid
    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # NOTE(review): super(**kwa) builds a super proxy without calling
        # __init__ and raises TypeError if kwa is non-empty -- confirm the
        # intent was cooperative init i.e. super().__init__(**kwa).
        super(**kwa)
        self.store = store

    @classing.attributize
    def onPostGen(self, skin, req, rep, did):
        """
        Generator to perform Agent put with support for backend request
        to validate issuant (HID)
        attributes:
            skin._status
            skin._headers
        are special and if assigned inside generator used by WSGI server
        to update status and headers upon first non-empty write.
        Does not use self. because only one instance of resource is used
        to process all requests.
        """
        skin._status = None  # used to update status in iterator if not None
        skin._headers = lodict()  # used to update headers in iterator if not empty
        yield b''  # ensure its a generator
        ouid = req.get_param("uid")  # returns url-decoded query parameter value
        if not ouid:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Query Validation Error',
                                    'Invalid or missing query parameter uid')
        try:  # validate did
            tkey = extractDidParts(did)
        except ValueError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Invalid did. {}'.format(ex))
        # read offer from database
        key = "{}/offer/{}".format(did, ouid)
        try:
            odat, oser, osig = dbing.getSigned(key)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Error verifying resource. {}'.format(ex))
        dt = datetime.datetime.now(tz=datetime.timezone.utc)
        # validate offer has not yet expired
        odt = arrow.get(odat["expiration"])
        if dt > odt:  # expired
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Expired offer.')
        # validate offer is latest
        entries = dbing.getOfferExpires(did)
        if entries:
            entry = entries[-1]
            edt = arrow.get(entry["expire"])
            if odt != edt or entry['offer'] != key:  # not latest offer
                raise httping.HTTPError(httping.BAD_REQUEST,
                                        'Validation Error',
                                        'Not latest offer.')
        # the accepting agent must be the offer's aspirant
        adid = odat['aspirant']
        try:  # validate validate aspirant did
            akey = extractDidParts(adid)
        except ValueError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Invalid did field. {}'.format(ex))
        # read aspirant data resource from database
        try:
            adat, aser, asig = dbing.getSelfSigned(adid)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Error verifying resource. {}'.format(ex))
        signature = req.get_header("Signature")
        sigs = parseSignatureHeader(signature)
        sig = sigs.get('signer')  # str not bytes
        if not sig:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Invalid or missing Signature header.')
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Read Error',
                                    'Could not read the request body.')
        ser = serb.decode("utf-8")
        try:
            dat = validateSignedThingTransfer(adat=adat, tdid=did, sig=sig, ser=ser)
        except ValidationError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Validation Error',
                                    'Error validating the request body. {}'.format(ex))
        try:  # Get validated existing thing resource from database
            cdat, cser, psig = dbing.getSigned(did)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.BAD_REQUEST,
                                    'Resource Verification Error',
                                    'Error verifying current thing resource. {}'.format(ex))
        if "hid" in dat:  # new or changed hid
            if ((dat["hid"] and not "hid" in cdat) or
                    (dat["hid"] and dat["hid"] != cdat["hid"])):
                # validate hid control here: aspirant agent must have an
                # issuant whose issuer prefixes the hid's issue part
                found = False
                for issuant in adat.get("issuants", []):
                    issuer = issuant.get("issuer")
                    try:
                        prefix, kind, issue = dat['hid'].split(":", maxsplit=2)
                    except ValueError as ex:
                        raise httping.HTTPError(httping.BAD_REQUEST,
                                                'Validation Error',
                                                'Invalid hid format. {}'.format(ex))
                    if issue.startswith(issuer):
                        found = True
                        try:
                            result = yield from validateIssuerDomainGen(self.store,
                                                                        adat,
                                                                        issuant,
                                                                        timeout=0.5)  # raises error if fails
                        except ValidationError as ex:
                            raise httping.HTTPError(httping.BAD_REQUEST,
                                                    'Validation Error',
                                                    'Error validating issuant. {}'.format(ex))
                        try:  # add entry to hids table to lookup did by hid
                            dbing.putHid(dat['hid'], did)
                        except DatabaseError as ex:
                            raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                                    'Database Error',
                                                    '{}'.format(ex.args[0]))
                if not found:
                    raise httping.HTTPError(httping.FAILED_DEPENDENCY,
                                            'Validation Error',
                                            'Aspirant Agent does not control corresponding issuant')
        # if the hid was removed or changed, blank the old hid lookup entry
        if ("hid" in cdat and cdat["hid"] and
                (not "hid" in dat or dat["hid"] != cdat["hid"])):
            try:  # put empty in old cdat hid entry
                dbing.putHid(cdat['hid'], "")
            except DatabaseError as ex:
                raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                        'Database Error',
                                        '{}'.format(ex.args[0]))
        # write new thing resource to database
        try:
            dbing.putSigned(key=did, ser=ser, sig=sig, clobber=True)
        except dbing.DatabaseError as ex:
            raise httping.HTTPError(httping.PRECONDITION_FAILED,
                                    'Database Error',
                                    '{}'.format(ex.args[0]))
        skin._status = httping.CREATED
        didURI = falcon.uri.encode_value(did)
        skin._headers["Location"] = "{}/{}".format(THING_BASE_PATH, didURI)
        # normally picks of content-type from type of request but set anyway to ensure
        skin._headers["Content-Type"] = "application/json; charset=UTF-8"
        body = json.dumps(dat, indent=2).encode()
        # inside rep.stream generator, body is yielded or returned, not assigned to rep.body
        return body

    def on_post(self, req, rep, did):
        """
        Handles POST requests
        Post body is new Thing resource with new signer
        did is thing DID
        """
        rep.stream = self.onPostGen(req, rep, did)  # iterate on stream generator
class AnonMsgResource:
    """
    Anonymous Message Resource
    Create and Read anonymous messages
    /anon
    /anon?uid=abcdef12
    Database key is
    uid
    {
        create: serverdatetimecreatestamp,
        expire: serverdatetimeexpirestamp
        anon:
        {
            uid: uid,
            content: xoredgatewaylocationstringormsg,
            date: gatewaydatetime,
        }
    }
    uid is message uid string up to 32 bytes
        if tracker then ephemeral ID in base64 url safe
    content is message content string up to 256 bytes
        if tracker then location string in base64 url safe
    dts is iso8601 datetime stamp
    The value of the entry is serialized JSON
    {
        create: 1501774813367861, # creation in server time microseconds since epoch
        expire: 1501818013367861, # expiration in server time microseconds since epoch
        anon:
        {
            uid: "AQIDBAoLDA0=",  # base64 url safe of 8 byte eid
            content: "EjRWeBI0Vng=",  # base64 url safe of 8 byte location
            date: "2000-01-01T00:36:00+00:00",  # ISO-8601 creation date of anon gateway time
        }
    }
    Attributes:
        .store is reference to ioflo data store
    """
    def __init__(self, store=None, **kwa):
        """
        Parameters:
            store is reference to ioflo data store
        """
        # NOTE(review): super(**kwa) builds a super proxy without calling
        # __init__ and raises TypeError if kwa is non-empty -- confirm the
        # intent was cooperative init i.e. super().__init__(**kwa).
        super(**kwa)
        self.store = store

    def on_post(self, req, rep):
        """
        Handles POST requests
        Post body is tracking message from Gateway
        {
            uid: "AQIDBAoLDA0=",  # base64 url safe of 8 byte eid
            content: "EjRWeBI0Vng=",  # base64 url safe of 8 byte location
            date: "2000-01-01T00:36:00+00:00",  # ISO-8601 creation date of anon gateway time
        }
        uid is up to 32 bytes
            if anon ephemeral ID in base64 url safe
        content is message up to 256 bytes
            if location string in base 64 url safe
        date is iso8601 datetime
        This is augmented with server time stamp and stored in database
        {
            create: 1501774813367861, # creation in server time microseconds since epoch
            expire: 1501818013367861, # expiration in server time microseconds since epoch
            anon:
            {
                uid: "AQIDBAoLDA0=",  # base64 url safe of 8 byte eid
                content: "EjRWeBI0Vng=",  # base64 url safe of 8 byte location
                date: "2000-01-01T00:36:00+00:00",  # ISO-8601 creation date of anon gateway time
            }
        }
        """
        try:
            serb = req.stream.read()  # bytes
        except Exception:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Read Error',
                                   'Could not read the request body.')
        ser = serb.decode("utf-8")
        try:
            dat = validateAnon(ser=ser)
        except ValidationError as ex:
            raise falcon.HTTPError(falcon.HTTP_400,
                                   'Validation Error',
                                   'Error validating the request body. {}'.format(ex))
        uid = dat['uid']
        dt = datetime.datetime.now(tz=datetime.timezone.utc)
        create = int(dt.timestamp() * 1000000)  # timestamp in microseconds since epoch
        expire = create + int(ANON_EXPIRATION_DELAY * 1000000)
        # wrap the gateway message with the server-side lifetime stamps
        sdat = ODict()
        sdat["create"] = create
        sdat["expire"] = expire
        sdat["anon"] = dat
        # write new anon data resource to database at uid
        try:
            dbing.putAnonMsg(key=uid, data=sdat)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_412,
                                   'Database Error',
                                   '{}'.format(ex.args[0]))
        # write new expiration of anon uid to database
        try:
            dbing.putExpireUid(key=expire, uid=uid)
        except dbing.DatabaseError as ex:
            raise falcon.HTTPError(falcon.HTTP_412,
                                   'Database Error',
                                   '{}'.format(ex.args[0]))
        eidUri = falcon.uri.encode_value(uid)
        rep.status = falcon.HTTP_201  # post response status with location header
        rep.location = "{}?uid={}".format(ANON_MSG_BASE_PATH, eidUri)
        rep.body = json.dumps(sdat, indent=2)

    def on_get(self, req, rep):
        """
        Handles GET request for anon resource and uid in query params

        ?all=true returns every known anon uid; otherwise ?uid= returns
        all stored messages for that uid.
        """
        all_ = req.get_param("all")  # returns url-decoded query parameter value
        if all_ and all_.lower() == "true":
            all_ = True
        else:
            all_ = False
        uid = req.get_param("uid")  # returns url-decoded query parameter value
        if all_:
            try:  # read from database
                entries = dbing.getAllAnonUids()
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Lookup Error',
                                       'Error retrieving resource. {}'.format(ex))
            ser = json.dumps(entries, indent=2)
        else:
            if not uid:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Error',
                                       'Missing or invalid query parameter uid.')
            # read all anon msgs from database for given uid
            anons = []
            try:
                anons = dbing.getAnonMsgs(key=uid)
            except dbing.DatabaseError as ex:
                raise falcon.HTTPError(falcon.HTTP_400,
                                       'Resource Error',
                                       'Resource malformed. {}'.format(ex))
            if not anons:
                raise falcon.HTTPError(falcon.HTTP_NOT_FOUND,
                                       'Not Found Error',
                                       'Anon uid does not exist')
            ser = json.dumps(anons, indent=2)
        rep.set_header("Content-Type", "application/json; charset=UTF-8")
        rep.status = falcon.HTTP_200  # This is the default status
        rep.body = ser
class CheckHidResource:
"""
Check Hid Resource
Responds to challenge for hid namespace
used as demonstration
Attributes:
.store is reference to ioflo data store
"""
def __init__(self, store=None, **kwa):
"""
Parameters:
store is reference to ioflo data store
"""
super(**kwa)
self.store = store
def on_get(self, req, rep):
"""
Handles GET request for HID namespace check given by
query parameters:
did for issuing agent
check (challenge) text is concatenation of:
did|issuer|date
where issuer is from issuants in agent resource
date is iso-8601 date
Response is signature in signature header with json body
{
signer: keyedsignerkeyfromagent,
check: did|issuer|date
}
"""
# have to create so test verify HID has keys to respond put in demo db
agents, things = dbing.setupTestDbAgentsThings(dbn='demo', clobber=True)
did = req.get_param("did") # already has url-decoded query parameter value
check = req.get_param("check") # already has url-decoded query parameter value
if not did or not check:
raise falcon.HTTPError(falcon.HTTP_400,
"Query Parameter Error",
"Missing query parameter one of ('did','check')")
# read from database
try:
dat, ser, sig = dbing.getSelfSigned(did, dbn='demo')
except dbing.DatabaseError as ex:
raise falcon.HTTPError(falcon.HTTP_400,
'Resource Verification Error',
'Error verifying resource. {}'.format(ex))
# get verification key
index = 0
key = dat['keys'][index]['key']
# find signing key
sk = None
vk = None
for agent in agents.values(): # find match
adid, avk, ask = agent
if adid == did: # found
sk = ask
vk = avk
break
if not sk or not vk:
raise falcon.HTTPError(falcon.HTTP_400,
'Resource Verification Error',
'DID not match. {}'.format(ex))
verkey = keyToKey64u(vk)
if verkey != key:
raise falcon.HTTPError(falcon.HTTP_400,
"Invalid Key",
"Unexpected Key")
# sign and return
sig = keyToKey64u(libnacl.crypto_sign(check.encode("utf-8"), sk)[:libnacl.crypto_sign_BYTES])
signer = "{}#{}".format(did, index)
data = ODict()
data['signer'] = signer
data['check'] = check
rep.set_header("Signature", 'signer="{}"'.format(sig))
rep.set_header("Content-Type", "application/json; charset=UTF-8")
rep.status = falcon.HTTP_200 # This is the default status
rep.body = json.dumps(data, indent=2)
def loadEnds(app, store):
"""
Load endpoints for app with store reference
This function provides the endpoint resource instances
with a reference to the data store
"""
sink = StaticSink()
app.add_sink(sink, prefix=DEFAULT_STATIC_BASE_PATH)
server = ServerResource(store=store)
app.add_route('{}'.format(SERVER_BASE_PATH), server)
agent = AgentResource(store=store)
app.add_route('{}'.format(AGENT_BASE_PATH), agent)
agentDid = AgentDidResource(store=store)
app.add_route('{}/{{did}}'.format(AGENT_BASE_PATH), agentDid)
agentDrop = AgentDidDropResource(store=store)
app.add_route('{}/{{did}}/drop'.format(AGENT_BASE_PATH), agentDrop)
thing = ThingResource(store=store)
app.add_route('{}'.format(THING_BASE_PATH), thing)
thingDid = ThingDidResource(store=store)
app.add_route('{}/{{did}}'.format(THING_BASE_PATH), thingDid)
thingOffer = ThingDidOfferResource(store=store)
app.add_route('{}/{{did}}/offer'.format(THING_BASE_PATH), thingOffer)
thingAccept = ThingDidAcceptResource(store=store)
app.add_route('{}/{{did}}/accept'.format(THING_BASE_PATH), thingAccept)
anon = AnonMsgResource(store=store)
app.add_route('{}'.format(ANON_MSG_BASE_PATH), anon)
checkHid = CheckHidResource(store=store)
app.add_route('{}/check'.format(DEMO_BASE_PATH), checkHid)
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
12915,
278,
19937,
198,
198,
3041,
2257,
886,
13033,
198,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
17301,
62,
11338,
198,
198,
11748,
25064,
198,
11748,... | 1.925576 | 35,311 |
from .utils import get_request, post_request, patch_request, authorized
| [
6738,
764,
26791,
1330,
651,
62,
25927,
11,
1281,
62,
25927,
11,
8529,
62,
25927,
11,
10435,
198
] | 4 | 18 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
tsurukame()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
256,
11793,
2724,
480,
3419,
628
] | 2.090909 | 44 |
from __future__ import annotations
import ast
import os
import platform
import re
import shutil
import sys
import tarfile
import tempfile
import zipfile
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from poetry.core import __version__
from poetry.core.factory import Factory
from poetry.core.masonry.builder import Builder
if TYPE_CHECKING:
from pytest_mock import MockerFixture
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(autouse=True)
@pytest.mark.skipif(
sys.platform == "win32"
and sys.version_info <= (3, 6)
or platform.python_implementation().lower() == "pypy",
reason="Disable test on Windows for Python <=3.6 and for PyPy",
)
@pytest.mark.skipif(
sys.platform == "win32"
and sys.version_info <= (3, 6)
or platform.python_implementation().lower() == "pypy",
reason="Disable test on Windows for Python <=3.6 and for PyPy",
)
@pytest.mark.skipif(
sys.platform == "win32"
and sys.version_info <= (3, 6)
or platform.python_implementation().lower() == "pypy",
reason="Disable test on Windows for Python <=3.6 and for PyPy",
)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
6468,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
302,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
11748,
13422,
7753,
198,
11748,
20218,
7753,
198,
11748,
19974,
7753,
198,
1... | 2.899244 | 397 |
import numpy as np
from numpy.linalg import norm
if __name__ == '__main__':
# A = np.array([
# [10, -1, 2, 0],
# [-1, 11, -1, 3],
# [2, -1, 10, -1],
# [0, 3, -1, 8]
# ])
# b = np.array([6, 25, -11, 15])
# XO = np.zeros([4, 1])
A = np.array([
[4, 3, 0],
[3, 4, -1],
[0, -1, 4]
])
b = np.array([24, 30, -24])
XO = np.ones([3, 1])
N = 30
TOL = 1e-6
sol = linearSys(A, b)
print('Jacobi:', sol.JacobiIterative(XO, N, TOL))
print('Gauss-Seidel:', sol.GaussSeidelIterative(XO, N, TOL))
print('SOR:', sol.SOR(XO, N, TOL, 1.241)) | [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
2593,
628,
220,
220,
220,
220,
220,
220,
220,
220,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
317,
796,... | 1.718085 | 376 |
hour = 12
minutes = 30
res = Solution().angleClock(hour, minutes)
print(res) | [
198,
198,
9769,
796,
1105,
198,
1084,
1769,
796,
1542,
198,
411,
796,
28186,
22446,
9248,
44758,
7,
9769,
11,
2431,
8,
198,
4798,
7,
411,
8
] | 2.888889 | 27 |
# !/uer/bin/env python3
# coding=utf-8
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from base.log import logged
| [
2,
5145,
14,
15573,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
28,
40477,
12,
23,
198,
11748,
640,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13... | 3.411111 | 90 |
import numpy as np
import _lib.pr_func as pr
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4808,
8019,
13,
1050,
62,
20786,
355,
778,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
198
] | 2.433333 | 30 |
'''
slrfield cpf subpackage
This subpackage defines the following functions:
# cpf_download.py
download_bycurrent - Download the latest CPF ephemeris files at the current moment
download_bydate - Download the latest CPF ephemeris files before a specific time
cpf_download - It combines the features of download_bycurrent and download_bydate.
If the date and time are not given, then the latest cpf ephemeris files at the current time are downloaded;
otherwise, download the latest cpf ephemeris files before the given date and time.
try_download - Connect to the server and try to download the cpf ephemeris files
# cpf_read.py
read_cpf - Parse a single CPF ephemeris file
read_cpfs - Parse a set of CPF ephemeris files
# cpf_interpolate.py
cpf_interpolate - Interpolate the CPF ephemeris and make the prediction
interp_ephem - Interpolate the CPF ephemeris
itrs2horizon - Convert cartesian coordinates of targets in ITRF to spherical coordinates in topocentric reference frame for a specific station
iso2sod - Calculate Second of Day from 'iso-formatted' time sets, such as '2017-01-02 03:04:05.678'
''' | [
7061,
6,
198,
6649,
81,
3245,
31396,
69,
850,
26495,
198,
198,
1212,
850,
26495,
15738,
262,
1708,
5499,
25,
198,
198,
2,
31396,
69,
62,
15002,
13,
9078,
220,
198,
198,
15002,
62,
1525,
14421,
532,
10472,
262,
3452,
16932,
37,
2462,... | 3.428135 | 327 |
from trace_gen_base import TraceGenBase
# main()
if __name__ == "__main__":
tg = TestReorder()
tg.generate()
| [
6738,
12854,
62,
5235,
62,
8692,
1330,
34912,
13746,
14881,
628,
198,
198,
2,
1388,
3419,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
256,
70,
796,
6208,
3041,
2875,
3419,
198,
220,
256,
70,
13,
8612,
378... | 2.577778 | 45 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet
from mms.model_service.gluon_vision_service import GluonVisionService
class PretrainedAlexnetService(GluonVisionService):
"""
Pretrained alexnet Service
"""
| [
2,
15069,
2864,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
2845,
287... | 3.57346 | 211 |
import torch
from torch import nn
from .util import calculate_iou
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
764,
22602,
1330,
15284,
62,
72,
280,
628,
198
] | 3.45 | 20 |
# -*- coding: utf-8 -*-
"""
====================================================================
Custom NumPy Quaternion class (:mod:`sknano.core.math._quaternion`)
====================================================================
.. currentmodule:: sknano.core.math._quaternion
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
# import copy
import numbers
import warnings
import numpy as np
np.seterr(all='warn')
# from ._point import Point
# from ._transforms import rotate, transformation_matrix
from ._vector import Vector
__all__ = ['Quaternion']
class Quaternion(np.ndarray):
"""Abstract object representation of a quaternion.
Parameters
----------
dtype : data-type, optional
copy : bool, optional
Examples
--------
"""
__array_priority__ = 15.0
_verbosity = 0
__radd__ = __add__
__rmul__ = __mul__
@classmethod
@property
def w(self):
"""Real component of `Quaternion`."""
return self.real
@property
def v(self):
"""Vector of imaginary components of `Quaternion`."""
return Vector(self.imag)
@property
def real(self):
"""Real part :math:`w` of `Quaternion`."""
return self[0]
@property
def imag(self):
""":class:`~python:list` of `Quaternion` imaginary components \
:math:`x,y,z`, with imaginary part \
:math:`x\\mathbf{i}+y\\mathbf{j}+z\\mathbf{k}`."""
return self.__array__()[1:].tolist()
@property
@property
@property
@property
def axis(self):
"""Rotation axis."""
return Vector([self.x, self.y, self.z]) / np.sin(np.arccos(self.w))
@property
def angle(self):
"""Rotation angle."""
return 2 * np.arccos(self.w)
@property
@property
@property
@property
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
23926,
1421,
198,
15022,
31835,
20519,
2264,
9205,
295,
1398,
357,
25,
4666,
25,
63,
8135,
77,
5733,
13,
7295,
13,
11018,
13557,
421,
9205,
295,
63,
8,
198... | 2.605653 | 743 |
""""""
from collections import OrderedDict
import copy
import inspect
import logging
from neurodocker.generators.common import _add_to_entrypoint
from neurodocker.generators.common import _get_json_spec_str
from neurodocker.generators.common import _installation_implementations
from neurodocker.generators.common import _install
from neurodocker.generators.common import _Users
from neurodocker.generators.common import ContainerSpecGenerator
from neurodocker.generators.common import NEURODOCKER_ENTRYPOINT
logger = logging.getLogger(__name__)
| [
15931,
15931,
15931,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
4866,
198,
11748,
10104,
198,
11748,
18931,
198,
198,
6738,
7669,
45986,
13,
8612,
2024,
13,
11321,
1330,
4808,
2860,
62,
1462,
62,
13000,
4122,
198,
67... | 3.704698 | 149 |
# switching to the file logger
from os.path import join, exists, dirname, abspath
from os import remove, getcwd
from pyomexmeta import RDF, Logger
cellml = '''<?xml version=\"1.1\" encoding=\"UTF-8\"?>
<model xmlns=\"http://www.cellml.org/cellml/1.1#\" xmlns:cmeta=\"http://www.cellml.org/metadata/1.0#\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns:bqs=\"http://www.cellml.org/bqs/1.0#\" xmlns:semsim=\"http://bime.uw.edu/semsim/#\" xmlns:dc=\"http://purl.org/dc/terms/\" xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" name=\"annotation_examples\" cmeta:id=\"annExamples\">
<component name=\"main\">
<variable cmeta:id=\"main.Volume\" initial_value=\"100\" name=\"Volume\" units=\"dimensionless\" />
<variable cmeta:id=\"main.MembraneVoltage\" initial_value=\"-80\" name=\"MembraneVoltage\" units=\"dimensionless\" />
<variable cmeta:id=\"main.ReactionRate\" initial_value=\"1\" name=\"ReactionRate\" units=\"dimensionless\" />
</component>
</model>'''
logger_file = join(getcwd(), "log.log")
print(f"Logger file is: \"{logger_file}\"")
# if already exists, remove
if exists(logger_file):
remove(logger_file)
assert not exists(logger_file)
# activate the file logger
Logger.file_logger(logger_file)
rdf = RDF.from_string(cellml, syntax="turtle") # nothing is emitted to console
# ensure logging content has been written to disk
Logger.flush()
# now check logger_file
print(f"Reading logging data from \"{logger_file}\":")
with open(logger_file) as f:
print(f.read())
# now activate the console logger again
Logger.console_logger()
print("Switching back to the console logger:")
rdf = RDF.from_string(cellml, syntax="turtle") # and our log message is back
| [
2,
15430,
284,
262,
2393,
49706,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
7160,
11,
26672,
3672,
11,
2352,
6978,
198,
6738,
28686,
1330,
4781,
11,
651,
66,
16993,
198,
6738,
12972,
462,
87,
28961,
1330,
371,
8068,
11,
5972,
1362,
... | 2.5625 | 720 |
from django.urls import path
from .views import AboutPageView, ContactUsView, HomePageView
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('about/', AboutPageView.as_view(), name='about'),
path('contact-us/', ContactUsView.as_view(), name='contact_us'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
7994,
9876,
7680,
11,
14039,
5842,
7680,
11,
5995,
9876,
7680,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5995,
9876,
768... | 2.9 | 100 |
from vardbg import ansi
from .misc import knapsack
from .run import debug_func
# | Item | Weight | Value |
# |------|--------|-------|
# | 1 | 2 | 1 |
# | 2 | 10 | 20 |
# | 3 | 3 | 3 |
# | 4 | 6 | 14 |
# | 5 | 18 | 100 |
# Put a placeholder 0 weight, 0 value item to max
# these line up better with the 1D memoization table K
KS_WEIGHTS = [0, 2, 10, 3, 6, 18]
KS_VALUES = [0, 1, 20, 3, 14, 100]
KS_TOTAL_WEIGHT = 15
| [
6738,
410,
446,
35904,
1330,
9093,
72,
198,
198,
6738,
764,
44374,
1330,
638,
1686,
441,
198,
6738,
764,
5143,
1330,
14257,
62,
20786,
198,
198,
2,
930,
9097,
930,
14331,
930,
11052,
930,
198,
2,
930,
23031,
91,
982,
91,
26866,
91,
... | 2.164384 | 219 |
from discord import Intents
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from discord.ext.commands import Bot as BotBase
from dotenv import load_dotenv
from discord import Embed
import os
from datetime import datetime
from discord.ext.commands import Bot as BotBase
from discord.ext.commands import CommandNotFound
import dotenv
# from six import create_bound_method
load_dotenv()
PREFIX ="%"
OWNER_IDS = [759826655400951808]
KiM3X = Bot()
# KiM3X.run() | [
6738,
36446,
1330,
2558,
658,
220,
198,
6738,
257,
862,
1740,
18173,
13,
1416,
704,
377,
364,
13,
292,
13361,
952,
1330,
220,
1081,
13361,
40,
2640,
1740,
18173,
198,
6738,
257,
862,
1740,
18173,
13,
2213,
328,
5355,
13,
66,
1313,
1... | 3.052632 | 171 |
#!/usr/bin/env python3
__author__ = "Patrick Reckeweg"
__copyright__ = ""
import matplotlib as mlp
import matplotlib.pyplot as plt
from random import *
import array as arr
import miller_rabbin as ml
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
834,
9800,
834,
220,
220,
220,
220,
220,
796,
366,
32718,
3311,
365,
732,
70,
1,
198,
834,
22163,
4766,
834,
220,
220,
796,
13538,
198,
198,
11748,
2603,
29487,
8019,
355,
25962,... | 2.736842 | 76 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
run all flux convert and inventory runs
Things to change if rerun is needed:
- path
- volumesandmats.txt
"""
import matplotlib.pyplot as plt
import os
import numpy as np
outputfile='NESSA-Neutron-Openo'
path='NESSA0505open'
sourceRate=1e11 #1/s
materials={}
materials['11']={'density': 7.874, 'ref': 'ironRef'}
materials['12']={'density': 11.34, 'ref': 'leadRef'}
materials['3']={'density': 2.30, 'ref': 'concrete3Ref'}
materials['5']={'density': 0.96, 'ref': 'plasticRef'}
materials['6']={'density': 3.35, 'ref': 'concrete6Ref'}
materials['7']={'density': 3.9, 'ref': 'concrete7Ref'}
tallies=[line.strip().split()[1] for line in os.popen('grep 1tally %s'%outputfile).readlines() if line.strip().split()[1].isdecimal()]
tally={}
#plt.figure()
for i in tallies:
tally[i]={}
tally[i]['energy'],tally[i]['flux'],tally[i]['error'],tally[i]['total'],tally[i]['totalerr'],tally[i]['volume']=tallyread('%s'%outputfile,i)
if i not in ['4','14','24','34']:
volmat=os.popen('grep -w %s volumesandmats.txt'%i[:-1]).readlines()[0].strip().split()
tally[i]['mat']=volmat[2]
tally[i]['density']=materials[volmat[2]]['density']
tally[i]['mass']=tally[i]['volume']*tally[i]['density']
elif i =='14':
volmat=os.popen('grep -w %s volumesandmats.txt'%1310).readlines()[0].strip().split()
tally[i]['mat']=volmat[2]
tally[i]['density']=materials[volmat[2]]['density']
tally[i]['mass']=tally[i]['volume']*tally[i]['density']
elif i =='24':
volmat=os.popen('grep -w %s volumesandmats.txt'%2430).readlines()[0].strip().split()
tally[i]['mat']=volmat[2]
tally[i]['density']=materials[volmat[2]]['density']
tally[i]['mass']=tally[i]['volume']*tally[i]['density']
elif i =='34':
volmat=os.popen('grep -w %s volumesandmats.txt'%2060).readlines()[0].strip().split()
tally[i]['mat']=volmat[2]
tally[i]['density']=materials[volmat[2]]['density']
tally[i]['mass']=tally[i]['volume']*tally[i]['density']
else:
volmat=os.popen('grep -w %s volumesandmats.txt'%2310).readlines()[0].strip().split()
tally[i]['mat']=volmat[2]
tally[i]['density']=materials[volmat[2]]['density']
tally[i]['mass']=tally[i]['volume']*tally[i]['density']
print('-----')
print(sum(tally[i]['flux']),tally[i]['total'],tally[i]['totalerr'])
print(tally[i]['volume'])
print(tally[i]['mass'])
print(tally[i]['density'])
print(tally[i]['mat'])
if tally[i]['totalerr']<1/100:
tally[i]['fluxes']='/home/zsolt/FISPACT-II/%s/flux_convert_tally%s/fluxes'%(path,i)
else:
tally[i]['fluxes']='/home/zsolt/FISPACT-II/%s/flux_convert_tally%s/fluxes'%(path,'30004')
#####
#
# FLux convert runs
#
#####
for i in tally.keys():
en=np.flip(1e6*tally[i]['energy'])
flux=np.flip(tally[i]['flux'][1:]) #dropping the 710th group 0-1e-11
arbstr=''
for eg in en:
arbstr+='%.4e\n'%eg
for fl in flux:
arbstr+='%.4e\n'%fl
arbstr+='1.00\nNessa Spectrum'
os.chdir('/home/zsolt/FISPACT-II/%s'%path)
os.mkdir('flux_convert_tally%s'%i)
os.system('cp fluxconvertRef/files.convert flux_convert_tally%s/files.convert'%i)
os.system('cp fluxconvertRef/convert.i flux_convert_tally%s/convert.i'%i)
os.system('cp fluxconvertRef/fisprun.sh flux_convert_tally%s/fisprun.sh'%i)
filename='flux_convert_tally%s/arb_flux'%i
arbfile=open(filename,'w')
arbfile.write(arbstr)
arbfile.close()
os.chdir('/home/zsolt/FISPACT-II/%s/flux_convert_tally%s'%(path,i))
os.system('./fisprun.sh')
#####
#
# Inventory runs
#
#####
for i in tally.keys():
os.chdir('/home/zsolt/FISPACT-II/%s'%path)
os.mkdir('inventory%s'%i)
os.system('cp collapse.i inventory%s/collapse.i'%i)
os.system('cp condense.i inventory%s/condense.i'%i)
os.system('cp print_lib.i inventory%s/print_lib.i'%i)
os.system('cp fisprun.sh inventory%s/fisprun.sh'%i)
os.system('cp files inventory%s/files'%i)
os.system('cp %s inventory%s/fluxes'%(tally[i]['fluxes'],i))
with open ('/home/zsolt/FISPACT-II/%s/'%path+materials[tally[i]['mat']]['ref'], "r") as reffile:
inpRef=reffile.read()
inpRef=inpRef.replace('MassStr',str(tally[i]['mass']/1000))
fluxi=sourceRate*(tally[i]['total']+tally[i]['total']*tally[i]['totalerr'])
inpRef=inpRef.replace('FluxStr',str(fluxi))
filename='inventory%s/inventory.i'%i
invfile=open(filename,'w')
invfile.write(inpRef)
invfile.close()
print('-----------------')
print(i)
print('-----------------')
os.chdir('/home/zsolt/FISPACT-II/%s/inventory%s'%(path,i))
os.system('./fisprun.sh')
os.system('rm ARRAYX')
os.system('rm COLLAPX')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
5143,
477,
28462,
10385,
290,
13184,
4539,
198,
198,
22248,
284,
1487,
611,
302,
5143,
318,
2622,
25,
... | 2.012397 | 2,420 |
#!/usr/bin/env python
import click
from influxdb import InfluxDBClient
import os
from dateutil import parser
from datetime import date, datetime, timedelta
import time
import requests
from urllib import parse
@click.command()
if __name__ == '__main__':
monitor()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
3904,
198,
6738,
25065,
9945,
1330,
4806,
22564,
11012,
11792,
198,
11748,
28686,
198,
6738,
3128,
22602,
1330,
30751,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
11,
28805,
... | 3.147727 | 88 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sqlalchemy import and_, desc
from flask_login import current_user
from vhoops import db, logging
from vhoops.wrappers.exceptions import handle_exception, ItemNotFoundError, DependencyError
from vhoops.modules.teams.api.models import Teams
@handle_exception
@handle_exception
@handle_exception
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
44161,
282,
26599,
1330,
290,
62,
11,
1715,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
198,
6738,
41... | 3.034783 | 115 |
import logging
from operator import eq
from sqlalchemy import update, MetaData, Table, and_, or_, delete
from sqlalchemy.dialects.mysql import insert
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from watchmen.common.mysql.model.table_definition import get_table_model, get_primary_key, parse_obj, count_table
from watchmen.common.mysql.mysql_engine import engine
from watchmen.common.storage.storage_template import DataPage
from watchmen.common.utils.data_utils import build_data_pages
from watchmen.common.utils.data_utils import convert_to_dict
log = logging.getLogger("app." + __name__)
log.info("mysql template initialized")
| [
11748,
18931,
198,
6738,
10088,
1330,
37430,
198,
198,
6738,
44161,
282,
26599,
1330,
4296,
11,
30277,
6601,
11,
8655,
11,
290,
62,
11,
393,
62,
11,
12233,
198,
6738,
44161,
282,
26599,
13,
38969,
478,
82,
13,
28744,
13976,
1330,
7550... | 3.396985 | 199 |
import json
import azure.functions as func
| [
11748,
33918,
201,
198,
11748,
35560,
495,
13,
12543,
2733,
355,
25439,
201,
198,
220,
220,
220,
220
] | 2.722222 | 18 |
from sklear import metics
| [
6738,
1341,
3238,
1330,
1138,
873,
628
] | 3.857143 | 7 |
import itertools
import pytest
from fastweather.utils import (
get_current_percentage,
extract_info,
load_cities_in_chunks,
parse_weather_request,
)
def test_load_cities_in_chunks_raises_error():
"""Raises error when `chunks` value is larger than range(1, 20 <inclusive>)"""
with pytest.raises(ValueError):
generator = load_cities_in_chunks("tests/test_cities_id.txt", chunks=30)
next(generator)
| [
11748,
340,
861,
10141,
198,
11748,
12972,
9288,
198,
6738,
3049,
23563,
13,
26791,
1330,
357,
198,
220,
220,
220,
651,
62,
14421,
62,
25067,
496,
11,
198,
220,
220,
220,
7925,
62,
10951,
11,
198,
220,
220,
220,
3440,
62,
66,
871,
... | 2.554913 | 173 |
import json
import pytest
from brownie.network import web3, accounts
from tests.fixtures.depositor_fixtures import (
DEPOSITOR_BASE_FIXTURES, DEPOSITOR_FIXTURES_WITH_HIGH_GAS,
DEPOSITOR_FIXTURES_WITH_DEPOSIT_PROHIBIT, DEPOSITOR_FIXTURES_NOT_ENOUGH_BUFFERED_ETHER,
DEPOSITOR_BASE_FIXTURES_SMALL_BALANCE, DEPOSITOR_FIXTURES_NO_FREE_KEYS,
)
from tests.fixtures.gas_fee_fixtures import GAS_FEE_FIXTURES
from tests.fixtures.pause_bot_fixtures import PAUSE_BOT_FIXTURES, PAUSED_PROTOCOL_FIXTURES
from tests.utils.mock_provider import MockProvider
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
| [
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
6738,
7586,
494,
13,
27349,
1330,
3992,
18,
11,
5504,
198,
198,
6738,
5254,
13,
69,
25506,
13,
10378,
418,
2072,
62,
69,
25506,
1330,
357,
198,
220,
220,
220,
5550,
37997,
2043,
1581,
... | 2.419903 | 412 |
#!/usr/bin/env python
"""
Fill the "result" table with historic results
(results_xxyy_with_gw.csv).
"""
import argparse
import os
from airsenal.framework.mappings import alternative_team_names
from airsenal.framework.schema import Result, session_scope, session
from airsenal.framework.data_fetcher import FPLDataFetcher
from airsenal.framework.utils import (
NEXT_GAMEWEEK,
get_past_seasons,
find_fixture,
CURRENT_SEASON,
)
def make_result_table(seasons=[], dbsession=session):
"""
past seasons - read results from csv
"""
if not seasons:
seasons = get_past_seasons(3)
for season in seasons:
inpath = os.path.join(
os.path.dirname(__file__), "../data/results_{}_with_gw.csv".format(season)
)
infile = open(inpath)
fill_results_from_csv(infile, season, dbsession)
"""
current season - use API
"""
gw_end = NEXT_GAMEWEEK
fill_results_from_api(1, gw_end, CURRENT_SEASON, dbsession)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="fill table of match results")
parser.add_argument("--input_type", help="csv or api", default="csv")
parser.add_argument("--input_file", help="input csv filename")
parser.add_argument(
"--season",
help="if using a single csv, specify the season",
type=str,
default=None,
)
parser.add_argument(
"--gw_start", help="if using api, which gameweeks", type=int, default=1
)
parser.add_argument("--gw_end", help="if using api, which gameweeks", type=int)
args = parser.parse_args()
with session_scope() as dbsession:
make_result_table(dbsession=dbsession)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
33762,
262,
366,
20274,
1,
3084,
351,
9566,
2482,
198,
7,
43420,
62,
5324,
22556,
62,
4480,
62,
70,
86,
13,
40664,
737,
198,
37811,
198,
198,
11748,
1822,
29572,
198... | 2.513991 | 679 |
from .wbits import Wonderbits
| [
6738,
764,
86,
9895,
1330,
12902,
9895,
628,
198,
220,
220,
220,
220
] | 2.769231 | 13 |
def fruit_distribution(s,n):
"""
In this task, you will be given a string that represents a number of apples and oranges
that are distributed in a basket of fruit this basket contains
apples, oranges, and mango fruits. Given the string that represents the total number of
the oranges and apples and an integer that represent the total number of the fruits
in the basket return the number of the mango fruits in the basket.
for examble:
fruit_distribution("5 apples and 6 oranges", 19) ->19 - 5 + 6 = 8
fruit_distribution("0 apples and 1 oranges",3) -> 3 - 0 + 1 = 2
fruit_distribution("2 apples and 3 oranges", 100) -> 100 - 2 + 3 = 95
fruit_distribution("100 apples and 1 oranges",120) -> 120 - 100 + 1 = 19
Example solution:
# line 1
lis = list()
# line 2
for i in s.split(' '):
# line 3
if s.isdigit():
# line 4
lis.append(int(i))
# line 5
return n - sum(lis)
"""
# Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4
# END OF CONTEXT
print("3")
# END OF SOLUTION
if __name__ == '__main__':
check(fruit_distribution)
| [
4299,
8234,
62,
17080,
3890,
7,
82,
11,
77,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
554,
428,
4876,
11,
345,
481,
307,
1813,
257,
4731,
326,
6870,
257,
1271,
286,
22514,
290,
48389,
220,
198,
220,
220,
220,
326,
389,
... | 2.782805 | 442 |
import pytest
import datetime
import os
from pyquery import PyQuery
from factories import EmailListFactory, MessageFactory, ThreadFactory
from mlarchive.archive.views_static import (rebuild_static_index,
link_index_page, build_static_pages, is_small_year)
"""
@pytest.mark.django_db(transaction=True)
def test_rebuild_static_index(static_list):
rebuild_static_index(static_list)
assert True
@pytest.mark.django_db(transaction=True)
def test_build_static_pages(static_list, settings, static_dir):
settings.STATIC_INDEX_YEAR_MINIMUM = 20
build_static_pages(static_list)
path = os.path.join(static_dir, static_list.name)
assert '2017.html' in os.listdir(path)
@pytest.mark.django_db(transaction=True)
def test_link_index_page(static_list, settings, static_dir):
settings.STATIC_INDEX_YEAR_MINIMUM = 20
build_static_pages(static_list)
link_index_page(static_list)
path = os.path.join(static_dir, static_list.name)
assert 'index.html' in os.listdir(path)
assert 'thread.html' in os.listdir(path)
@pytest.mark.django_db(transaction=True)
def test_write_index():
assert True
@pytest.mark.django_db(transaction=True)
def test_update_static_index_thread(static_list, settings):
settings.STATIC_INDEX_MESSAGES_PER_PAGE = 10
today = datetime.datetime.today()
old_thread = static_list.thread_set.filter(date__year=2015).first()
rebuild_static_index()
files = os.listdir(os.path.join(settings.STATIC_INDEX_DIR, 'public'))
MessageFactory.create(email_list=static_list, subject="tribulations", date=today, thread=old_thread)
update_static_index(static_list)
path = os.path.join(settings.STATIC_INDEX_DIR, static_list.name, '2015.html')
with open(path) as f:
data = f.read()
assert 'tribulations' in data
@pytest.mark.django_db(transaction=True)
def test_update_static_index_date(static_list, settings):
settings.STATIC_INDEX_MESSAGES_PER_PAGE = 10
date = datetime.datetime(2017,5,1)
rebuild_static_index()
MessageFactory.create(email_list=static_list, subject="tribulations", date=date)
update_static_index(static_list)
path = os.path.join(settings.STATIC_INDEX_DIR, static_list.name, '2017-05.html' )
with open(path) as f:
data = f.read()
assert 'tribulations' in data
@pytest.mark.django_db(transaction=True)
def test_build_msg_pages(messages, static_dir):
email_list = EmailList.objects.get(name='pubone')
message = messages.filter(email_list=email_list).first()
build_msg_pages(email_list)
assert 'pubone' in os.listdir(settings.STATIC_INDEX_DIR)
assert message.hashcode.strip('=') + '.html' in os.listdir(os.path.join(settings.STATIC_INDEX_DIR, 'pubone'))
"""
| [
11748,
12972,
9288,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
6738,
12972,
22766,
1330,
9485,
20746,
198,
6738,
17590,
1330,
9570,
8053,
22810,
11,
16000,
22810,
11,
14122,
22810,
198,
6738,
25962,
17474,
13,
17474,
13,
33571,
... | 2.63163 | 1,037 |
# Copyright (c) 2015 Microsoft Corporation
"""
>>> from z3 import *
>>> x = Int('x')
>>> s = MCSatCore()
>>> s.check()
sat
>>> s.add(x != x)
>>> s.check() # doctest: +ELLIPSIS
Traceback (most recent call last):
Z3Exception: ...
>>> s = MCSatCore()
>>> s.add_tactic('simplify')
>>> s.check()
sat
>>> s.add(x != x)
>>> s.check()
unsat
>>> s = MCSatCore()
>>> p, q, r = Bools('p q r')
>>> s.add_tactic('simplify')
>>> s.add_tactic('solve-eqs')
>>> s.set(unsat_core=True)
>>> s.add_and_track(x == 1, p)
>>> s.add_and_track(x < 0, q)
>>> s.add_and_track(x > -10, r)
>>> s.check()
unsat
>>> core = s.unsat_core()
>>> len(core)
2
>>> eq_core(core, [p, q])
True
"""
if __name__ == "__main__":
import doctest
if doctest.testmod().failed:
exit(1)
| [
198,
2,
15069,
357,
66,
8,
1853,
5413,
10501,
198,
37811,
198,
33409,
422,
1976,
18,
1330,
1635,
198,
33409,
2124,
796,
2558,
10786,
87,
11537,
198,
33409,
264,
796,
337,
7902,
265,
14055,
3419,
198,
33409,
264,
13,
9122,
3419,
198,
... | 2.25 | 336 |
# Pok On Cheng 74157306 and Zongheng Ma 16285673. ICS 31 Lab sec 5. Lab Asst 2.
print('Part (c)')
print('How many hours?')
hours = int(input())
print('This many hours:', hours)
print('How many dollars per hour?')
rate = int(input())
print('This many dollars per hour: ', rate)
print('Weekly salary: ', hours * rate)
print(' ')
hours = int(input('How many hours?'))
print('This many hours:', hours)
rate = int(input('How many dollars per hours?'))
print('This many dollars per hour: ', rate)
print('Weekly salary: ', hours * rate)
print(' ')
name = input('Hello. What is your name? ')
print('Hello, ', name)
print('It\'s nice to meet you.')
age = int(input('How old are you? '))
print('Next year you will be', age + 1, 'years old.')
print('Good-bye!')
print('\n')
print('Part (d)')
KRONE_PER_EURO = 7.46
KRONE_PER_POUND = 8.81
KRONE_PER_DOLLAR = 5.50
print('Please provide this information:')
businessName = input('Business name: ')
numberOfEuros = int(input('Number of euros: '))
numberOfPounds = int(input('Number of pounds: '))
numberOfDollars = int(input('Number of dollars: '))
print(' ')
print('Copenhagen Chamber of Commerce')
print('Business name: ', businessName)
print(numberOfEuros, 'euros is', numberOfEuros * KRONE_PER_EURO, 'krone')
print(numberOfPounds, 'pounds is', numberOfPounds * KRONE_PER_POUND, 'krone')
print(numberOfDollars, 'dollars is', numberOfDollars * KRONE_PER_DOLLAR, 'krone')
print(' ')
print('Total krone: ', numberOfEuros * KRONE_PER_EURO + numberOfPounds * KRONE_PER_POUND + numberOfDollars * KRONE_PER_DOLLAR)
print('\n')
print('Part (e)')
from collections import namedtuple
Book = namedtuple('Book', 'title author year price')
favorite = Book('Adventures of Sherlock Holmes', 'Arthur Conan Doyle', 1892, 21.50)
another = Book('Memoirs of Sherlock Holmes', 'Arthur Conan Doyle', 1894, 23.50)
still_another = Book('Return of Sherlock Holmes', 'Arthur Conan Doyle', 1905, 25.00)
print('still_another.title')
print(still_another.title)
print(' ')
print('another.price')
print(another.price)
print(' ')
print("float((favorite.price + another.price + still_another.price) / 3)")
print(float((favorite.price + another.price + still_another.price) / 3))
print(' ')
print('favorite.year < 1900')
print(favorite.year < 1900)
print(' ')
still_another_change1 = Book('Return of Sherlock Holmes', 'Arthur Conan Doyle', 1905, 25.00 + 1)
print("still_another_change1 = Book('Return of Sherlock Holmes', 'Arthur Conan Doyle', 1905, 25.00 + 1)")
print(still_another_change1.price)
print(' ')
still_another_change2 = Book('Return of Sherlock Holmes', 'Arthur Conan Doyle', 1905, 25.00*(1 + 0.2))
print("still_another_change2 = Book('Return of Sherlock Holmes', 'Arthur Conan Doyle', 1905, 25.00*(1 + 0.2))")
print(still_another_change2.price)
print('\n')
print('Part (f)')
from collections import namedtuple
Animal = namedtuple('Animal', 'name species age weight favoriteFood')
elephant = Animal('Jumbo', 'Large Mammals', 50, 1000, 'Peanuts')
platypus = Animal('Perry', 'Semiaquatic Mammal', 7, 1.7, 'Shrimp')
print('elephant.weight < platypus.weight')
print(elephant.weight < platypus.weight)
print('\n')
print('Part (g)')
booklist = [favorite, another, still_another]
print('booklist[0].price < booklist[1].price')
print(booklist[0].price < booklist[1].price)
print(' ')
print('booklist[0].year > booklist[2].year')
print(booklist[0].year > booklist[2].year)
print('\n')
print('Part (h)')
from collections import namedtuple
Restaurant = namedtuple('Restaurant', 'name cuisine phone dish price')
# Restaurant attributes: name, kind of food served, phone number, best dish, price of that dish
RC = [
Restaurant("Thai Dishes", "Thai", "334-4433", "Mee Krob", 12.50),
Restaurant("Nobu", "Japanese", "335-4433", "Natto Temaki", 5.50),
Restaurant("Nonna", "Italian", "355-4433", "Stracotto", 25.50),
Restaurant("Jitlada", "Thai", "324-4433", "Paht Woon Sen", 15.50),
Restaurant("Nola", "New Orleans", "336-4433", "Jambalaya", 5.50),
Restaurant("Noma", "Modern Danish", "337-4433", "Birch Sap", 35.50),
Restaurant("Addis Ababa", "Ethiopian", "337-4453", "Yesiga Tibs", 10.50) ]
print('RC[2].name')
print(RC[2].name)
print(' ')
print('RC[0].cuisine == RC[3].cuisine')
print(RC[0].cuisine == RC[3].cuisine)
print(' ')
print('RC[-1].price')
print(RC[-1].price)
print(' ')
print('RC.sort()')
RC.sort()
print(RC[0])
print(RC[1])
print(RC[2])
print(RC[3])
print(RC[4])
print(RC[5])
print(RC[6])
print(' ')
print('RC.reverse()')
RC.reverse()
print(RC[0])
print(RC[1])
print(RC[2])
print(RC[3])
print(RC[4])
print(RC[5])
print(RC[6])
print(' ')
RC.sort()
rcc = [RC[0], RC[1]]
rcc.extend(RC[-2])
rcc.extend(RC[-1])
print(rcc)
print('\n')
print('Part (i)')
import turtle
s26 = turtle.Screen()
t26 = turtle.Turtle()
t26.forward(100)
t26.left(90)
t26.forward(100)
t26.left(90)
t26.forward(100)
t26.left(90)
t26.forward(100)
t26.left(90)
s26.exitonclick()
s27 = turtle.Screen()
t27 = turtle.Turtle()
t27.right(45)
t27.forward(100)
t27.right(90)
t27.forward(100)
t27.right(90)
t27.forward(100)
t27.right(90)
t27.forward(100)
t27.right(90)
t27.left(45)
s27.exitonclick()
sides = int(input('How many sides do you want(Must be bigger than 3): '))
angles = int(((sides - 2) * 180) / sides)
angles = 180 - angles
s28 = turtle.Screen()
t28 = turtle.Turtle()
while sides > 0:
t28.forward(100)
t28.left(angles)
sides = sides - 1
s28.exitonclick()
s30 = turtle.Screen()
t30 = turtle.Turtle()
t30.circle(125)
t30.penup()
t30.left(90)
t30.forward(25)
t30.right(90)
t30.pendown()
t30.circle(100)
t30.penup()
t30.left(90)
t30.forward(25)
t30.right(90)
t30.pendown()
t30.circle(75)
t30.penup()
t30.left(90)
t30.forward(25)
t30.right(90)
t30.pendown()
t30.circle(50)
t30.penup()
t30.right(90)
t30.forward(75)
t30.left(90)
t30.pendown()
s30.exitonclick()
s32 = turtle.Screen()
t32 = turtle.Turtle()
t32.penup()
t32.right(90)
t32.forward(109)
t32.left(90)
t32.pendown()
t32.circle(109)
t32.penup()
t32.left(90)
t32.forward(109)
t32.right(90)
t32.forward(200)
t32.pendown()
t32.circle(1)
s32.exitonclick()
print('\n')
print('Part (j)')
sj = turtle.Screen()
tj = turtle.Turtle()
##tj.penup()
##tj.right(90)
##tj.forward(150)
##tj.left(90)
##tj.pendown()
##tj.circle(150)
##tj.penup()
##tj.left(90)
##tj.forward(150)
##tj.right(90)
##tj.pendown()
tj.dot(300, "light blue")
tj.dot(150, "black")
tj.penup()
tj.left(180)
tj.forward(270)
tj.left(120)
tj.pendown()
tj.circle(320,120)
tj.left(60)
tj.circle(320,120)
sj.exitonclick()
| [
2,
8175,
1550,
27692,
8915,
18458,
20548,
290,
1168,
506,
31753,
6669,
1467,
26279,
45758,
13,
314,
7902,
3261,
3498,
792,
642,
13,
3498,
1081,
301,
362,
13,
201,
198,
4798,
10786,
7841,
357,
66,
8,
11537,
201,
198,
4798,
10786,
2437,... | 2.378148 | 2,819 |
'''
reloadall.py - transitively reloads nested modules
'''
import types
from importlib import reload
if __name__ == '__main__':
import reloadall
reload_all(reloadall)
| [
7061,
6,
198,
220,
220,
220,
18126,
439,
13,
9078,
532,
1007,
1800,
306,
18126,
82,
28376,
13103,
198,
7061,
6,
198,
198,
11748,
3858,
198,
198,
6738,
1330,
8019,
1330,
18126,
198,
220,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12... | 2.830769 | 65 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 01. 奇数文字列の取り出し
print(input()[::2])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
5534,
13,
10263,
98,
229,
46763,
108,
23877,
229,
27764,
245,
26344,
245,
15474,
237,
244,
28255,
49035,
118,
... | 1.527273 | 55 |
from keras.engine.topology import Layer
class SpatialPyramidPoling(Layer):
"""Spatial pyramid pooling layer for 2D inputs.
See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,
K. He, X. Zhang, S. Ren, J. Sun
# Arguments
pool_list: list of int
# Input shape
or 4D tensor with shape:
(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
2D tensor with shape:
(samples, channels * sum([i * i for i in pool_list])
"""
| [
6738,
41927,
292,
13,
18392,
13,
4852,
1435,
1330,
34398,
198,
198,
4871,
1338,
34961,
20519,
20255,
8017,
278,
7,
49925,
2599,
198,
220,
220,
220,
37227,
4561,
34961,
27944,
5933,
278,
7679,
329,
362,
35,
17311,
13,
198,
220,
220,
22... | 2.567308 | 208 |
from setuptools import find_packages, setup
pkgs = find_packages('src')
name = pkgs[0]
setup(
author_email='tomoki.nakamaru@gmail.com',
author='Tomoki Nakamaru',
entry_points={'console_scripts': [f'{name}={name}.__main__:bibtefmt']},
install_requires=['pyparsing==3.0.3'],
license='MIT',
long_description_content_type='text/markdown',
long_description=readme(),
name=name,
package_dir={'': 'src'},
packages=pkgs,
version='.'.join(version())
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
628,
628,
198,
35339,
82,
796,
1064,
62,
43789,
10786,
10677,
11537,
198,
198,
3672,
796,
279,
10025,
82,
58,
15,
60,
198,
198,
40406,
7,
198,
220,
220,
220,
1772,
62,
12888,... | 2.440594 | 202 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the JobIdProvider class.
"""
import abc
import hashlib
import random
import sys
import time
from googlecloudsdk.calliope import exceptions
class JobIdGenerator(object):
"""Base class for job id generators."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def _DoGenerate(self, job_configuration):
"""Generates a job_id to use for job_configuration."""
class JobIdGeneratorRandom(JobIdGenerator):
"""Generates random job ids."""
class JobIdGeneratorFingerprint(JobIdGenerator):
"""Generates job ids that uniquely match the job config."""
def _Hash(self, config, sha1):
"""Computes the sha1 hash of a dict."""
keys = config.keys()
# Python dict enumeration ordering is random. Sort the keys
# so that we will visit them in a stable order.
keys.sort()
for key in keys:
sha1.update('{0}'.format(key))
v = config[key]
if isinstance(v, dict):
self._Hash(v, sha1)
elif isinstance(v, list):
for inner_v in v:
self._Hash(inner_v, sha1)
else:
sha1.update('{0}'.format(v))
class JobIdGeneratorIncrementing(JobIdGenerator):
"""Generates job ids that increment each time we're asked."""
class JobIdProvider(object):
"""Defines a method providing user-specified or randomly generated job IDs.
"""
def GetJobId(self, job_id_flag, fingerprint_job_id_flag):
"""Returns the job id or job generator from the flags."""
if fingerprint_job_id_flag and job_id_flag:
raise exceptions.InvalidArgumentException(
'The --fingerprint-job-id flag ',
'cannot be specified with the --job-id flag')
if fingerprint_job_id_flag:
return JobIdGeneratorFingerprint()
elif job_id_flag is None:
return JobIdGeneratorIncrementing(JobIdGeneratorRandom())
elif job_id_flag:
return job_id_flag
else:
# User specified a job id, but it was empty. Let the
# server come up with a job id.
return None
| [
2,
15069,
1946,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.924036 | 882 |
from enum import Enum, auto
from ..exceptions import ModelResolverError
from ..filters import uppercase_first_letter
from .flattener import JsonSchemaFlattener
from .utils import BASE, fragment_encode
UNDEFINED = "undefined"
MULTIPLE = "multiple"
FORMAT_DEFAULT = "default"
class ModelResolver:
"""This class takes in a flattened schema map (output of the JsonSchemaFlattener),
and builds a full set of models using the pseudo-types ``primitive``, ``"dict"``,
``"list"``, ``"set"``, and ``"model"``` for container information,
and JSON Schema types ``"string"``, ``"integer"``, ``"boolean"``, ``"number"``,
plus an undefined sentinel value (``"undefined"``) for value types.
This makes it easy for plugins to map the resolved (pseudo-)types to language
types during templating.
"""
def _models_from_refs(self):
"""Creates a model name for each ref_path in the flattened schema map."""
for ref_path in self.flattened_schema_map.keys():
# cannot convert this into list comprehension as self._models is used
# during the loop
self._models[ref_path] = self._get_model_name_from_ref(ref_path)
def _get_model_name_from_ref(self, ref_path):
"""Given a json schema ref, returns the best guess at a model name."""
if ref_path == ():
return self._base_model_name
class_name = base_class_from_ref(ref_path)
try:
dupe_path = next(
path for path, name in self._models.items() if name == class_name
)
except StopIteration:
return class_name
raise ModelResolverError(
"Model name conflict. "
f"'{class_name}' found at {dupe_path} and {ref_path}"
)
def resolve_models(self):
"""Iterate through each schema and create a model mapping.
:return: A mapping of models names and properties. Properties are
a mapping of property names and property types.
.. seealso:: :func:`_schema_to_lang_type`
"""
models = {}
for ref_path, sub_schema in self.flattened_schema_map.items():
class_name = self._models[ref_path]
models[class_name] = {
prop_name: self._schema_to_lang_type(prop_schema)
for prop_name, prop_schema in sub_schema["properties"].items()
}
return models
def _schema_to_lang_type(self, property_schema):
"""Return the language-specific type for a flattened schema.
If the schema is a ref, the class is determined from ``_models``.
"""
try:
ref_path = property_schema["$ref"]
except KeyError:
pass # we are not dealing with a ref, move on
else:
return ResolvedType(ContainerType.MODEL, self._models[ref_path])
schema_type = property_schema.get("type", "object")
if isinstance(
schema_type, list
): # generate a generic type Object which will be casted on the client side
if len(set(schema_type)) > 1:
return self._get_multiple_lang_type(MULTIPLE)
schema_type = schema_type[0]
if schema_type == "array":
return self._get_array_lang_type(property_schema)
if schema_type == "object":
return self._get_object_lang_type(property_schema)
return self._get_primitive_lang_type(schema_type, property_schema)
@staticmethod
def _get_array_container_type(property_schema):
"""Return True if an array has array semantics, or False for set semantics."""
insertion_order = property_schema.get("insertionOrder", True)
unique_items = property_schema.get("uniqueItems", False)
if insertion_order or not unique_items:
return ContainerType.LIST
return ContainerType.SET
@staticmethod
@staticmethod
def _get_object_lang_type(self, property_schema):
"""Resolves an object type.
* In JSON, objects must have string keys, so we are resolving the value type.
* If patternProperties is defined, the value type is determined by the schema
for the pattern. We do not care about the pattern itself, since that is only
used for validation.
* The object will never have nested properties, as that was taken care of by
flattening the schema (this isn't at all obvious from the code).
* If there are no patternProperties, it must be an arbitrary JSON type, so
we set the value type to the UNDEFINED constant for language implementations
to distinguish it from a JSON object.
"""
items = self._get_primitive_lang_type(UNDEFINED, property_schema)
try:
pattern_properties = list(property_schema["patternProperties"].items())
except KeyError:
# no pattern properties == undefined type
pass
else:
# multiple pattern props == bad schema definition == undefined type
if len(pattern_properties) == 1:
items = self._schema_to_lang_type(pattern_properties[0][1])
return ResolvedType(ContainerType.DICT, items)
def base_class_from_ref(ref_path):
"""This method determines the class_name from a ref_path
It uses json-schema heuristics to properly determine the class name
>>> base_class_from_ref(("definitions", "Foo"))
'Foo'
>>> base_class_from_ref(("properties", "foo", "items"))
'Foo'
>>> base_class_from_ref(("properties", "foo", "items", "patternProperties", "a"))
'Foo'
>>> base_class_from_ref(("properties", "items"))
'Items'
>>> base_class_from_ref(("properties", "patternProperties"))
'PatternProperties'
>>> base_class_from_ref(("properties", "properties"))
'Properties'
>>> base_class_from_ref(("definitions",))
'Definitions'
>>> base_class_from_ref(("definitions", "properties"))
'Properties'
>>> base_class_from_ref(()) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
core.exceptions.ModelResolverError:
Could not create a valid class from schema at '#'
"""
parent_keywords = ("properties", "definitions")
schema_keywords = ("items", "patternProperties", "properties")
ref_parts = ref_path[::-1]
ref_parts_with_root = ref_parts + (BASE,)
for idx, elem in enumerate(ref_parts):
parent = ref_parts_with_root[idx + 1]
if parent in parent_keywords or (
elem not in schema_keywords and parent != "patternProperties"
):
return uppercase_first_letter(elem.rpartition("/")[2])
raise ModelResolverError(
"Could not create a valid class from schema at '{}'".format(
fragment_encode(ref_path)
)
)
| [
6738,
33829,
1330,
2039,
388,
11,
8295,
198,
198,
6738,
11485,
1069,
11755,
1330,
9104,
4965,
14375,
12331,
198,
6738,
11485,
10379,
1010,
1330,
334,
39921,
589,
62,
11085,
62,
9291,
198,
6738,
764,
2704,
1078,
877,
1330,
449,
1559,
270... | 2.519809 | 2,726 |
# encoding: UTF-8
import zmq
import itchat
import time
import os
import json
from befh.subscription_manager import SubscriptionManager
from befh.OkcoinAPI.OkcoinMarket import OkcoinMarket
from befh.FinexAPI.BitfinexMarket import BitfinexMarket
import logging
import re
import random
import numpy as np
if __name__ == '__main__':
# 载入订阅交易品种信息
fileName = "subscription.ini"
path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
fileName = os.path.join(path, fileName)
subscription_instmts = SubscriptionManager(fileName).get_subscriptions()
subscription_dict = dict([('_'.join([v.exchange_name, v.instmt_name]), v) for v in subscription_instmts])
# client字典
TradeClients = {}
client = BitfinexMarket()
client.subscription_dict = subscription_dict
TradeClients[client.exchange] = client
client = OkcoinMarket()
client.subscription_dict = subscription_dict
TradeClients[client.exchange] = client
market_feed_name = "marketfeed"
context = zmq.Context()
sock = context.socket(zmq.SUB)
# sock.connect("ipc://%s" % market_feed_name)
sock.connect("tcp://127.0.0.1:6001")
sock.setsockopt_string(zmq.SUBSCRIBE, '')
# in-memory database
exchanges_snapshot = {}
arbitrage_record = {}
itchatsendtime = {}
globalvar = {"threshhold": 50000, "BTC": 0.01, "ETH": 0.01, "LTC": 0.1}
# itchat
# itchat.auto_login(hotReload=True)
# # itchat.send("test", toUserName="filehelper")
print("Started...")
while True:
# ret = sock.recv_pyobj()
# message = sock.recv()
mjson = sock.recv_json()
exchanges_snapshot[mjson["exchange"] + "_" + mjson["instmt"]] = mjson
if mjson["exchange"] in TradeClients.keys():
TradeClients[mjson["exchange"]].instmt_snapshot[mjson["instmt"]] = mjson
try:
Exchange3Arbitrage(globalvar, mjson, exchanges_snapshot, TradeClients, "OkCoinCN", "Bitfinex", "BTC", "ETH",
0.01, 0.01, 0.01)
Exchange3Arbitrage(globalvar, mjson, exchanges_snapshot, TradeClients, "OkCoinCN", "Bitfinex", "BTC", "LTC",
0.01, 0.1, 0.01)
except Exception as e:
logging.exception(e)
| [
2,
21004,
25,
41002,
12,
23,
198,
11748,
1976,
76,
80,
198,
11748,
340,
17006,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
307,
69,
71,
13,
7266,
33584,
62,
37153,
1330,
3834,
33584,
13511,
198,
6738,
307,
69,
... | 2.307223 | 983 |
"""
PROBLEM
Leonhard Euler was born on 15 April 1707.
Consider the sequence 1504170715041707n mod 4503599627370517.
An element of this sequence is defined to be an Eulercoin if it is strictly smaller than all previously found Eulercoins.
For example, the first term is 1504170715041707 which is the first Eulercoin. The second term is 3008341430083414 which
is greater than 1504170715041707 so is not an Eulercoin. However, the third term is 8912517754604 which is small enough
to be a new Eulercoin.
The sum of the first 2 Eulercoins is therefore 1513083232796311.
Find the sum of all Eulercoins.
ANSWER:
1517926517777556
Solve time ~0.003 seconds
"""
import unittest
from util.utils import timeit
# Explanation from ProjectEuler user RubiksCube:
# After brute forcing the first 15 Eulercoins I tried the Euclidean algorithm and found that I got every coin and the
# distance between the coins from the step-by-step in the Euclidean algorithm.
#
# Short example:
# Start with 2 steps and use the last right hand side to get the first coin.
# 4503599627370517 = 1504170715041707 * 2 + 1495258197287103
# 1504170715041707 = 1495258197287103 * 1 + 8912517754604
#
# First coin: 1495258197287103 * 1 + 8912517754604 = 1504170715041707
#
# Do two steps again:
# 1495258197287103 = 8912517754604 * 167 + 6867732268235
# 8912517754604 = 6867732268235 * 1 + 2044785486369
#
# Second coin: 6867732268235 * 1 + 2044785486369 = 8912517754604
#
# Do two more steps, note the "2" giving us 2 coins.
# 6867732268235 = 2044785486369 * 3 + 733375809128
# 2044785486369 = 733375809128 * 2 + 578033868113
#
# Third coin: 733375809128 * 2 + 578033868113 = 2044785486369
# Fourth coin: 733375809128 * 1 + 578033868113 = 1311409677241
#
# Repeat until the Euclidean algorithm is finished
if __name__ == '__main__':
unittest.main()
# [(euler_coin, n)]
# [(1504170715041707, 1), (8912517754604, 3), (2044785486369, 506), (1311409677241, 2527), (578033868113, 4548),
# (422691927098, 11117), (267349986083, 17686), (112008045068, 24255), (68674149121, 55079), (25340253174, 85903),
# (7346610401, 202630), (4046188430, 724617), (745766459, 1246604), (428410324, 6755007), (111054189, 12263410),
# (15806432, 42298633), (15397267, 326125654), (14988102, 609952675), (14578937, 893779696), (14169772, 1177606717),
# (13760607, 1461433738), (13351442, 1745260759), (12942277, 2029087780), (12533112, 2312914801), (12123947, 2596741822),
# (11714782, 2880568843), (11305617, 3164395864), (10896452, 3448222885), (10487287, 3732049906), (10078122, 4015876927),
# (9668957, 4299703948), (9259792, 4583530969), (8850627, 4867357990), (8441462, 5151185011), (8032297, 5435012032),
# (7623132, 5718839053), (7213967, 6002666074), (6804802, 6286493095), (6395637, 6570320116), (5986472, 6854147137),
# (5577307, 7137974158), (5168142, 7421801179), (4758977, 7705628200), (4349812, 7989455221), (3940647, 8273282242),
# (3531482, 8557109263), (3122317, 8840936284), (2713152, 9124763305), (2303987, 9408590326), (1894822, 9692417347),
# (1485657, 9976244368), (1076492, 10260071389), (667327, 10543898410), (258162, 10827725431), (107159, 21939277883),
# (63315, 54990108218), (19471, 88040938553), (14569, 297173645994), (9667, 506306353435), (4765, 715439060876),
# (4628, 1640010829193), (4491, 2564582597510), (4354, 3489154365827), (4217, 4413726134144), (4080, 5338297902461),
# (3943, 6262869670778), (3806, 7187441439095), (3669, 8112013207412), (3532, 9036584975729), (3395, 9961156744046),
# (3258, 10885728512363), (3121, 11810300280680), (2984, 12734872048997), (2847, 13659443817314), (2710, 14584015585631),
# (2573, 15508587353948), (2436, 16433159122265), (2299, 17357730890582), (2162, 18282302658899), (2025, 19206874427216),
# (1888, 20131446195533), (1751, 21056017963850), (1614, 21980589732167), (1477, 22905161500484), (1340, 23829733268801),
# (1203, 24754305037118), (1066, 25678876805435), (929, 26603448573752), (792, 27528020342069), (655, 28452592110386),
# (518, 29377163878703), (381, 30301735647020), (244, 31226307415337), (107, 32150879183654), (77, 65226330135625),
# (47, 98301781087596), (17, 131377232039567), (4, 295829915031105), (3, 1347772343115958), (2, 2399714771200811),
# (1, 3451657199285664)]
| [
37811,
198,
4805,
9864,
2538,
44,
198,
198,
36185,
10424,
412,
18173,
373,
4642,
319,
1315,
3035,
1596,
2998,
13,
198,
19626,
262,
8379,
6640,
38547,
2998,
8628,
38547,
2998,
77,
953,
18523,
2327,
38565,
1983,
20167,
48170,
13,
198,
202... | 2.443866 | 1,728 |
# Generated by Django 2.1 on 2020-12-23 15:07
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
319,
12131,
12,
1065,
12,
1954,
1315,
25,
2998,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
628
] | 2.904762 | 42 |
"""A.i People Counter"""
"""
Copyright [2020] [MEHUL SOLANKI]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import time
import cv2
import numpy as np
import logging as log
from argparse import ArgumentParser
from inference_local import Network
#Linux CPU_EXTENSION Path Openvino V2019R3
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
BOXCOLOR = {'RED':(0,0,255),'GREEN':(0,255,0),'BLUE':(255,0,0),'WHITE':(255,255,255),'BLACK':(0,0,0)}
def build_argparser():
"""
Parse command line arguments.
:return: command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-m", "--model", required=True, type=str,
help="Path to an xml file with a trained model.")
parser.add_argument("-i", "--input", required=True, type=str,
help="Path to image, video file or for webcam just type CAM")
parser.add_argument("-fps", "--fps", required=True, type=int,
help="FPS of Video or webcam, required to get perfect duration calculations.")
parser.add_argument("-l", "--cpu_extension", required=False, type=str,
default=CPU_EXTENSION,
help="MKLDNN (CPU)-targeted custom layers."
"Absolute path to a shared library with the"
"kernels impl.")
parser.add_argument("-d", "--device", type=str, default="CPU",
help="Specify the target device to infer on: "
"CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device "
"specified (CPU by default)")
parser.add_argument("-pt", "--prob_threshold", type=float, default=0.5,
help="Probability threshold for detections filtering"
"(0.5 by default)")
parser.add_argument("-c", "--box_color", type=str, default="WHITE",
help="Color of bounding box[RED,GREEN,BLUE,WHITE,RED]"
"(WHITE by default)")
parser.add_argument("-ap", "--alarm_people", type=int, default=1,
help="Alarm when certain no people detected exceed the limit"
"(1 by default)")
parser.add_argument("-ad", "--alarm_duration", type=int, default=15,
help="Alarm when time of person stayed exceed the limit"
"(15sec. by default)")
parser.add_argument("-tv", "--toggle_video", type=str, default="ON",
help="Toggle Video feed on or off [ON or OFF]"
"(on by default)")
parser.add_argument("-ci", "--cam_id", type=int, default=0,
help="input web Camera id"
"(0 by default)")
parser.add_argument("-db", "--delay_band", type=int, default=1000,
help="input delay band (Millis) to fix counting in case of video fluctuation or frame loss"
"(1000 millis by default)")
parser.add_argument("-wv", "--write_video", type=str, default="N",
help="write video to local file Y or N [Y or N]"
"(on by default)")
return parser
def check_input_type(input, id):
"""
check input is video,image or cam
"""
#print("input",input)
checkInputargs = input #string from args.input
checkError = checkInputargs.find(".") #Verify If there is extension or other than CAM
error_flag = False
image_flag = False
cap = None
if checkInputargs == "CAM": # Check for cam
cap = cv2.VideoCapture(id) # Assign CAM ID
print("Performing inference on webcam video...")
elif checkError is -1: # Check for if there any extension
print("Error: invalid input or currupted file") # Error for no extension
print("Use -h argument for help")
error_flag = True
else:
path,ext= checkInputargs.rsplit(".",1) #find extension
if ext == "bmp" or ext == "jpg": #supporeted ext.
print("Performing inference on single image...")
cap = cv2.VideoCapture(input)
image_flag = True
elif ext == "mp4" or ext == "MP4": #if not image feed video
cap = cv2.VideoCapture(input) #Load local stream
print("Performing inference on local video...")
else:
print("Image/Video formate not supported")
error_flag = True
return cap, error_flag, image_flag
def draw_boxes(frame, result, width, height, color, prob_threshold):
'''
Draw bounding boxes onto the frame.
'''
countBox = 0
countmultipeople = 0
for box in result[0][0]: # Output shape is 1x1x100x7
conf = box[2]
if conf >= prob_threshold:
countBox = 1
countmultipeople += 1
xmin = int(box[3] * width)
ymin = int(box[4] * height)
xmax = int(box[5] * width)
ymax = int(box[6] * height)
label = "Person"+str(countmultipeople)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 1) #main rect.
cv2.rectangle(frame, (xmin, ymin), (xmin+90, ymin+10), color, -1) # Text rect.
cv2.putText(frame, label, (xmin,ymin+10),cv2.FONT_HERSHEY_PLAIN, 0.8, BOXCOLOR['BLACK'], 1)
return frame, countBox, countmultipeople
def draw_framelinegreen(frame, height, width, color=(0, 255, 0)):
    """
    Draw corner markers on the frame border (green by default).

    Generalized per the original TODO comment: the marker color is now a
    parameter whose default reproduces the original green (BGR) exactly,
    so existing callers are unaffected.

    :param frame: image to draw on (modified in place)
    :param height: frame height in pixels
    :param width: frame width in pixels
    :param color: BGR color tuple for the markers
    :return: the annotated frame
    """
    tick = int(height / 10)  # marker length: one tenth of the frame height
    # Each corner gets one horizontal and one vertical segment pointing
    # toward the frame center; the sign flips on the far edges.
    for corner_x, corner_y in ((0, 0), (width, 0), (0, height), (width, height)):
        dx = tick if corner_x == 0 else -tick
        dy = tick if corner_y == 0 else -tick
        cv2.line(frame, (corner_x, corner_y), (corner_x + dx, corner_y), color, 10)
        cv2.line(frame, (corner_x, corner_y), (corner_x, corner_y + dy), color, 10)
    return frame
def draw_framelinered(frame, height, width, color=(0, 0, 255)):
    """
    Draw alert corner markers on the frame border (red by default).

    Generalized per the original TODO comment: the marker color is now a
    parameter whose default reproduces the original red (BGR) exactly,
    so existing callers are unaffected.

    :param frame: image to draw on (modified in place)
    :param height: frame height in pixels
    :param width: frame width in pixels
    :param color: BGR color tuple for the markers
    :return: the annotated frame
    """
    tick = int(height / 10)  # marker length: one tenth of the frame height
    # Each corner gets one horizontal and one vertical segment pointing
    # toward the frame center; the sign flips on the far edges.
    for corner_x, corner_y in ((0, 0), (width, 0), (0, height), (width, height)):
        dx = tick if corner_x == 0 else -tick
        dy = tick if corner_y == 0 else -tick
        cv2.line(frame, (corner_x, corner_y), (corner_x + dx, corner_y), color, 10)
        cv2.line(frame, (corner_x, corner_y), (corner_x, corner_y + dy), color, 10)
    return frame
def selectBoxcolor(color):
    """
    Map a color name from the command line to its BGR tuple.

    :param color: one of 'RED', 'GREEN', 'BLUE', 'WHITE', 'BLACK'
    :return: the matching BOXCOLOR entry; unrecognized names are
             returned unchanged
    """
    known_names = ('RED', 'GREEN', 'BLUE', 'WHITE', 'BLACK')
    if color in known_names:
        return BOXCOLOR[color]
    # Unknown name: pass it through untouched, exactly as before.
    return color
def infer_on_stream(args):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    Runs person detection on a webcam, image, or video input, keeps
    count/duration statistics, draws them on the frame (or prints them
    to the terminal when the video feed is toggled off), and optionally
    writes the annotated output to disk.

    :param args: Command line arguments parsed by `build_argparser()`
    :return: None
    """
    # Initialise the inference engine wrapper
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold
    # Load the model through `infer_network`
    infer_network.load_model(args.model, args.device, args.cpu_extension)
    net_input_shape = infer_network.get_input_shape()
    print("Selected Network input Layer type is",type(net_input_shape),"And shape is",net_input_shape)
    print("Required input img size W",net_input_shape[3],"H",net_input_shape[2])
    # Handle the input stream (CAM / image / video)
    cap, error_flag, image_flag = check_input_type(args.input, args.cam_id) #call function
    if error_flag: # Check for invalid file extension
        print("Program stopped")
        return
    elif image_flag: # check for image
        INPUT_IMAGE = args.input
        img = cv2.imread(INPUT_IMAGE)
        # cv2.imread yields no ndarray on failure (same as img.empty());
        # isinstance replaces the non-idiomatic `type(img) is not np.ndarray`
        if not isinstance(img, np.ndarray):
            print("Error: Invalid image or path")
            print("Use -h argument for help")
            return
    else:
        cap.open(args.input)
    # Get input feed height and width
    img_width = int(cap.get(3))
    img_height = int(cap.get(4))
    # BUGFIX: test for None before the `< 1` comparison; the original
    # order would raise TypeError instead of printing the error message.
    if img_width is None or img_width < 1: # If input path is wrong
        print("Error! Can't read Input: Check path")
        return
    print("feed frame size W",img_width,"H",img_height)
    # Initialize video writer if video mode
    # BUGFIX: compare option strings with `==`, not `is` — identity of
    # runtime strings is an implementation detail and not guaranteed.
    if args.write_video == "Y": # only if args given Y
        if not image_flag:
            # Video writer Linux
            # NOTE(review): the magic fourcc 0x00000021 is kept from the
            # original; the unused 'MP4V' fourcc variable was removed.
            print("---Opencv video writer debug LIN---")
            out = cv2.VideoWriter('out.mp4', 0x00000021, 30, (img_width,img_height))
            print("-------------------------------")
    # Initialize variables used inside the loop
    frame_count = 0            # global frames-processed counter
    total_people_count = 0
    last_state = 0             # detection state (0/1) of the previous frame
    delay_on = 0
    delay_off = (time.time() * 1000) # Initialize timer before loop to get actual time
    delay_diff_on = 0
    delay_diff_off = 0
    duration = 0
    duration_timebase = 0
    duration_fpsbase = 0
    count_people_image = 0
    # Second counting timer initialized
    sec_on = (time.time() * 1000) # Timer for update stat on terminal START
    sec_diff = 0
    cv_drawstate_time_s = 0
    cv_drawstate_time_e = 0
    count_flag = False
    frame_count_onstate = 0
    frame_count_offstate = 0
    # Accuracy Log
    log_acount = 0
    log_frame_no = []
    log_person_counted = []
    log_duration_fpsbase = []
    log_duration_timebase = []
    log_infer_time = []
    # error_log
    log_ecount = 0 # counter for error log in case of multiple box count
    log_multicounted = []
    # Loop until stream is over
    while cap.isOpened():
        frame_count += 1 # Global frame Count no of frame processed.
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(1)
        # Pre-process the image as the network expects
        p_frame = preprocess_frame(frame,net_input_shape[3],net_input_shape[2]) #from extracted input function
        # Start asynchronous inference for specified request
        infer_network.exec_net(p_frame)
        inferreq_start_time = (time.time() * 1000) # Timer for inference START
        if infer_network.wait() == 0:
            inferreq_end_time = (time.time() * 1000) - inferreq_start_time # Timer for inference END
            log_infer_time.append(float("{:.2f}".format(inferreq_end_time)))
            # Get the results of the inference request
            blob, result = infer_network.get_output()
            # If the model outputs multiple blobs, print the available names
            if frame_count == 1: # Print only Once
                for name,output_ in blob.items(): # Find the possible BLOBS
                    print ("The name of available blob is:", name)
            # Extract any desired stats from the results
            color = selectBoxcolor(args.box_color)
            cv_drawboxtime_s = (time.time() * 1000) # Timer for drawing box on frame START
            frame, count_box, countmultipeople = draw_boxes(frame, result, img_width, img_height, color, args.prob_threshold)
            cv_drawboxtime_e = (time.time() * 1000) - cv_drawboxtime_s # Timer for drawing box on frame END
            count_people_image = countmultipeople # Variable For image stat only
            # State machine: everything below runs only once per 0<->1 transition
            if count_box != last_state:
                log_acount += 1 # increase stat change counter
                if count_box == 1:
                    count_flag = True # Flag for verify if counting
                    delay_on = (time.time() * 1000) # Timer for on delay START
                    delay_diff_off = (time.time() * 1000) - delay_off # Timer for off delay END
                    delay_diff_on = 0 # Timer for on delay RESET
                    frame_count_onstate = frame_count # Frame count is Global FPS counter
                    frame_count_offstate = frame_count - frame_count_offstate # Calculates the difference
                else:
                    count_flag = False
                    delay_diff_on = (time.time() * 1000) - delay_on # Timer for on delay END
                    delay_off = (time.time() * 1000) # Timer for off delay START
                    delay_diff_off = 0 # Timer for off delay RESET
                    frame_count_onstate = frame_count - frame_count_onstate # Calculates the difference
                    frame_count_offstate = frame_count
                # Count a person only when the "on" state outlived the
                # debounce band — filters flicker / momentary frame loss.
                if delay_diff_on > args.delay_band:
                    total_people_count += 1
                    duration_timebase = delay_diff_on / 1000 # Convert to Sec.
                    duration_fpsbase = frame_count_onstate / args.fps # Local use
                    duration = duration_fpsbase # global set
                    # Accuracy log, individual list log, terminal friendly
                    log_person_counted.append(total_people_count)
                    log_duration_timebase.append("{:.2f}".format(duration_timebase))
                    log_duration_fpsbase.append(duration_fpsbase)
                    log_frame_no.append(frame_count) # Log frame no of video
                last_state = count_box
            else:
                if countmultipeople not in (0,1): # In case of multiple people detected
                    log_ecount += 1 # Increase error counter
                    # Nested list Frame and multipeople people count
                    log_multicounted.append(['F: '+ str(frame_count) + ' C: ' + str(countmultipeople)])
            # BUGFIX: `== "ON"` instead of `is "ON"` (value, not identity)
            if args.toggle_video == "ON": # If video feed is off skip unnecessary processing
                cv_drawstate_time_s = (time.time() * 1000) # Timer for draw stat on frame START
                # Draw inference time on image
                label = "Inference time: " + str("{:.2f}".format(inferreq_end_time)) + "ms" #string label
                cv2.putText(frame, label, (15,20),cv2.FONT_HERSHEY_COMPLEX, 0.4, BOXCOLOR['BLUE'], 1)
                label1 = "Total people count: " + str(total_people_count) #string label
                if image_flag or countmultipeople > 1:
                    label1 = "Total people count: " + str(count_people_image) #string label
                else:
                    label1 = "Total people count: " + str(total_people_count)
                cv2.putText(frame, label1, (15,30),cv2.FONT_HERSHEY_COMPLEX, 0.4, BOXCOLOR['BLUE'], 1)
                if countmultipeople > 1 or image_flag is True:
                    label2 = "Average Time stayed: N/A"
                else:
                    label2 = "Average Time stayed: " + str("{:.2f}".format(duration)) + "Sec." #string label
                cv2.putText(frame, label2, (15,40),cv2.FONT_HERSHEY_COMPLEX, 0.4, BOXCOLOR['BLUE'], 1)
                # People count exceed alarm
                if countmultipeople > args.alarm_people or duration > args.alarm_duration:
                    draw_framelinered(frame,img_height,img_width)
                    if countmultipeople > args.alarm_people:
                        label3 = "Alarm: people count limit exceeded! limit: "+ str(args.alarm_people) #string label
                        cv2.putText(frame, label3, (15,50),cv2.FONT_HERSHEY_COMPLEX, 0.4, BOXCOLOR['RED'], 1)
                    else:
                        label4 = "Alarm: Person stayed longer! limit: " + str(args.alarm_duration) + "Sec."#string label
                        cv2.putText(frame, label4, (15,60),cv2.FONT_HERSHEY_COMPLEX, 0.4, BOXCOLOR['RED'], 1)
                else:
                    draw_framelinegreen(frame,img_height,img_width)
                # Draw cv process time
                label5 = "CV Frame process time: " + str("{:.2f}".format(cv_drawboxtime_e + cv_drawstate_time_e)) + "ms" #string label
                cv2.putText(frame, label5, (15,70),cv2.FONT_HERSHEY_COMPLEX, 0.4, BOXCOLOR['BLUE'], 1)
                cv_drawstate_time_e = (time.time() * 1000) - cv_drawstate_time_s # Timer for draw stat on frame END
            else:
                # Stats of time of cv processing on image frame
                sec_diff = (time.time() * 1000) - sec_on # Timer for update stat on terminal END
                # BUGFIX: dropped the redundant `or sec_diff > 2000`
                # (already subsumed by `> 1000`); refresh roughly every sec.
                if sec_diff > 1000:
                    os.system('cls' if os.name == 'nt' else 'clear') # Clear the terminal
                    print() # Blank print
                    print("Video feed is OFF, Terminal will refresh every sec.")
                    print("Press ctlr+c to stop execution.")
                    # People count on terminal
                    if countmultipeople > 1:
                        print("Total people count: ",countmultipeople)
                    else:
                        print("Current people count: ", total_people_count)
                    print("Total people count: ",total_people_count)
                    print("Average Time stayed: ""{:.2f}".format(duration)," Sec.")
                    # Alarm on terminal
                    if countmultipeople > args.alarm_people or duration > args.alarm_duration:
                        if countmultipeople > args.alarm_people:
                            print("##### Alarm1 #####")
                            print("People count limit exceeded! limit: "+ str(args.alarm_people))
                            print("##################")
                        else:
                            print("##### Alarm2 #####")
                            print("Person stayed longer! limit: " + str(args.alarm_duration) + "Sec.")#string label
                            print("##################")
                    print("-----Stats for time -----")
                    print("Inference Time(ms):","{:.2f}".format(inferreq_end_time))
                    print("Draw boundingBox time(ms):", "{:.2f}".format(cv_drawboxtime_e))
                    print("Draw state time(ms):", "{:.2f}".format(cv_drawstate_time_e))
                    print("--------------------------")
                    sec_on = (time.time() * 1000) # Timer for update stat on terminal RESET
                    sec_diff = 0 # Timer for update stat on terminal RESET
            # Adjusting timers with inference and cv processing time to fix counting and duration.
            if count_flag:
                delay_on = delay_on + inferreq_end_time + cv_drawboxtime_e + cv_drawstate_time_e
            else:
                delay_off = delay_off + inferreq_end_time + cv_drawboxtime_e + cv_drawstate_time_e
            # Write video or image file
            if not image_flag:
                if args.toggle_video == "ON":
                    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
                    cv2.imshow('frame',frame)
                if args.write_video == "Y":
                    out.write(frame)
            else:
                # Write an output image in single-image mode
                cv2.imwrite('output_image.jpg', frame)
                print("Image saved sucessfully!")
            # TODO: Send the frame to the FFMPEG server
            if args.toggle_video == "ON":
                a = None
            if key_pressed == 27: # Esc stops playback
                break
    cap.release()
    cv2.destroyAllWindows()
    print("Last frame prcessed no: ",frame_count)
    print("-----AccuracyLog-----")
    if len(log_person_counted) > 1: # Only if counting single person
        print("No Of person:")
        print(log_person_counted)
        # print("Duration stayed timebase:") # This is untested experimental feature
        # print(log_duration_timebase)
        print("Duration stayed fpsbase:")
        print(log_duration_fpsbase)
        print("Frame No.:")
        print(log_frame_no)
    else:
        print("N/A")
    # Hoisted out of the if/else above — it was duplicated verbatim.
    # NOTE(review): assumes at least one frame was processed; min()/max()
    # on an empty array would raise — confirm acceptable for empty input.
    log_infer_time = np.array(log_infer_time) # Convert list to np array
    print("Inference time:[min max avg.]")
    print([log_infer_time.min(),log_infer_time.max(),(float("{:.2f}".format(np.average(log_infer_time))))])
    print("-----Error log-----")
    if 1 < len(log_multicounted) < 10: # Only a handful of multi-count events
        print("Frame No: Count")
        print(log_multicounted)
    else:
        print("N/A")
    print("-----Finish!------")
def main():
    """
    Load the network and parse the output.

    Parses the command line, echoes every option for the operator, then
    hands the namespace to infer_on_stream().

    :return: None
    """
    # Grab command line args
    # This is a different method, so do not use .m type attributes; use whole names.
    args = build_argparser().parse_args()
    print("Commandline Arguments received")
    print("-----Information-----")
    # Table-driven echo of every option (output identical to the
    # original one-print-per-option version).
    settings = (
        ("Model path", args.model),
        ("Video/Image path", args.input),
        ("Video fps", args.fps),
        ("Device", args.device),
        ("CPU Ext. path", args.cpu_extension),
        ("BoundingBox color", args.box_color),
        ("Confidence", args.prob_threshold),
        ("Alarm People count", args.alarm_people),
        ("Alarm Person duration Sec.", args.alarm_duration),
        ("Web cam ID(If any)", args.cam_id),
        ("Delay Band(ms)", args.delay_band),
        ("Toggle video feed on/off", args.toggle_video),
        ("Write output to video file Y or N", args.write_video),
    )
    for label, value in settings:
        print(f"{label}: {value}")
    print("-----------------------")
    # Perform inference on the input stream
    infer_on_stream(args)
# Standard entry-point guard: run main() only when this file is executed
# as a script, not when it is imported as a module.
if __name__ == '__main__':
    main()
| [
37811,
32,
13,
72,
4380,
15034,
37811,
198,
37811,
198,
15269,
685,
42334,
60,
685,
11682,
39,
6239,
36817,
15154,
40,
60,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832... | 2.248741 | 11,321 |
import libtcodpy as libtcod
from model.action import ActionTag
from model.attribute import Attribute, AttributeTag
from model.entity import Entity
from ui.game.frame_world import WorldRenderType
from behavior import Behavior
# NOTE: deliberately using `from model.entity_utilities import *` — when the
# module was imported by name, referencing its functions inside lambdas failed.
from model.entity_utilities import *
# TODO: pull this code out into a more generic one for programs
| [
11748,
9195,
83,
19815,
9078,
355,
9195,
83,
19815,
198,
6738,
2746,
13,
2673,
1330,
7561,
24835,
198,
6738,
2746,
13,
42348,
1330,
3460,
4163,
11,
3460,
4163,
24835,
198,
6738,
2746,
13,
26858,
1330,
20885,
198,
6738,
334,
72,
13,
60... | 3.911504 | 113 |
from pydoc import doc
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.test.utils import get_tmpfile
print('creating docs')
print(common_texts)
# Tag each toy corpus document with its index so Doc2Vec can address it.
tagged_corpus = []
for idx, words in enumerate(common_texts):
    tagged_corpus.append(TaggedDocument(words, [idx]))
print(len(tagged_corpus))
print('training model')
# Tiny vectors / window are fine for the toy corpus; 4 worker threads.
d2v = Doc2Vec(tagged_corpus, vector_size=5, window=2, min_count=1, workers=4)
print('saving model')
model_path = get_tmpfile("my_doc2vec_model")
d2v.save(model_path)
print('loading model')
# Round-trip through disk to verify persistence works.
d2v = Doc2Vec.load(model_path)
print('testing')
embedding = d2v.infer_vector(["system", "response"])
print(embedding)
| [
6738,
279,
5173,
420,
1330,
2205,
198,
6738,
308,
641,
320,
13,
9288,
13,
26791,
1330,
2219,
62,
5239,
82,
198,
6738,
308,
641,
320,
13,
27530,
13,
15390,
17,
35138,
1330,
14432,
17,
53,
721,
11,
309,
14655,
24941,
198,
6738,
308,
... | 2.718615 | 231 |
from brownie import * | [
6738,
7586,
494,
1330,
1635
] | 4.2 | 5 |
import unyt as u
from mc_examples.realistic_workflows.graphene_slitpore.utils import create_system
from mc_examples.realistic_workflows.graphene_slitpore.gcmc_pore.runners import run_gcmc
# NOTE(review): `main` is never defined or imported in this module — the
# imports above only provide `u`, `create_system`, and `run_gcmc`, so
# executing this script raises NameError. Confirm the intended entry
# point (presumably `run_gcmc`).
if __name__ == "__main__":
    main()
| [
11748,
555,
20760,
355,
334,
628,
198,
6738,
36650,
62,
1069,
12629,
13,
5305,
2569,
62,
1818,
44041,
13,
70,
2416,
29473,
62,
6649,
270,
79,
382,
13,
26791,
1330,
2251,
62,
10057,
198,
6738,
36650,
62,
1069,
12629,
13,
5305,
2569,
... | 2.595506 | 89 |
import os
import numpy as np
import pandas as pd
from datetime import datetime as dt
import logging
from telemanom.helpers import Config
from telemanom.errors import Errors
import telemanom.helpers as helpers
from telemanom.channel import Channel
from telemanom.modeling import Model
# Module-level logger, configured once at import time by the project's
# telemanom.helpers.setup_logging() helper.
logger = helpers.setup_logging()
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
11748,
18931,
198,
198,
6738,
5735,
805,
296,
13,
16794,
364,
1330,
17056,
198,
6738,
... | 3.573034 | 89 |
import os
from jinja2 import Environment, FileSystemLoader
| [
11748,
28686,
198,
6738,
474,
259,
6592,
17,
1330,
9344,
11,
9220,
11964,
17401,
198
] | 3.933333 | 15 |