index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,500 | 7783342a45e6de8c8d6f8af61fe92678607a563e | #!/usr/local/bin/python3
# Emit the greeting three times.
for _ in range(3):
    print("hello world")
|
983,501 | e75814e9279b3d9a8cbcc546e163ddf7db379225 | # functions for extracting domain names only
from urllib.parse import urlparse
def get_subdomainname(url):
    """Return the network location (host, including any subdomains) of `url`.

    Returns '' when the URL cannot be parsed (e.g. non-string input).
    """
    try:
        # urlparse rarely raises for plain strings; the guard (narrowed from a
        # bare `except`) covers non-string input and malformed values.
        return urlparse(url).netloc
    except (AttributeError, TypeError, ValueError):
        return ''
# get domainname(vulnerabilityscanner.com)
# get_domainname("http://scanner.vulnerabilityscanner.com") -> "vulnerabilityscanner.com"
def get_domainname(url):
    """Return the last two labels ('domain.tld') of `url`'s host, or ''.

    NOTE(review): naive split — multi-label public suffixes such as 'co.uk'
    will yield 'co.uk', which may not be the registered domain.
    """
    # Explicit length check replaces the original bare `except` that silently
    # swallowed the IndexError for hosts with fewer than two labels.
    parts = get_subdomainname(url).split('.')
    if len(parts) < 2:
        return ''
    return parts[-2] + '.' + parts[-1]
|
983,502 | abc0f098b8f78b010f005ca5c1442c7d763db0e4 | import random
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QRadioButton
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QWidget
def gen_password():
    """Generate a random password into the `msg` field.

    Behaves as a toggle: if a password is already displayed, clear the
    form instead of generating a new one (original behavior preserved).
    """
    # Toggle off: a previous result is showing, so clear the form.
    if msg.text():
        msg.setText("")
        plength.setText("")
        return
    try:
        length = int(plength.text())
    except ValueError:
        # The original crashed on an empty/non-numeric length; show a hint instead.
        msg.setText("Enter a whole number for the length")
        return
    import secrets  # cryptographically secure choice; `random` is unsuitable for passwords
    chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@£$%^&*().,?0123456789'
    msg.setText(''.join(secrets.choice(chars) for _ in range(length)))
def clear_form():
    """Reset both the length input and the generated-password field."""
    for field in (msg, plength):
        field.setText("")
# pyqt5 ui: a single vertical form — length input, Generate/Reset buttons,
# and an output field that doubles as the message area.
app = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle('Password Generator')
layout = QVBoxLayout()
layout.addWidget(QLabel('Enter desired password length'))
# Desired password length (read by gen_password).
plength = QLineEdit('')
layout.addWidget(plength)
btn = QPushButton('Generate')
btn.clicked.connect(gen_password)
layout.addWidget(btn)
rbtn = QPushButton('Reset')
rbtn.clicked.connect(clear_form)
layout.addWidget(rbtn)
# Output field: shows the generated password (written by gen_password).
msg = QLineEdit('')
layout.addWidget(msg)
window.setLayout(layout)
window.show()
# Enter the Qt event loop; exit status is propagated to the shell.
sys.exit(app.exec_())
|
983,503 | e6a72730ea2784c69c50da80ad8669ed08cfae8c | ""
Create a function that takes a list and a string as arguments and return the index of the string.
""
def find_index(lst, str):
    """Return the position of `str` within `lst`.

    Raises ValueError when the element is absent (list.index semantics).
    NOTE(review): the parameter name `str` shadows the builtin; rename once
    keyword callers are ruled out.
    """
    return lst.index(str)
|
983,504 | ba70e566868b39775d648dc528dd52a1ccc6a033 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
_CATAPULT_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path.insert(0, os.path.join(_CATAPULT_PATH, 'third_party', 'mock'))
# pylint: disable=wrong-import-position
import mock
from bisect_lib import fetch_intervening_revisions
# pylint: enable=wrong-import-position
_TEST_DATA = os.path.join(os.path.dirname(__file__), 'test_data')
class FetchInterveningRevisionsTest(unittest.TestCase):
  """Tests for fetch_intervening_revisions.FetchInterveningRevisions."""

  def testFetchInterveningRevisions(self):
    # Close the canned-response file deterministically; the original leaked
    # the handle (ResourceWarning under -W error / pytest).
    with open(os.path.join(_TEST_DATA, 'MOCK_RANGE_RESPONSE_1')) as response:
      with mock.patch('urllib2.urlopen', mock.MagicMock(return_value=response)):
        revs = fetch_intervening_revisions.FetchInterveningRevisions(
            '53fc07eb478520a80af6bf8b62be259bb55db0f1',
            'c89130e28fd01062104e1be7f3a6fc3abbb80ca9',
            depot_name='chromium')
    self.assertEqual(
        revs, [
            ('32ce3b13924d84004a3e05c35942626cbe93cbbd', '356382'),
            ('07a6d9854efab6677b880defa924758334cfd47d', '356383'),
            ('22e49fb496d6ffa122c470f6071d47ccb4ccb672', '356384'),
            ('5dbc149bebecea186b693b3d780b6965eeffed0f', '356385'),
            ('ebd5f102ee89a4be5c98815c02c444fbf2b6b040', '356386'),
            ('84f6037e951c21a3b00bd3ddd034f258da6839b5', '356387'),
            ('48c1471f1f503246dd66753a4c7588d77282d2df', '356388'),
            ('66aeb2b7084850d09f3fccc7d7467b57e4da1882', '356389'),
            ('01542ac6d0fbec6aa78e33e6c7ec49a582072ea9', '356390'),
            ('8414732168a8867a5d6bd45eaade68a5820a9e34', '356391'),
            ('4f81be50501fbc02d7e44df0d56032e5885e19b6', '356392'),
            ('7bd1741893bd4e233b5562a6926d7e395d558343', '356393'),
            ('ee261f306c3c66e96339aa1026d62a6d953302fe', '356394'),
            ('f1c777e3f97a16cc6a3aa922a23602fa59412989', '356395'),
            ('8fcc8af20a3d41b0512e3b1486e4dc7de528a72b', '356396'),
            ('3861789af25e2d3502f0fb7080da5785d31308aa', '356397'),
            ('6feaa73a54d0515ad2940709161ca0a5ad91d1f8', '356398'),
            ('2e93263dc74f0496100435e1fd7232e9e8323af0', '356399')
        ])

  def testFetchInterveningRevisionsPagination(self):
    def MockUrlopen(url):
      # 's=' marks the continuation request for the second result page.
      # NOTE(review): these handles are consumed (and then dropped) by the
      # code under test; acceptable in a short-lived test process.
      if 's=' not in url:
        return open(os.path.join(_TEST_DATA, 'MOCK_RANGE_RESPONSE_2_PAGE_1'))
      return open(os.path.join(_TEST_DATA, 'MOCK_RANGE_RESPONSE_2_PAGE_2'))
    with mock.patch('urllib2.urlopen', MockUrlopen):
      revs = fetch_intervening_revisions.FetchInterveningRevisions(
          '7bd1741893bd4e233b5562a6926d7e395d558343',
          '3861789af25e2d3502f0fb7080da5785d31308aa',
          depot_name='chromium')
    self.assertEqual(
        revs, [
            ('ee261f306c3c66e96339aa1026d62a6d953302fe', '356394'),
            ('f1c777e3f97a16cc6a3aa922a23602fa59412989', '356395'),
            ('8fcc8af20a3d41b0512e3b1486e4dc7de528a72b', '356396'),
        ])
# Standard unittest entry point when run as a script.
if __name__ == '__main__':
  unittest.main()
|
983,505 | 2da37b7f33fa5ddf2f8fb310d3599b90f107dac8 | import sys
from flask import Blueprint, jsonify
# from sqlalchemy_utils import database_exists
# Blueprint carrying all API endpoints, mounted under /api.
base = Blueprint("base", __name__, url_prefix="/api")
# from haoez_api_server import db, __version__
from haoez_api_server import __version__
@base.route("/", methods=["GET"])
def info():
return jsonify({"version": __version__})
@base.route("/db")
def test_db():
try:
# TODO
# database_exists(db.engine.url)
# return "<h1>It works.</h1>"
raise Exception("SQL didn't deploy!")
except Exception as e:
print(e, file=sys.stderr)
return "<h1>Something is broken.</h1>", 404
|
983,506 | 13012c7d43def8bee32e1b89d0c0ce7cfa0e20cd | #Receding Horizon Planning Framework
import numpy as np #Numpy
import casadi as ca #Casadi
from Humanoid_ProblemDescription_3Order_Bezier import *
#Initialization and Problem Setup
# Set Decimal Printing Precision
np.set_printoptions(precision=4)
# Define the Swing foot of the First Step (left foot swings first)
LeftSwingFlag = 1
RightSwingFlag = 0
# Initial Condition of the Robot
# Starting CoM State
C_start = [0., 0.1, 0.55]
# Starting CoM Velocity
Cdot_start = [0.,0.,0.]
# Starting CoM Acceleration
Cddot_start = [0.,0.0,0.0]
# Expected Terminal Condition of the Robot
C_end = [5, 0.1, 0.55]
# Initial Angular Momentum
L_start = [0,0,0]
# Initial Angular Momentum Rate
Ldot_start = [0,0,0]
# Timing Configuration NOTE: May become a search variable
# TimeVec[0] = initial double-support duration, TimeVec[1] = swing duration (seconds)
TimeVec = [0.4,0.2]
T = TimeVec[0]+TimeVec[1]
# Initial Contact Locations (left/right foot centers)
PL_init = [0.,0.1,0]
PR_init = [0,-0.1,0]
#Build Solver
solver, DecisionVars_lb, DecisionVars_ub, glb, gub, var_index = BuildSolver(FirstLevel = "Bezier_SingleStep_Discrete_Order3")
#solver, DecisionVars_lb, DecisionVars_ub, glb, gub, var_index = BuildSolver(FirstLevel = "Bezier_SingleStep_Discrete_Order3", SecondLevel = "CoM_Dynamics_Fixed_Time", m = 95)
#Build initial Seed (for QP it is not that important)
np.random.seed()
DecisionVarsShape = DecisionVars_lb.shape
# Random initial guess drawn uniformly between the variable bounds.
DecisionVars_init = DecisionVars_lb + np.multiply(np.random.rand(DecisionVarsShape[0],).flatten(),(DecisionVars_ub-DecisionVars_lb))# Fixed Value Initial Guess
#NOTE:Here we should have a for-loop to solve multiple solutions
# Target location of the next (swing) footstep.
P_next = [0.1,-0.1,0]
ParaList = np.concatenate((LeftSwingFlag,RightSwingFlag,C_start,Cdot_start,Cddot_start,C_end,L_start,Ldot_start,TimeVec,T,PL_init,PR_init,P_next),axis=None)
res = solver(x0=DecisionVars_init, p = ParaList,lbx = DecisionVars_lb, ubx = DecisionVars_ub,lbg = glb, ubg = gub)
x_opt = res['x']
print(solver.stats()["success"])
#print('x_opt: ', x_opt)
print(res)
#print results
var_index_L1 = var_index["Level1_Var_Index"]

def _extract(name):
    """Return the slice of the flat solution vector for decision variable
    `name`; var_index stores inclusive [start, end] index pairs."""
    start, end = var_index_L1[name][0], var_index_L1[name][1]
    return x_opt[start:end + 1]

# Terminal CoM Bezier control point (re-used below to rebuild the CoM curve).
Cy_res = _extract("Cy")
print("Cy_res")
print(Cy_res)

# Angular-momentum-rate control points (printed for inspection only).
for _name in ("Ldot0", "Ldot1", "Ldot2", "Ldot3"):
    print(_name + "_res")
    print(_extract(_name))

# Contact-force Bezier control points: 2 feet (FL/FR) x 4 foot corners x
# {initial double-support p0/p1, swing p0/p1}.  The *_res names are bound at
# module scope because the Newton-Euler check further down reads them.
# NOTE: this also fixes the original copy-paste bug where the line labelled
# "FR3_init_p1_res" actually printed FL3_init_p1_res.
_force_results = {}
for _foot in ("FL", "FR"):
    for _corner in (1, 2, 3, 4):
        for _var_suffix, _res_suffix in (("initdouble_p0", "init_p0"),
                                         ("initdouble_p1", "init_p1"),
                                         ("swing_p0", "swing_p0"),
                                         ("swing_p1", "swing_p1")):
            _res_name = "%s%d_%s_res" % (_foot, _corner, _res_suffix)
            _force_results[_res_name] = _extract("%s%d_%s" % (_foot, _corner, _var_suffix))
            print(_res_name)
            print(_force_results[_res_name])
# Bind FL1_init_p0_res, ..., FR4_swing_p1_res exactly as the original
# straight-line code did, so downstream code keeps working unchanged.
globals().update(_force_results)
#Plot trajectories
# Cubic-Bezier control points for the CoM, fixed by the initial position,
# velocity, and acceleration; the terminal point Cy_res comes from the solver.
C_p0 = np.array(C_start)
C_p1 = T/3*np.array(Cdot_start) + C_p0
C_p2 = T**2/6*np.array(Cddot_start) + 2*C_p1 - C_p0
# Compute Control points for L, Ldot
#L0 = np.array(L_start)
#L1 = np.array(Ldot_start)*T/4+L0
#init time tick
t = 0.1
#Loop over all Phases (Knots)
Nphase = 2
Nk_Local = 5
for Nph in range(Nphase):
    #Decide Number of Knots
    if Nph == Nphase-1: #The last Knot belongs to the Last Phase
        Nk_ThisPhase = Nk_Local+1
    else:
        Nk_ThisPhase = Nk_Local
    #Compute time increment
    delta_t = TimeVec[Nph]/Nk_Local
    for Local_k_Count in range(Nk_ThisPhase):
        # Evaluate the cubic Bezier at normalized time t/T.
        C_t = 1.0*C_p0*(1 - t/T)**3 + 3.0*C_p1*(t/T)*(1 - t/T)**2 + 3.0*C_p2*(t/T)**2*(1 - (t/T)) + 1.0*(t/T)**3*Cy_res
        #L_t = 1.0*L0*(1 - t/T)**4 + 4.0*L1*(t/T)*(1 - t/T)**3 + 6.0*L2_res*(t/T)**2*(1 - t/T)**2 + 4.0*L3_res*(t/T)**3*(1 - t/T) + 1.0*L4_res*(t/T)**4
        #print(C_t[1])
        #print("momentum")
        #print(L_t)
        t = t + delta_t
#Check Newton Euler Equation
# Sample time for the consistency check (inside the initial double-support phase).
t = 0.1
#phase_t = t-TimeVec[0]
phase_t = t
#phase_t = t - TimeVec[0] - TimeVec[1]
# Gravity vector (m/s^2) and total robot mass (kg).
G = [0,0,-9.80665]
m = 95
# Corner forces at time t, linearly interpolated between the p0 and p1
# control points of the initial double-support phase.
FL1_t = FL1_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FL1_init_p1_res*t/TimeVec[0]
FL2_t = FL2_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FL2_init_p1_res*t/TimeVec[0]
FL3_t = FL3_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FL3_init_p1_res*t/TimeVec[0]
FL4_t = FL4_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FL4_init_p1_res*t/TimeVec[0]
##
FR1_t = FR1_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FR1_init_p1_res*t/TimeVec[0]
FR2_t = FR2_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FR2_init_p1_res*t/TimeVec[0]
FR3_t = FR3_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FR3_init_p1_res*t/TimeVec[0]
FR4_t = FR4_init_p0_res*(1.0 - 1.0*t/TimeVec[0]) + 1.0*FR4_init_p1_res*t/TimeVec[0]
#swing phase - 2 phase motion
#FL1_t = FL1_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FL1_swing_p1_res*phase_t/TimeVec[1]
#FL2_t = FL2_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FL2_swing_p1_res*phase_t/TimeVec[1]
#FL3_t = FL3_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FL3_swing_p1_res*phase_t/TimeVec[1]
#FL4_t = FL4_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FL4_swing_p1_res*phase_t/TimeVec[1]
#
#FR1_t = FR1_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FR1_swing_p1_res*phase_t/TimeVec[1]
#FR2_t = FR2_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FR2_swing_p1_res*phase_t/TimeVec[1]
#FR3_t = FR3_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FR3_swing_p1_res*phase_t/TimeVec[1]
#FR4_t = FR4_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[1]) + 1.0*FR4_swing_p1_res*phase_t/TimeVec[1]
#Swing phase - one phase motion
#FL1_t = FL1_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FL1_swing_p1_res*phase_t/TimeVec[0]
#FL2_t = FL2_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FL2_swing_p1_res*phase_t/TimeVec[0]
#FL3_t = FL3_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FL3_swing_p1_res*phase_t/TimeVec[0]
#FL4_t = FL4_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FL4_swing_p1_res*phase_t/TimeVec[0]
#
#FR1_t = FR1_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FR1_swing_p1_res*phase_t/TimeVec[0]
#FR2_t = FR2_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FR2_swing_p1_res*phase_t/TimeVec[0]
#FR3_t = FR3_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FR3_swing_p1_res*phase_t/TimeVec[0]
#FR4_t = FR4_swing_p0_res*(1.0 - 1.0*phase_t/TimeVec[0]) + 1.0*FR4_swing_p1_res*phase_t/TimeVec[0]
#Ldot_t = (-L3_res + L4_res)*4/T*(t/T)**3 + (-L2_res + L3_res)*12.0/T*(t/T)**2*(1 - t/T)+ (-L1 + L2_res)*12.0/T*(t/T)*(1 - t/T)**2+ (-L0 + L1)*4.0/T*(1 - t/T)**3
# CoM acceleration implied by the summed contact forces (gravity excluded).
Cddot_force = (FL1_t + FL2_t + FL3_t + FL4_t + FR1_t + FR2_t + FR3_t + FR4_t)/m
C_p0 = np.array(C_start)
C_p1 = T/3*np.array(Cdot_start) + C_p0
C_p2 = T**2/6*np.array(Cddot_start) + 2*C_p1 - C_p0
C_t = 1.0*C_p0*(1 - t/T)**3 + 3.0*C_p1*(t/T)*(1 - t/T)**2 + 3.0*C_p2*(t/T)**2*(1 - (t/T)) + 1.0*(t/T)**3*Cy_res
# Second derivative of the CoM Bezier minus gravity.
# NOTE(review): the "- G" term removes gravity so this compares against
# Cddot_force above — confirm the sign convention before reuse.
Cddot_t = 6.0*(t/T)*(C_p1 - 2*C_p2 + Cy_res)/T**2 + 6*(1.0 - 1.0*t/T)*(C_p0 - 2*C_p1 + C_p2)/T**2 - G
PL_init = np.array(PL_init)
PR_init = np.array(PR_init)
#Ldot_force_t = np.cross(np.array(PL_init+np.array([0.11,0.06,0])-C_t).reshape((1,3)),np.array(FL1_t).reshape((1,3))) + np.cross(np.array(PL_init+np.array([0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FL2_t).reshape((1,3))) + np.cross(np.array(PL_init+np.array([-0.11,0.06,0])-C_t).reshape((1,3)),np.array(FL3_t).reshape((1,3))) + np.cross(np.array(PL_init+np.array([-0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FL4_t).reshape((1,3))) + np.cross(np.array(PR_init+np.array([0.11,0.06,0])-C_t).reshape((1,3)),np.array(FR1_t).reshape((1,3))) + np.cross(np.array(PR_init+np.array([0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FR2_t).reshape((1,3))) + np.cross(np.array(PR_init+np.array([-0.11,0.06,0])-C_t).reshape((1,3)),np.array(FR3_t).reshape((1,3))) + np.cross(np.array(PR_init+np.array([-0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FR4_t).reshape((1,3)))
print("Acc-force - without Gravity")
print(Cddot_force)
print("Acc - without Gravity")
print(Cddot_t)
#print("Ldot")
#print(Ldot_t)
#print("Ldot_force")
#print(Ldot_force_t)
print("CoM Pos")
print(C_t)
print("FL1_t Momentum Rate")
FL1_mo = np.cross(np.array(PL_init+np.array([0.11,0.06,0])-C_t).reshape((1,3)),np.array(FL1_t).reshape((1,3)))
print(FL1_mo[0][0])
print("FL2_t Momentum Rate")
FL2_mo = np.cross(np.array(PL_init+np.array([0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FL2_t).reshape((1,3)))
print(FL2_mo[0][0])
print("FL3_t Momentum Rate")
FL3_mo = np.cross(np.array(PL_init+np.array([-0.11,0.06,0])-C_t).reshape((1,3)),np.array(FL3_t).reshape((1,3)))
print(FL3_mo[0][0])
print("FL4_t Momentum Rate")
FL4_mo = np.cross(np.array(PL_init+np.array([-0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FL4_t).reshape((1,3)))
print(FL4_mo[0][0])
print("FR1_t Momentum Rate")
FR1_mo = np.cross(np.array(PR_init+np.array([0.11,0.06,0])-C_t).reshape((1,3)),np.array(FR1_t).reshape((1,3)))
print(FR1_mo[0][0])
print("FL2_t Momentum Rate")
FR2_mo = np.cross(np.array(PR_init+np.array([0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FR2_t).reshape((1,3)))
print(FR2_mo[0][0])
print("FL3_t Momentum Rate")
FR3_mo = np.cross(np.array(PR_init+np.array([-0.11,0.06,0])-C_t).reshape((1,3)),np.array(FR3_t).reshape((1,3)))
print(FR3_mo[0][0])
print("FL4_t Momentum Rate")
FR4_mo = np.cross(np.array(PR_init+np.array([-0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FR4_t).reshape((1,3)))
print(FR4_mo[0][0])
#print("sum")
#print(FL1_mo[0][0]+FL2_mo[0][0]+FL3_mo[0][0]+FL4_mo[0][0])
#print("FR1_t Momentum Rate")
#FR1_mo = np.cross(np.array(PR_init+np.array([0.11,0.06,0])-C_t).reshape((1,3)),np.array(FR1_t).reshape((1,3)))
#print(FR1_mo[0][0])
#print("FR2_t Momentum Rate")
#R2_mo = np.cross(np.array(PR_init+np.array([0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FR2_t).reshape((1,3)))
#print(FR2_mo[0][0])
#pint("FR3_t Momentum Rate")
#FR3_mo = np.cross(np.array(PR_init+np.array([-0.11,0.06,0])-C_t).reshape((1,3)),np.array(FR3_t).reshape((1,3)))
#print(FR3_mo[0][0])
#print("FR4_t Momentum Rate")
#FR4_mo = np.cross(np.array(PR_init+np.array([-0.11,-0.06,0])-C_t).reshape((1,3)),np.array(FR4_t).reshape((1,3)))
#print(FR4_mo[0][0])
#print("sum")
#print(FR1_mo[0][0]+FR2_mo[0][0]+FR3_mo[0][0]+FR4_mo[0][0])
print("FL1_t")
print(FL1_t)
print("FL2_t")
print(FL2_t)
print("FL3_t")
print(FL3_t)
print("FL4_t")
print(FL4_t)
print("FR1_t")
print(FR1_t)
print("FR2_t")
print(FR2_t)
print("FR3_t")
print(FR3_t)
print("FR4_t")
print(FR4_t)
print("C_end")
print(1.0*C_p0*(1 - T/T)**3 + 3.0*C_p1*(T/T)*(1 - T/T)**2 + 3.0*C_p2*(T/T)**2*(1 - (T/T)) + 1.0*(T/T)**3*Cy_res)
print("Cdot_end")
print(3.0*(T/T)**2*(-C_p2 + Cy_res)/T + 6.0*(T/T)*(1 - T/T)*(-C_p1 + C_p2)/T + 3.0*(1 - T/T)**2*(-C_p0 + C_p1)/T)
print("Cddot_end")
print(6.0*(T/T)*(C_p1 - 2*C_p2 + Cy_res)/T**2 + 6*(1.0 - 1.0*T/T)*(C_p0 - 2*C_p1 + C_p2)/T**2)
#print("L_end")
#print(1.0*L0*(1 - T/T)**4 + 4.0*L1*(T/T)*(1 - T/T)**3 + 6.0*L2_res*(T/T)**2*(1 - T/T)**2 + 4.0*L3_res*(T/T)**3*(1 - T/T) + 1.0*L4_res*(T/T)**4)
#print("Ldot_end")
#print((-L3_res + L4_res)*4/T*(T/T)**3 + (-L2_res + L3_res)*12.0/T*(T/T)**2*(1 - T/T)+ (-L1 + L2_res)*12.0/T*(T/T)*(1 - T/T)**2+ (-L0 + L1)*4.0/T*(1 - T/T)**3)
|
983,507 | 16cd475b1df0b0119b3d3aad803a23f95b042d6b | import socket
# Simple line-based echo client: read from stdin, send to the server on
# localhost:9999, print the echoed reply; a reply of 'stop' ends the session.
c = socket.socket()
c.connect(('localhost', 9999))
print('Client waiting for connection')
while True:
    data = input("Enter message:")
    c.sendall(data.encode())
    # Decode once and reuse (the original decoded the same payload twice).
    reply = c.recv(1024).decode()
    print("Server echoed:" + reply)
    if reply == 'stop':
        print('Stopping connection')  # fixed typo: "connetion"
        break
c.close()
983,508 | 9067178bf55a9e124651d4a7f8f287b02b5fbf87 | # register as module
|
983,509 | b0d3533c1fde97f2e91a594d663af247b8de6cee | from .big_controller import *
|
983,510 | 912f715ad2f92c7a9db52b9e7ff3e43984bbfc34 | from core.application.repositories.work_repository import WorkRepository
from core.application.CQRS.load_works_command import LoadWorksCommand
from core.domain.title import Title
from core.domain.contributor import Contributor
from core.domain.iswc import Iswc
from core.domain.source import Source
from core.domain.id import Id
from core.domain.work import Work
class LoadWorksCommandHandler:
    """Turns the raw work records of a LoadWorksCommand into Work domain
    objects and persists them in the work repository."""

    # Fixed: the original annotated `__workRepository` (name-mangled), which
    # never matched the `_workRepository` attribute actually assigned below.
    _workRepository: WorkRepository

    def __init__(self, workRepository: WorkRepository):
        self._workRepository = workRepository

    def handle(self, command: LoadWorksCommand):
        """Create and store a Work for every raw record in the command."""
        for rawWork in command.works():
            self._workRepository.add(self.createWork(rawWork))

    def createWork(self, rawWork):
        """Build a Work from a raw dict with keys: title, contributors
        ('|'-separated), iswc, source, id."""
        title = Title(rawWork['title'])
        contributors = [Contributor(name) for name in rawWork['contributors'].split('|')]
        iswc = Iswc(rawWork['iswc'])
        source = Source(rawWork['source'])
        work_id = Id(rawWork['id'])  # renamed local: `id` shadowed the builtin
        return Work(title, contributors, iswc, source, work_id)
|
983,511 | 8bc3912b7a24b4385e2c72f269f83062a6f71da1 | """
BSD 3-Clause License
Copyright (c) Soumith Chintala 2016,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the BSD 3-Clause License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://spdx.org/licenses/BSD-3-Clause.html
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function, absolute_import
import glob
import numpy as np
import os.path as osp
from torchreid.utils import read_json, write_json
from ..dataset import ImageDataset
class VIPeR(ImageDataset):
    """VIPeR.

    Reference:
        Gray et al. Evaluating appearance models for recognition, reacquisition, and tracking. PETS 2007.

    URL: `<https://vision.soe.ucsc.edu/node/178>`_

    Dataset statistics:
        - identities: 632.
        - images: 632 x 2 = 1264.
        - cameras: 2.
    """
    dataset_dir = 'viper'
    dataset_url = 'http://users.soe.ucsc.edu/~manduchi/VIPeR.v1.0.zip'

    def __init__(self, root='', split_id=0, **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.download_dataset(self.dataset_dir, self.dataset_url)
        self.cam_a_dir = osp.join(self.dataset_dir, 'VIPeR', 'cam_a')
        self.cam_b_dir = osp.join(self.dataset_dir, 'VIPeR', 'cam_b')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')
        required_files = [self.dataset_dir, self.cam_a_dir, self.cam_b_dir]
        self.check_before_run(required_files)
        # Build splits.json on first use, then load the requested split.
        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError(
                'split_id exceeds range, received {}, '
                'but expected between 0 and {}'.format(
                    split_id,
                    len(splits) - 1
                )
            )
        split = splits[split_id]
        train = split['train']
        query = split['query'] # query and gallery share the same images
        gallery = split['gallery']
        # JSON stores lists; the dataset API expects (img_path, pid, camid) tuples.
        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]
        super(VIPeR, self).__init__(train, query, gallery, **kwargs)

    def prepare_split(self):
        # Create 10 random identity splits (20 usable sub-splits) if absent.
        if not osp.exists(self.split_path):
            print('Creating 10 random splits of train ids and test ids')
            cam_a_imgs = sorted(glob.glob(osp.join(self.cam_a_dir, '*.bmp')))
            cam_b_imgs = sorted(glob.glob(osp.join(self.cam_b_dir, '*.bmp')))
            assert len(cam_a_imgs) == len(cam_b_imgs)
            num_pids = len(cam_a_imgs)
            print('Number of identities: {}'.format(num_pids))
            num_train_pids = num_pids // 2
            """
            In total, there will be 20 splits because each random split creates two
            sub-splits, one using cameraA as query and cameraB as gallery
            while the other using cameraB as query and cameraA as gallery.
            Therefore, results should be averaged over 20 splits (split_id=0~19).
            In practice, a model trained on split_id=0 can be applied to split_id=0&1
            as split_id=0&1 share the same training data (so on and so forth).
            """
            splits = []
            for _ in range(10):
                order = np.arange(num_pids)
                np.random.shuffle(order)
                train_idxs = order[:num_train_pids]
                test_idxs = order[num_train_pids:]
                assert not bool(set(train_idxs) & set(test_idxs)), \
                    'Error: train and test overlap'
                train = []
                for pid, idx in enumerate(train_idxs):
                    cam_a_img = cam_a_imgs[idx]
                    cam_b_img = cam_b_imgs[idx]
                    train.append((cam_a_img, pid, 0))
                    train.append((cam_b_img, pid, 1))
                test_a = []
                test_b = []
                for pid, idx in enumerate(test_idxs):
                    cam_a_img = cam_a_imgs[idx]
                    cam_b_img = cam_b_imgs[idx]
                    test_a.append((cam_a_img, pid, 0))
                    test_b.append((cam_b_img, pid, 1))
                # use cameraA as query and cameraB as gallery
                split = {
                    'train': train,
                    'query': test_a,
                    'gallery': test_b,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)
                # use cameraB as query and cameraA as gallery
                split = {
                    'train': train,
                    'query': test_b,
                    'gallery': test_a,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)
            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))
|
983,512 | 72c010e5c184e6118d44c888719fdaee3330f399 | def find(li1, li2):
lx = []
lx2 = []
y = z1 = z2 = xx = yy = zz2 = 0
for a1 in li2:
y = y + 1
z1 = 1
for a2 in li1:
z2 = 1
for a3 in li2:
if a2 == a3 and z2 == zz2 + 1:
lx.append(z1)
xx = xx + 1
zz2 = z2
z2 = z2 + 1
if (xx == y):
lx2.append(lx[0])
lx = []
xx = 0
zz2 = 0
z2 = z2 + 1
z1 = z1 + 1
return lx2
# Demo calls (Python 2 print statements).
print find('gta', 'gt')
print find('aagtaaaa', 'aaa')
print find('gtcgtcgtc', 'gtc')
print find('gtaaacgtcgaataac', 'gtc')
|
983,513 | fc1748e5d7020c09edd38018757f1dd61aad1e8f | from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
#Config stuff...should move to file
UPLOAD_FOLDER = 'brew_py/uploads'
ALLOWED_EXTENSIONS = set(['xml'])
#initialize app with some configuration
app = Flask(__name__)
# NOTE(review): database credentials and SECRET_KEY are hardcoded below —
# move them to environment variables / a config file and rotate the secrets.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://devuser:josh23941@localhost:3306/brew_pye'
app.config['CSRF_ENABLED'] = True
app.config['SECRET_KEY'] = 'josh23941'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#Setup flask-login manager extension
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = '/login'
#imports all routes to views
import controller
#Gets the SQLAlchemy object 'db' after models have been setup
#the db is not attached to the app yet so which allows for easy swapping in of the test db
from models.shared_models import db
|
983,514 | b38f112effab77fa1a70aaea2d069f2b9083b3a5 | import pytest
from A import solve
def test_solve():
assert solve('ABA') == 'Yes'
assert solve('BBA') == 'Yes'
assert solve('BBB') == 'No'
|
983,515 | 4354a604718a97dedb4c46b9e6e0377c53178d05 | import time
seconds = 0
while True:
seconds += 1
print('hello')
time.sleep(seconds)
|
983,516 | e1b8b510572a67f94cb6077688150347ab02eac7 | #!/usr/bin/env python
"""IPv6 HBH MTU record"""
import unittest
from scapy.layers.inet6 import IPv6, Ether, IP, UDP, ICMPv6PacketTooBig, IPv6ExtHdrHopByHop
from scapy.layers.inet import ICMP
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath
from socket import AF_INET, AF_INET6, inet_pton
from util import reassemble4
from scapy.all import *
from scapy.layers.inet6 import _OTypeField, _hbhopts, _hbhoptcls
""" Test_mtu is a subclass of VPPTestCase classes.
MTU tests.
"""
class MTURecord(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "HBH MTU record"
fields_desc = [_OTypeField("otype", 0x3e, _hbhopts),
ByteField("optlen", 4),
ShortField("mtu1", 0),
ShortField("mtu2", 0),]
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2
y = 0
delta = x * ((curpos - y + x - 1) // x) + y - curpos
return delta
def extract_padding(self, p):
return b"", p
_hbhoptcls[0x3E] = MTURecord
class TestHBHMTU(VppTestCase):
""" HBH MTU Test Case """
maxDiff = None
@classmethod
def setUpClass(cls):
super(TestHBHMTU, cls).setUpClass()
cls.create_pg_interfaces(range(2))
cls.interfaces = list(cls.pg_interfaces)
def setUp(self):
super(TestHBHMTU, self).setUp()
for i in self.interfaces:
i.admin_up()
i.config_ip4()
i.config_ip6()
i.disable_ipv6_ra()
i.resolve_arp()
i.resolve_ndp()
def tearDown(self):
super(TestHBHMTU, self).tearDown()
if not self.vpp_dead:
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
def validate(self, rx, expected):
self.assertEqual(rx, expected.__class__(expected))
def validate_bytes(self, rx, expected):
self.assertEqual(rx, expected)
def payload(self, len):
return 'x' * len
def get_mtu(self, sw_if_index):
rv = self.vapi.sw_interface_dump()
for i in rv:
if i.sw_if_index == sw_if_index:
return i.mtu[0]
return 0
def test_ip6_mtu(self):
""" IP6 MTU test """
# Send small packet with HBH option. Verify that router modifies it.
current_mtu = self.get_mtu(self.pg1.sw_if_index)
# MTU (only checked on encap)
self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, [1280, 0, 0, 0])
self.assertEqual(1280, self.get_mtu(self.pg1.sw_if_index))
p_ether = Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
p_ip6 = IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6)
hbh_mtu_option = MTURecord(mtu1=9000)
p_hbh = IPv6ExtHdrHopByHop(len=0, nh=17, options=hbh_mtu_option)
p_payload = UDP(sport=1234, dport=1234)
p6 = p_ether / p_ip6 / p_hbh / p_payload
p6.show2()
p6_reply = p_ip6 / IPv6ExtHdrHopByHop(len=0, nh=17, options=MTURecord(mtu1=1280)) / p_payload
p6_reply.hlim -= 1
rx = self.send_and_expect(self.pg0, p6*1, self.pg1)
for p in rx:
p.show2()
self.validate(p[1], p6_reply)
'''
# Should fail. Too large MTU
p_icmp6 = ICMPv6PacketTooBig(mtu=1280, cksum=0x4c7a)
icmp6_reply = (IPv6(src=self.pg0.local_ip6,
dst=self.pg0.remote_ip6,
hlim=255, plen=1240) /
p_icmp6 / p_ip6 / p_payload)
icmp6_reply[2].hlim -= 1
n = icmp6_reply.__class__(icmp6_reply)
s = bytes(icmp6_reply)
icmp6_reply_str = s[0:1280]
rx = self.send_and_expect(self.pg0, p6*9, self.pg0)
for p in rx:
self.validate_bytes(bytes(p[1]), icmp6_reply_str)
'''
# Reset MTU
self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index,
[current_mtu, 0, 0, 0])
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
983,517 | 84c1220cb333c5316fa41c19c032fdd7ab39982a | import re
# regex = re.compile(r"[aieyou]",re.I|re.M)
regex = re.compile("""(?sx)
.+ # this match every sumbol""")
str = """
'''TEST
this function 2 {}does something
'''
"""
m = regex.search(str)
print(m.group(0)) |
983,518 | 67c95f034966eb6773b6caa2acd4f8a79a8b47d7 | # In-class exercise
# input a number, iterate 5 times, and get running total;
def main():
total = 0
for _ in range(5):
num = float(input("Please enter a number: "))
total += num
print("Total:", total)
main() |
983,519 | 4a4c811a5b1b91980addbb1ea524d3be86f8bdff | #Building a basic Scheduling algorithm for IR with the given data
#in the excel file.
#****************************************************************
#Importing all required modules
#****************************************************************
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import scipy
import xlwt
import xlrd
#*****************************************************************
Excel_File = "Input Data.xlsx"
Ship_Alive_Days = 10
StartDay = 0
EndDay = 32
def FCFS_SCHEDULING(Excel_File, Ship_Alive_Days, StartDay, EndDay):
Data = input_excel(Excel_File)
Alive_Data,Dead_Data = remove_dead_ship(Data,Ship_Alive_Days)
Sort_Data_Reg = Sort_reg_wise(Alive_Data)
Rake_Avail_Data,Rake_Not_Avail_Data = rake_available(Sort_Data_Reg)
#Capacity_Matrix = input_excel(Capa_Mat_file)
#print "Capacity Matrix",Capacity_Matrix
Capacity_Matrix = ter_cap_matrix(Rake_Avail_Data,StartDay,EndDay)
Scheduled_Data,Updated_Capacity_Matrix = schedulingFCFS(Rake_Avail_Data,Capacity_Matrix,StartDay,EndDay)
workbook = xlwt.Workbook()
write_excel(Rake_Avail_Data,"Rake available",workbook)
write_excel(Scheduled_Data,"Scheduled Data",workbook)
write_excel(Updated_Capacity_Matrix,"Capacity Matrix",workbook)
return Scheduled_Data,Updated_Capacity_Matrix
def input_excel(file_name):
#Taking the data from the excel file as input
workbook = xlrd.open_workbook(file_name)
sheet = workbook.sheet_by_index(0)
#print "Sheet number",z
row = sheet.nrows
column = sheet.ncols
#print row,column
#print sheet
#Appending the values row-wise
row_wise_data = [] #make a data store
for i in range(2,sheet.nrows):
row_wise_data.append(sheet.row_values(i)) #drop all the values in the rows into data
#print "Data from excel sheet",row_wise_data
return row_wise_data
def remove_dead_ship(data,daysAlive):
#Romoving the demand data whose date is exceeded and
#they don't want to keep their shipment alive
for i in range(len(data[0])):
if(data[0][i]=='Ship_alive'):
ShipAliveIndex = i
if(data[0][i]=='WRD'):
WRTindex = i
live_shipment_data = []
dead_shipment_data = []
live_shipment_data.append(data[0])
dead_shipment_data.append(data[0])
for j in range(1,len(data)):
if(data[j][WRTindex] > daysAlive and data[j][ShipAliveIndex] == 'N'):
dead_shipment_data.append(data[j])
else:
live_shipment_data.append(data[j])
#print "Data after removing dead shipments",live_shipment_data
#print "Data of dead shipments",dead_shipment_data
return live_shipment_data,dead_shipment_data
def Sort_reg_wise(alive_data):
for k in range(len(alive_data[0])):
if(alive_data[0][k]=='R_no'):
RegNo = k
break
for i in range(1,len(alive_data)-1):
temp = []
for j in range(i+1,len(alive_data)):
if(alive_data[i][RegNo]>alive_data[j][RegNo]):
temp = alive_data[i]
alive_data[i] = alive_data[j]
alive_data[j] = temp
#print "Data After Acending order of ragistration", alive_data
return alive_data
def rake_available(data):
for k in range(len(data[0])):
if(data[0][k]=='Rake_avail'):
LTrakeAvailIndex = k
break
data_rake_avail = []
data_rake_not_avail = []
data_rake_avail.append(data[0])
data_rake_not_avail.append(data[0])
for i in range(1,len(data )):
if(data[i][LTrakeAvailIndex]=='Y'):
data_rake_avail.append(data[i])
else:
data_rake_not_avail.append(data[i])
#print "Data where rake is available, can load", data_rake_avail
#print "Data where rake is not available", data_rake_not_avail
return data_rake_avail,data_rake_not_avail
def write_excel(data,name,workbook):
sheet = workbook.add_sheet(name)
for a in range(len(data)):
for b in range(len(data[0])):
sheet.write(a,b, data[a][b])
workbook.save('result.xls')
def ter_cap_matrix(Rake_Avail_Data,StartDay,EndDay):
for k in range(len(Rake_Avail_Data[0])):
if(Rake_Avail_Data[0][k] == 'LTj'):
LTindex = k
if(Rake_Avail_Data[0][k]=='UTj'):
UTindex = k
Ter_names =[]
for i in range(1,len(Rake_Avail_Data)):
if(Rake_Avail_Data[i][LTindex] not in Ter_names):
Ter_names.append(Rake_Avail_Data[i][LTindex])
if(Rake_Avail_Data[i][UTindex] not in Ter_names):
Ter_names.append(Rake_Avail_Data[i][UTindex])
#print "List of Terminals Name ",Ter_names
Cap_matrix = []
Cap_matrix.append([''])
Cap_matrix[0] = Cap_matrix[0] + list(xrange(StartDay,EndDay))
for j in range(0,len(Ter_names)):
Cap_matrix.append([Ter_names[j]])
for j1 in range(StartDay+1,EndDay+1):
Cap_matrix[j+1].append(0)
#print "Capacity matrix", Cap_matrix
return Cap_matrix
def schedulingFCFS(data_rake_avail,Cap_matrix,startDay,endDay):
for i1 in range(len(data_rake_avail[0])):
if(data_rake_avail[0][i1] == 'LTj'):
LTindex = i1
if(data_rake_avail[0][i1]=='UTj'):
UTindex = i1
if(data_rake_avail[0][i1]=='Tj'):
TravelTindex = i1
if(data_rake_avail[0][i1]=='Uj'):
UTimeIndex = i1
if(data_rake_avail[0][i1]=='Lj'):
LTimeIndex = i1
if(data_rake_avail[0][i1]=='Pj'):
ProTimeIndex = i1
if(data_rake_avail[0][i1]=='H_lt'):
HCapLTindex = i1
if(data_rake_avail[0][i1]=='H_ut'):
HCapUTindex = i1
schedule = []
sch_heading = ["StartLoadingLT","LeaveLT","WaitAtLT", "ArriveUT","DepartUT","WaitAtUT"]
schedule.append(data_rake_avail[0]+sch_heading)
schedule = schedule + data_rake_avail[1:]
StartDIndex = len(data_rake_avail[0])
LeaveDIndex = StartDIndex + 1
WaitLTIndex = LeaveDIndex + 1
ArriveDIndex = WaitLTIndex +1
DepartDIndex = ArriveDIndex + 1
WaitUTIndex = DepartDIndex + 1
#print "Index of starting day of train from loading terminal",StartDIndex
#print "Index of leaving day of train from loading terminal",LeaveDIndex
#print "Index of Waiting column at Loading terminal for trains",WaitLTIndex
#print "Index of arriving day of train at the unloading terminal",ArriveDIndex
#print "Index of departing day of train at the unloading terminal",DepartDIndex
#print "Index of Waiting column at unloading terminal for trains",WaitUTIndex
#print "Data After adding header", schedule
for k in range(1,len(schedule)):
stop = False
for k1 in range(1,len(Cap_matrix)):
if(Cap_matrix[k1][0]==schedule[k][UTindex]):
print "Planning unloaidng Terminal",schedule[k][UTindex]
for StartDay in range(startDay,endDay):
signal = 0
for k2 in range(StartDay+int(schedule[k][LTimeIndex]+schedule[k][TravelTindex]),int(StartDay+schedule[k][ProTimeIndex])):
#print "range for unloading checking",k2
if(Cap_matrix[k1][k2+1] < schedule[k][HCapUTindex]):
signalUT = 0
signal = signal + signalUT
#print "Signal of availability at unloading terminal",signal
else:
signalUT = 1
signal = signal + signalUT
#print "Signal of non or /availability at unloading terminal",signal
if(signal ==0):
print "Unloading terminal available, now Check for loading requirements"
for k3 in range(1,len(Cap_matrix)):
if(Cap_matrix[k3][0]==schedule[k][LTindex]):
print "Planning for loadidng Terminal",schedule[k][LTindex]
signal1 = 0
#print "Planning Loading TerminalTerminal",Cap_matrix[k3][0]
for k4 in range(StartDay,StartDay+int(schedule[k][LTimeIndex])):
#print "range for loading checking",k4
if(Cap_matrix[k3][k4+1] < schedule[k][HCapLTindex]):
signalLT = 0
signal1 = signal1 + signalLT
#print "Signal of availability at loading terminal",signal1
else:
signalLT = 1
signal1 = signal1 + signalLT
#print "Signal of non/ availability at loading terminal",signal1
if(signal1 == 0):
print"Loading Terminal Available, update the capacity matrix and make the schedule"
schedule[k].append(StartDay)
waitAtLT =0
if(Cap_matrix[k3][StartDay:int(StartDay+schedule[k][LTimeIndex])]>0):
for k6 in range(k-1,0,-1):
if(schedule[k6][LTindex]==schedule[k][LTindex]):
waitAtLT = schedule[k6][LeaveDIndex]-schedule[k][StartDIndex]
break;
if(waitAtLT<0):
waitAtLT =0
schedule[k].append(StartDay+ waitAtLT +schedule[k][LTimeIndex])
schedule[k].append(schedule[k][LeaveDIndex]-schedule[k][StartDIndex])
schedule[k].append(schedule[k][LeaveDIndex]+schedule[k][TravelTindex])
waitAtUT =0
if(Cap_matrix[k1][int(schedule[k][ArriveDIndex]):int(schedule[k][ArriveDIndex]+schedule[k][UTimeIndex])]>0):
for k7 in range(k-1,0,-1):
if(schedule[k7][UTindex]==schedule[k][UTindex]):
print
waitAtUT = schedule[k7][DepartDIndex]-schedule[k][ArriveDIndex]
break;
if(waitAtUT<0):
waitAtUT =0
schedule[k].append(schedule[k][ArriveDIndex]+ waitAtUT +schedule[k][UTimeIndex])
schedule[k].append(schedule[k][DepartDIndex] - schedule[k][ArriveDIndex])
for k8 in range(int(schedule[k][StartDIndex]),int(schedule[k][LeaveDIndex])):
Cap_matrix[k3][k8+1] = Cap_matrix[k3][k8+1]+1
for k9 in range(int(schedule[k][ArriveDIndex]),int(schedule[k][DepartDIndex])):
Cap_matrix[k1][k9+1] = Cap_matrix[k1][k9+1]+1
stop = True
else:
print "Again recheck for next schedule of UT"
else:
print "Unloading terminal not available"
if(stop == True):
print"loop breaked"
break;
if(stop==False):
print "Not able to schedule the train from Terminal -"+str(schedule[k][LTindex])+" to "+str(schedule[k][UTindex]) +"in this time frame"
#print "Updated Data",schedule
#print "Capacity matrix",Cap_matrix
return schedule,Cap_matrix
|
983,520 | 7a731180c3c326996f97299306976301d82bdd9a | # from django.contrib import admin
#
# from .models import Favorite
#
# admin.site.register(Favorite) |
983,521 | a371bfc6c8efc5a6bbe6522967241dd677ed9798 | import logging
logger = logging.getLogger("main")
class CommonMiddleware(object):
"""
リクエストとレスポンスの共通処理
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
self.process_request(request)
response = self.get_response(request)
self.process_response(request, response)
return response
def process_request(self, request):
"""
前処理
:param request:
:return:
"""
logger.info("リクエストの処理")
def process_response(self, request, response):
"""
後処理
:param request:
:param response:
:return:
"""
logger.info("レスポンスの処理") |
983,522 | 155dbbbfa61cb928ee4774d6d1bbc565e65b9ffa | import datetime
import os
import concurrent.futures
import posixpath
if os.getenv("JIRAFTS_DEBUG", False):
import requests_cache
requests_cache.install_cache()
print("Using cache for HTTP queries")
import requests
import xmltodict
class JiraDownloader:
DATE_FORMAT = "%a, %d %b %Y %H:%M:%S %z"
SEARCH_DATE_FORMAT = "%Y-%m-%d %H:%M"
DATE_FIELDS = ["updated", "created"]
def __init__(self, url, auth=None, concurrency=8):
self._client = requests.Session()
self._auth = auth
self._url = url
self._concurrency = concurrency
def iter_issues(self, projects=None, limit=None, per_page=50, min_date=None, max_date=None):
start = 0
if limit:
per_page = min(per_page, limit)
total = 0
query_parts = []
if projects:
query_parts.append("project in ({})".format(", ".join(projects)))
if min_date and max_date:
query_parts.append(
"NOT (updated > '{min_date}' AND updated < '{max_date}')".format(
min_date=min_date.strftime(self.SEARCH_DATE_FORMAT),
max_date=max_date.strftime(self.SEARCH_DATE_FORMAT),
)
)
query = "{} ORDER BY updated DESC".format(" AND ".join(query_parts))
data = self._request_issues(query=query, start=start, per_page=per_page)
total_results = int(data["issue"]["@total"])
if not total_results:
print("No results downloaded. Wrong query or auth data")
return
futures = [concurrent.futures.Future()]
futures[0].set_result(data)
if not limit:
limit = total_results
elif limit > total_results:
limit = total_results
with concurrent.futures.ThreadPoolExecutor(self._concurrency) as tp:
for start in range(per_page, limit, per_page):
fut = tp.submit(self._request_issues, query=query, start=start, per_page=per_page)
futures.append(fut)
try:
for i, fut in enumerate(concurrent.futures.as_completed(futures), 1):
data = fut.result()
items = data["item"]
if not isinstance(items, list):
items = [items]
for item in items:
for field in self.DATE_FIELDS:
item[field] = self._string_to_date(item[field])
yield item, total_results
total += 1
if total > limit:
return
except KeyboardInterrupt:
cancelled = 0
for fut in futures:
if fut.cancel():
cancelled += 1
print("Futures canceled {} / {}. Shutting down loop, please wait...".format(cancelled, len(futures)))
tp.shutdown()
def _request_issues(self, query="order by updated DESC", start=0, per_page=10):
resp = self._client.get(
posixpath.join(self._url, "sr/jira.issueviews:searchrequest-xml/temp/SearchRequest.xml"),
auth=self._auth,
params={
"jqlQuery": query,
"pager/start": start, "tempMax": per_page,
}
)
if resp.status_code != 200:
raise Exception(resp.content)
parsed = self.parse_jira_xml(resp.content)
result = parsed["rss"]["channel"]
return result
def parse_jira_xml(self, content):
return xmltodict.parse(content, dict_constructor=dict)
def _string_to_date(self, s):
return datetime.datetime.strptime(s, self.DATE_FORMAT).replace(tzinfo=None)
|
983,523 | 2fad5c33ee29d66d94a98547b106d9e9493c2e46 | from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('types', views.TypeViewSet)
router.register('objects', views.ObjectViewSet, base_name='object')
router.register('objects-geo', views.ObjectGeoViewSet, base_name='objects-geo')
urlpatterns = [
path('', include(router.urls)),
path('api/', include('rest_framework.urls'))
]
|
983,524 | e1ea14022201e5e9077855db85af27a542469a2a |
# Required libraries: Pandas, Numpy
# raw_data is must be a pandas dataframe.
def dataset_profiler(raw_data):
df_prof=pd.DataFrame(columns=["column name","count","number of unique","number of null value","is binary?","number of 1","data type","fill rate"],index=np.arange(0,len(raw_data.columns)))
columns = raw_data.columns
ctr=0
for column in columns:
df_prof["column name"][ctr]=column
df_prof["count"][ctr]=raw_data[column].count()
df_prof["number of unique"][ctr]=raw_data[column].nunique()
df_prof["number of null value"][ctr] = raw_data[column].isnull().sum()
df_prof["is binary?"][ctr]=False
df_prof["number of 1"][ctr]=0
df_prof["data type"][ctr] = str(raw_data[column].dtype).split('(')[0]
df_prof["fill rate"][ctr] = raw_data[column].count()/len(raw_data)
if raw_data[column].dropna().value_counts().index.isin([0,1]).all()==True and raw_data[column].nunique()==2:
df_prof["is binary?"][ctr]=True
df_prof["number of 1"][ctr]=(raw_data[column]==1).sum()
ctr+=1
return df_prof
|
983,525 | 402af976489167f7b5faa769c6d5b505ffb88f33 | import argparse
from .cli import SearchEngineShell
def parse_args():
parser = argparse.ArgumentParser(description='Search engine'
' cli interface')
return parser.parse_args().__dict__
parse_args()
shell = SearchEngineShell()
shell.cmdloop()
|
983,526 | 8cd4ccfd7b2d49a6cfb1514481a5c0f31014c702 | from rest_framework import serializers
from ..models import Posts
from users.api.serializers import UserModelSerializer
class PostModelDetailSerializer(serializers.ModelSerializer):
user = UserModelSerializer(read_only= True)
class Meta:
model = Posts
fields = "__all__"
class PostModelListSerializer(serializers.ModelSerializer):
class Meta:
model = Posts
fields = [
'content',
'image',
'privacy',
]
|
983,527 | b9c96c2ee3a9d9f99543195e0689ff061f0ae641 | import com.ihsan.foundation.pobjecthelper as phelper
import sys
def FormSetDataEx(uideflist,params):
config = uideflist.Config
#*** Set List Data Debtor
strSQL = GetSQLDebtor(config)
resSQL = config.CreateSQL(strSQL).RawResult
resSQL.First()
dsDebtor = uideflist.uipDebtor.Dataset
while not resSQL.Eof :
rec = dsDebtor.AddRecord()
rec.DebtorId = resSQL.DebtorId
rec.DebtorName = resSQL.DebtorName
resSQL.Next()
# end while
#*** Set List Data Employee + Account Receivable
strSQL = GetSQLEmployee(config)
resSQL = config.CreateSQL(strSQL).RawResult
resSQL.First()
dsEmployee = uideflist.uipEmployee.Dataset
while not resSQL.Eof :
rec = dsEmployee.AddRecord()
rec.EmployeeId = resSQL.EmployeeId
rec.EmployeeName = resSQL.EmployeeName
resSQL.Next()
# end while
def GetSQLEmployee(config,Name=None):
#BranchId = int(config.SecurityContext.GetUserInfo()[2])
#strSQL = "select EmployeeId,EmployeeName from vemployee where branch_id=%d" % oBranch.GroupBranchId
GroupBranchCode = str(config.SecurityContext.GetUserInfo()[3])
# Employee
strSQL = "select EmployeeId,EmployeeName from vemployee a "
strSQL += " where exists( select 1 from transaction.branch tb \
where tb.BranchId=a.branch_id and \
GroupBranchCode= '%s' ) "% GroupBranchCode
if Name != None :
strSQL += " and upper(employeename) like '%%%s%%' " % (Name.upper())
# Existing Account Receivable not include Employee list
strSQL += " union "
strSQL += " select employeeidnumber as EmployeeId,a.accountname as EmployeeName \
from transaction.financialaccount a, transaction.accountreceivable b \
where a.accountno=b.accountno and b.accountreceivabletype='E' \
and exists( select 1 from transaction.branch tb \
where tb.branchcode=a.branchcode and \
GroupBranchCode= '%s' ) " % GroupBranchCode
strSQL += " and not exists( select 1 from transaction.vemployee v, transaction.branch tb \
where tb.branchid=v.branch_id \
and tb.groupbranchcode='%s' \
and v.employeeid=b.employeeidnumber) " % GroupBranchCode
if Name != None :
strSQL += " and upper(accountname) like '%%%s%%' " % (Name.upper())
strSQL += " order by employeename "
return strSQL
def GetSQLDebtor(config,Name=None):
BranchCode = config.SecurityContext.GetUserInfo()[4]
strSQL = "select DebtorId,DebtorName from ExternalDebtor where BranchCode='%s'" % BranchCode
if Name != None :
strSQL += " and upper(DebtorName) like '%%%s%%' " % (Name.upper())
strSQL += " order by DebtorName "
return strSQL
def GetDataDebtor(config,params,returns):
status = returns.CreateValues(
['Is_Err',0],
['Err_Message',''],
)
dsDebtor = returns.AddNewDatasetEx(
'ListDebtor',
';'.join([
'DebtorId: integer',
'DebtorName: string',
'BranchCode: string',
'BranchName: string',
])
)
try:
NameFilter = params.FirstRecord.NameFilter
strSQL = GetSQLDebtor(config,NameFilter)
resSQL = config.CreateSQL(strSQL).RawResult
resSQL.First()
while not resSQL.Eof :
rec = dsDebtor.AddRecord()
rec.DebtorId = resSQL.DebtorId
rec.DebtorName = resSQL.DebtorName
resSQL.Next()
# end while
except:
status.Is_Err = 1
status.Err_Message = str(sys.exc_info()[1])
def GetDataEmployee(config,params,returns):
status = returns.CreateValues(
['Is_Err',0],
['Err_Message',''],
)
dsEmployee = returns.AddNewDatasetEx(
'ListEmployee',
';'.join([
'EmployeeId: integer',
'EmployeeName: string',
'BranchId: integer',
'BranchName: string',
])
)
try:
NameFilter = params.FirstRecord.NameFilter
strSQL = GetSQLEmployee(config,NameFilter)
resSQL = config.CreateSQL(strSQL).RawResult
resSQL.First()
while not resSQL.Eof :
rec = dsEmployee.AddRecord()
rec.EmployeeId = resSQL.EmployeeId
rec.EmployeeName = resSQL.EmployeeName
resSQL.Next()
# end while
except:
status.Is_Err = 1
status.Err_Message = str(sys.exc_info()[1])
|
983,528 | c97b5d70c5da235dd5f30ff68c2ceed6a3e643ba | import rat
import ROOT
import sys
import runCheckTools as RCT
import json
# Get command line arguments
cmdArgs = sys.argv
file_name = '~/Desktop/RAT_files/405_nm_sim/output.root'
fileIterator = rat.dsreader(file_name)
for iEntry, anEntry in enumerate(fileIterator):
print("########################## NEW MC ######################")
MC = anEntry.GetMC()
num_tracks = MC.GetMCTrackCount()
print('num_tracks: ', num_tracks)
track = MC.GetMCTrack(0)
for i in range(track.GetMCTrackStepCount()):
print("######### NEW STEP #########")
step = track.GetMCTrackStep(i)
endpoint = step.GetEndpoint()
volume = step.GetVolume()
momentum = step.GetMomentum()
process = step.GetProcess()
print('endpoint: ', endpoint[0], endpoint[1], endpoint[2])
print('momentum: ', momentum[0], momentum[1], momentum[2])
print('volume: ', volume)
print('process: ', process)
|
983,529 | 974596d40cb51425ea571cf1404d54fbeda932ec | '''print("""
*******************************************************************************
| | | |
_________|________________.=""_;=.______________|_____________________|_______
| | ,-"_,="" `"=.| |
|___________________|__"=._o`"-._ `"=.______________|___________________
| `"=._o`"=._ _`"=._ |
_________|_____________________:=._o "=._."_.-="'"=.__________________|_______
| | __.--" , ; `"=._o." ,-"""-._ ". |
|___________________|_._" ,. .` ` `` , `"-._"-._ ". '__|___________________
| |o`"=._` , "` `; .". , "-._"-._; ; |
_________|___________| ;`-.o`"=._; ." ` '`."\` . "-._ /_______________|_______
| | |o; `"-.o`"=._`` '` " ,__.--o; |
|___________________|_| ; (#) `-.o `"=.`_.--"_o.-; ;___|___________________
____/______/______/___|o;._ " `".o|o_.--" ;o;____/______/______/____
/______/______/______/_"=._o--._ ; | ; ; ;/______/______/______/_
____/______/______/______/__"=._o--._ ;o|o; _._;o;____/______/______/____
/______/______/______/______/____"=._o._; | ;_.--"o.--"_/______/______/______/_
____/______/______/______/______/_____"=.o|o_.--""___/______/______/______/____
/______/______/______/______/______/______/______/______/______/______/_____ /
*******************************************************************************
""")'''
print("Welcome to Treasure Island.")
print("Your mission is to find the treasure.")
while 1:
choice1=input("You are at a cross road. Where do you want to go? LEFT or RIGHT\nType 'L' for LEFT or 'R' for RIGHT: ")
choice1=choice1.upper() # If user has entered choice1 in lower case, No worry. This statement will make it upper case
if choice1=='R':
print("You met with a road accident. You are dead. Game over.")
break
elif choice1=='L':
while 1:
choice2=input('''\nYou came to a lake.here is an island in the middle of the lake. You wanna swim or wait for a boat?
Type 'W' to wait for a boat. Type 'S' to swim across: ''').upper()
if choice2=='S':
print("You are attacked by a big fish. You are dead. Game over.")
break
elif choice2=='W':
while 1:
choice3=input('''\nYou arrive at the island unharmed. There is a house with 3 doors.One red, one yellow and one blue.
Which door you wanna open? Type 'R' foe RED, 'Y' for YELLOW and 'B' for BLUE: ''').upper()
if choice3=='R':
print("You got attacked by an angry trout.You are dead. Game Over.")
break
elif choice3=='Y':
print("You choosed a door that doesn't exist. Game over.")
break
elif choice3=='B':
print("You came across the treasure. You won.")
break
else:
print("Invalid choice!!")
continue
break
else:
print("Invalid choice!!")
continue
break
else:
print("Invalid choice!!")
continue
|
983,530 | 93f11735a800f12c0ce3c42c74d125e346b906a4 | from itertools import combinations as cb
def cal_dist(pos1, pos2):
return abs(pos1[0]-pos2[0]) + abs(pos1[1]-pos2[1])
n, m = map(int, input().split())
city = list()
for i in range(n):
city.append(list(map(int, input().split())))
house = list()
chicken = list()
for i in range(n):
for j in range(n):
if city[i][j] == 1:
house.append([i, j])
elif city[i][j] == 2:
chicken.append([i, j])
ans = 100000000000
for comb in cb(chicken, m):
tmp_sum = 0
for x in house:
tmp_sum += min((cal_dist(x, pos)) for pos in comb)
ans = min(ans, tmp_sum)
print(ans)
|
983,531 | 7b3a3ce6c1c4650866c1f36697fe9e9705701ff7 | """
All my custom functions which are used in the Telegram Bot
"""
def welcome(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text='Welcome to my Bot')
def unknown(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text='Invalid Command!')
|
983,532 | 2724e3719380d62afa5466745d7efb31d193b48a | #!/usr/bin/env python
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from stix.core import STIXPackage
def parse_stix(pkg):
print("== INCIDENT ==")
for inc in pkg.incidents:
print("---")
print("Reporter: " + inc.reporter.identity.name)
print("Title: " + inc.title)
print("Description: " + str(inc.description))
print("Confidence: " + str(inc.confidence.value))
for impact in inc.impact_assessment.effects:
print("Impact: " + str(impact))
print("Initial Compromise: " + str(inc.time.initial_compromise.value))
print("Incident Discovery: " + str(inc.time.incident_discovery.value))
print("Restoration Achieved: " + str(inc.time.restoration_achieved.value))
print("Incident Reported: " + str(inc.time.incident_reported.value))
for victim in inc.victims:
print("Victim: " + str(victim.name))
return 0
if __name__ == '__main__':
try:
fname = sys.argv[1]
except:
exit(1)
fd = open(fname)
stix_pkg = STIXPackage.from_xml(fd)
parse_stix(stix_pkg)
|
983,533 | 87d0f471e64d199081d983be7c5fb1654d0daa06 | # Copyright (c) OpenMMLab. All rights reserved.
from .sep_aspp_contrast_head import DepthwiseSeparableASPPContrastHead
__all__ = ['DepthwiseSeparableASPPContrastHead']
|
983,534 | cf1a839c5d80d570e7d08538c3aa9bf2c7a98a7b | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 by Brian Horn, trycatchhorn@gmail.com.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Provides a data structure used to model a visitor.
"""
__author__ = "Brian Horn"
__copyright__ = "Copyright (c) 2015 Brian Horn"
__credits__ = "Brian Horn"
__license__ = "MIT"
__version__ = "1.0.2"
__maintainer__ = "Brian Horn"
__email__ = "trycatchhorn@gmail.com"
__status__ = "Prototype"
from abc import abstractmethod
class Visitor(object):
"""
The interface of a general vistor.
"""
def __init__(self):
"""
Constructs a vistor.
"""
super(Visitor, self).__init__()
@abstractmethod
def visit(self, obj):
"""
The default visit method does nothing.
@param obj: The object to be visited.
@type: C{object}
"""
pass
@staticmethod
def is_done():
"""
The default behaviour of this visitor
is to always return False, when asked
if the visitor is done operating.
@return: False, since the visitor is not done.
@rtype: C{bool}
"""
return False
|
983,535 | 47cdc4f78ceccb0038c0e5aab3f8aa8e429bd2aa | # https://leetcode.com/explore/learn/card/data-structure-tree/17/solve-problems-recursively/537/
def hasPathSum(self, root: TreeNode, sum: int):
if not root:
return False;
# helper keeps track of sum
def helper(node, _sum, currSum):
if not node:
return False
currSum += node.val
# first need exit condition
if not node.left and not node.right:
return currSum == _sum
return helper(node.left, _sum, currSum) or helper(node.right, _sum, currSum)
return helper(root, sum, 0) |
983,536 | e2010b3ae6487ac0111734d0f6c3494f89f66cfa | from django.shortcuts import render, redirect
from .forms import NewUserForm
from .models import Visitor, Visit
from django.contrib import messages
import datetime
import uuid
##Second Form
'''
def index(request):
form = FirstTimeUserForm()
first_name, last_name, email, purpose1, purpose2, purpose3, purpose4, purpose5, purpose6, purpose7, purpose8, time = request.session['first_name'], request.session['last_name'], request.session['email'], request.session['purpose1'], request.session['purpose2'], request.session['purpose3'], request.session['purpose4'], request.session['purpose5'], request.session['purpose6'], request.session['purpose7'], request.session['purpose8'], request.session['time']
if request.method == "POST":
form = FirstTimeUserForm(request.POST)
if form.is_valid():
form.save(commit=True)
visitor = Visitor.objects.order_by()[len(Visitor.objects.order_by())-1]
address = visitor.address
gender = visitor.gender
referral = visitor.referral
for p in purpose1:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose2:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose3:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose4:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose5:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose6:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose7:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
for p in purpose8:
t = Visit(
first_name=first_name, last_name=last_name,
email=email, gender=gender, address=address,
uid=str(uuid.uuid1()), time=time,
referral=referral, purpose=p
)
t.save()
messages.success(request, f'You have successfully checked in!')
return redirect('user')
return render(request, 'visitors/index.html', context={'form': form})
'''
## First Form: active visitor check-in view
def users(request):
    """Render the visitor check-in form and, on a valid POST, record the visit.

    One ``Visit`` row is created for every purpose the visitor ticked, tagged
    with the human-readable category that purpose belongs to; the visitor's
    demographic fields are copied onto each row.  On success the user is
    redirected to 'user'; on an invalid form a warning message is shown and
    the form is re-rendered.
    """
    form = NewUserForm()
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            # NOTE(review): fetching "the row just saved" as the last element
            # of an unordered queryset is race-prone under concurrent
            # check-ins and evaluates the queryset twice; Visitor.objects.last()
            # (or the return value of form.save()) would be safer -- confirm
            # before changing.  Kept as-is to preserve behavior.
            visitor = Visitor.objects.order_by()[len(Visitor.objects.order_by())-1]
            currentDT = datetime.datetime.now()
            date = str(currentDT.strftime("%m/%d/%Y"))
            time = str(currentDT.strftime("%I:%M:%S %p"))
            # Each purpose field pairs with its reporting category; iterating
            # these pairs replaces the nine copy-pasted loops the original
            # version carried, with identical row-creation behavior.
            purpose_groups = (
                (visitor.purpose1, "Employment Assistance"),
                (visitor.purpose2, "Training Programs"),
                (visitor.purpose3, "Financial Information"),
                (visitor.purpose4, "Workshops"),
                (visitor.purpose5, "Housing or Rental Assistance"),
                (visitor.purpose6, "Young Adult Programs / Youth Programs"),
                (visitor.purpose7, "Youth Education"),
                (visitor.purpose8, "Volunteering"),
                (visitor.purpose9, "Other"),
            )
            for purposes, major_purpose in purpose_groups:
                for p in purposes:
                    # One Visit row per selected purpose, each with its own uid.
                    Visit(
                        first_name=visitor.first_name, last_name=visitor.last_name,
                        email=visitor.email, gender=visitor.gender,
                        address=visitor.address, city=visitor.city,
                        uid=str(uuid.uuid1()), date=date, time=time,
                        referral=visitor.referral, purpose=p,
                        major_purpose=major_purpose,
                        phone_number=visitor.phone_number, dob=visitor.dob,
                        household_income=visitor.household_income,
                        household_number=visitor.household_number,
                        race=visitor.race, marital_status=visitor.marital_status,
                        veteran=visitor.veteran, disabled=visitor.disabled
                    ).save()
            messages.success(request, f'You have successfully checked in!')
            return redirect('user')
        else:
            print('ERROR FORM INVALID')
            messages.warning(request, f'Error! Please fill out the form completely and select a purpose for coming in')
    return render(request, 'visitors/users.html', {'form': form})
|
983,537 | f658737f27fe01cf092ac75c04fd838591b4c763 | # purpose: provide a data structure (dict) of all yeast introns in yeast SGD reference
# genome.
#
# This was created using gffutils (https://pythonhosted.org/gffutils/contents.html)
#
# import gffutils
# from collections import defaultdict
# import yeast_Gene_name_to_ORF as yg
#
# tdb = gffutils.FeatureDB('yeast.db', keep_order=True)
# introns = defaultdict(dict)
#
#
# with open('yeast-intron-list.txt','w') as out:
# for gene in yg.geneToOrf.itervalues():
# for f in tdb.children( gene):
# if f.featuretype == 'intron':
# if gene not in introns:
# introns[gene]['chrom'] = f.chrom
# introns[gene]['start'] = []
# introns[gene]['start'].append(f.start)
# introns[gene]['stop'] = []
# introns[gene]['stop'].append(f.stop)
# else:
# introns[gene]['start'].append(f.start)
# introns[gene]['stop'].append(f.stop)
# ct = 0
# for k,v in introns.iteritems():
# ct += 1
# out.write( '\"%s\" : %s,' %( k, v) )
# if ct == 3:
# out.write("\n")
# ct = 0
#
# Author: Mike Place
#
introns = { "YHR218W" : {'stop': [558714], 'start': [558616], 'chrom': 'VIII'},"YLR445W" : {'stop': [1024654], 'start': [1024573], 'chrom': 'XII'},"YBL026W" : {'stop': [170804], 'start': [170677], 'chrom': 'II'},
"YPR187W" : {'stop': [911352], 'start': [911277], 'chrom': 'XVI'},"YGL087C" : {'stop': [346893], 'start': [346809], 'chrom': 'VII'},"YBR181C" : {'stop': [592768], 'start': [592417], 'chrom': 'II'},
"YBR090C" : {'stop': [426873], 'start': [426517], 'chrom': 'II'},"YDR025W" : {'stop': [491898], 'start': [491560], 'chrom': 'IV'},"YGR001C" : {'stop': [497458, 497999], 'start': [497366, 497938], 'chrom': 'VII'},
"YDR535C" : {'stop': [1507314], 'start': [1507059], 'chrom': 'IV'},"YMR125W" : {'stop': [517885], 'start': [517564], 'chrom': 'XIII'},"YML124C" : {'stop': [23658], 'start': [23361], 'chrom': 'XIII'},
"YIL156W-B" : {'stop': [47760], 'start': [47699], 'chrom': 'IX'},"YKR057W" : {'stop': [552002], 'start': [551681], 'chrom': 'XI'},"YGR296W" : {'stop': [1085030], 'start': [1084883], 'chrom': 'VII'},
"YJR145C" : {'stop': [703054], 'start': [702799], 'chrom': 'X'},"YDL125C" : {'stop': [239509], 'start': [239399], 'chrom': 'IV'},"YPL249C-A" : {'stop': [76223], 'start': [75986], 'chrom': 'XVI'},
"YML017W" : {'stop': [236953], 'start': [236592], 'chrom': 'XIII'},"YKL006C-A" : {'stop': [430596], 'start': [430456], 'chrom': 'XI'},"YNL301C" : {'stop': [64450], 'start': [64019], 'chrom': 'XIV'},
"YML036W" : {'stop': [206203], 'start': [206098], 'chrom': 'XIII'},"YBR111W-A" : {'stop': [462289, 462499], 'start': [462210, 462430], 'chrom': 'II'},"YHR077C" : {'stop': [255750], 'start': [255638], 'chrom': 'VIII'},
"YKL006W" : {'stop': [432432], 'start': [432035], 'chrom': 'XI'},"YDR424C" : {'stop': [1319697, 1319816], 'start': [1319618, 1319721], 'chrom': 'IV'},"YLR048W" : {'stop': [242680], 'start': [242322], 'chrom': 'XII'},
"YBR119W" : {'stop': [479434], 'start': [479346], 'chrom': 'II'},"YMR292W" : {'stop': [854898], 'start': [854817], 'chrom': 'XIII'},"Q0255" : {'stop': [75662, 75903], 'start': [75623, 75873], 'chrom': 'Mito'},
"YLR464W" : {'stop': [1067363], 'start': [1067085], 'chrom': 'XII'},"YJL191W" : {'stop': [74204], 'start': [73797], 'chrom': 'X'},"YDR129C" : {'stop': [715358], 'start': [715248], 'chrom': 'IV'},
"YKL190W" : {'stop': [83074], 'start': [82999], 'chrom': 'XI'},"YEL076C-A" : {'stop': [4601], 'start': [4323], 'chrom': 'V'},"YPL090C" : {'stop': [378389], 'start': [377996], 'chrom': 'XVI'},
"YFL034C-B" : {'stop': [63973], 'start': [63860], 'chrom': 'VI'},"YNL302C" : {'stop': [62923], 'start': [62373], 'chrom': 'XIV'},"YBL027W" : {'stop': [168808], 'start': [168425], 'chrom': 'II'},
"YLR448W" : {'stop': [1029252], 'start': [1028869], 'chrom': 'XII'},"YNL096C" : {'stop': [444171], 'start': [443827], 'chrom': 'XIV'},"YLR275W" : {'stop': [694472], 'start': [694383], 'chrom': 'XII'},
"YGR034W" : {'stop': [556307], 'start': [555831], 'chrom': 'VII'},"YHR076W" : {'stop': [251248], 'start': [251156], 'chrom': 'VIII'},"YGL232W" : {'stop': [62189], 'start': [62132], 'chrom': 'VII'},
"YJL205C" : {'stop': [50411], 'start': [50269], 'chrom': 'X'},"YLR078C" : {'stop': [286556], 'start': [286468], 'chrom': 'XII'},"YHR041C" : {'stop': [189850], 'start': [189750], 'chrom': 'VIII'},
"YCR097W" : {'stop': [293993, 294291], 'start': [293940, 294240], 'chrom': 'III'},"YDL064W" : {'stop': [337634], 'start': [337525], 'chrom': 'IV'},"YPL129W" : {'stop': [305411], 'start': [305307], 'chrom': 'XVI'},
"YIL148W" : {'stop': [69149], 'start': [68716], 'chrom': 'IX'},"YLR054C" : {'stop': [250947], 'start': [250861], 'chrom': 'XII'},"YPL079W" : {'stop': [407067], 'start': [406647], 'chrom': 'XVI'},
"YDR397C" : {'stop': [1266861], 'start': [1266770], 'chrom': 'IV'},"YKR005C" : {'stop': [450020], 'start': [449952], 'chrom': 'XI'},"YLR211C" : {'stop': [564513], 'start': [564455], 'chrom': 'XII'},
"YPL175W" : {'stop': [218746], 'start': [218647], 'chrom': 'XVI'},"YER179W" : {'stop': [548644], 'start': [548553], 'chrom': 'V'},"YJL136C" : {'stop': [157249], 'start': [156790], 'chrom': 'X'},
"YOR096W" : {'stop': [506338], 'start': [505938], 'chrom': 'XV'},"YBL050W" : {'stop': [125270], 'start': [125155], 'chrom': 'II'},"YOR312C" : {'stop': [901193], 'start': [900768], 'chrom': 'XV'},
"Q0055" : {'stop': [16434], 'start': [13987], 'chrom': 'Mito'},"YDR450W" : {'stop': [1360404], 'start': [1359970], 'chrom': 'IV'},"YER133W" : {'stop': [433196], 'start': [432672], 'chrom': 'V'},
"YBL087C" : {'stop': [60697], 'start': [60194], 'chrom': 'II'},"YPR202W" : {'stop': [943198], 'start': [943051], 'chrom': 'XVI'},"YER093C-A" : {'stop': [348276], 'start': [348202], 'chrom': 'V'},
"YAL001C" : {'stop': [151096], 'start': [151007], 'chrom': 'I'},"YHR203C" : {'stop': [505516], 'start': [505248], 'chrom': 'VIII'},"YIL018W" : {'stop': [317171], 'start': [316772], 'chrom': 'IX'},
"YDL075W" : {'stop': [322703], 'start': [322283], 'chrom': 'IV'},"YDL130W" : {'stop': [230320], 'start': [230020], 'chrom': 'IV'},"YFL039C" : {'stop': [54686], 'start': [54378], 'chrom': 'VI'},
"YMR142C" : {'stop': [551203], 'start': [550802], 'chrom': 'XIII'},"YCR028C-A" : {'stop': [173198], 'start': [173116], 'chrom': 'III'},"YEL012W" : {'stop': [131899], 'start': [131777], 'chrom': 'V'},
"YLR093C" : {'stop': [327399], 'start': [327259], 'chrom': 'XII'},"YDL029W" : {'stop': [399484], 'start': [399362], 'chrom': 'IV'},"YJR079W" : {'stop': [581052], 'start': [580348], 'chrom': 'X'},
"YPR043W" : {'stop': [654570], 'start': [654168], 'chrom': 'XVI'},"YPL198W" : {'stop': [173571, 174072], 'start': [173163, 173666], 'chrom': 'XVI'},"YBL040C" : {'stop': [142846], 'start': [142750], 'chrom': 'II'},
"YIL111W" : {'stop': [155310], 'start': [155223], 'chrom': 'IX'},"YLL067C" : {'stop': [4014], 'start': [3916], 'chrom': 'XII'},"YGL178W" : {'stop': [167994], 'start': [167355], 'chrom': 'VII'},
"YHR039C-A" : {'stop': [187676], 'start': [187515], 'chrom': 'VIII'},"YKL156W" : {'stop': [158966], 'start': [158616], 'chrom': 'XI'},"YJL001W" : {'stop': [435343], 'start': [435228], 'chrom': 'X'},
"YDR447C" : {'stop': [1355550], 'start': [1355237], 'chrom': 'IV'},"YGR118W" : {'stop': [727357], 'start': [727039], 'chrom': 'VII'},"YML085C" : {'stop': [99375], 'start': [99260], 'chrom': 'XIII'},
"YLR202C" : {'stop': [550574], 'start': [550459], 'chrom': 'XII'},"YGR214W" : {'stop': [921119], 'start': [920665], 'chrom': 'VII'},"YOR318C" : {'stop': [912432], 'start': [912086], 'chrom': 'XV'},
"YGR225W" : {'stop': [946420], 'start': [946328], 'chrom': 'VII'},"YOL127W" : {'stop': [80774], 'start': [80361], 'chrom': 'XV'},"YOR234C" : {'stop': [779386], 'start': [778860], 'chrom': 'XV'},
"YOL120C" : {'stop': [94290], 'start': [93844], 'chrom': 'XV'},"YHR199C-A" : {'stop': [498786], 'start': [498720], 'chrom': 'VIII'},"YOR293W" : {'stop': [867586], 'start': [867150], 'chrom': 'XV'},
"YML056C" : {'stop': [163716], 'start': [163309], 'chrom': 'XIII'},"YMR143W" : {'stop': [552495], 'start': [551952], 'chrom': 'XIII'},"YDL191W" : {'stop': [118157], 'start': [117667], 'chrom': 'IV'},
"YDR064W" : {'stop': [580017], 'start': [579479], 'chrom': 'IV'},"YIL106W" : {'stop': [166519], 'start': [166435], 'chrom': 'IX'},"YHR101C" : {'stop': [315858], 'start': [315772], 'chrom': 'VIII'},
"YML067C" : {'stop': [140183], 'start': [140091], 'chrom': 'XIII'},"YIL004C" : {'stop': [348494], 'start': [348364], 'chrom': 'IX'},"YBR084C-A" : {'stop': [415259], 'start': [414754], 'chrom': 'II'},
"YLL050C" : {'stop': [40400], 'start': [40222], 'chrom': 'XII'},"YLR344W" : {'stop': [819777], 'start': [819331], 'chrom': 'XII'},"YIL177C" : {'stop': [4986], 'start': [4599], 'chrom': 'IX'},
"YBR215W" : {'stop': [653452], 'start': [653369], 'chrom': 'II'},"YHL001W" : {'stop': [104803], 'start': [104406], 'chrom': 'VIII'},"YJL031C" : {'stop': [387435], 'start': [387349], 'chrom': 'X'},
"YDR005C" : {'stop': [458097], 'start': [458018], 'chrom': 'IV'},"YER003C" : {'stop': [159087], 'start': [158995], 'chrom': 'V'},"YNL147W" : {'stop': [351053], 'start': [350958], 'chrom': 'XIV'},
"YJL041W" : {'stop': [365902], 'start': [365785], 'chrom': 'X'},"YER044C-A" : {'stop': [239711], 'start': [239624], 'chrom': 'V'},"YER074W-A" : {'stop': [307848, 308067], 'start': [307747, 307957], 'chrom': 'V'},
"YMR225C" : {'stop': [721345], 'start': [721199], 'chrom': 'XIII'},"YMR033W" : {'stop': [337903], 'start': [337818], 'chrom': 'XIII'},"Q0070" : {'stop': [16434, 18953, 20507, 21994], 'start': [13987, 16471, 18992, 20985], 'chrom': 'Mito'},
"YIL069C" : {'stop': [232366], 'start': [231958], 'chrom': 'IX'},"YNL265C" : {'stop': [145254], 'start': [145150], 'chrom': 'XIV'},"YFR024C-A" : {'stop': [203386], 'start': [203293], 'chrom': 'VI'},
"YGL183C" : {'stop': [157282], 'start': [157200], 'chrom': 'VII'},"YMR133W" : {'stop': [537564], 'start': [537449], 'chrom': 'XIII'},"Q0065" : {'stop': [16434, 18953, 20507], 'start': [13987, 16471, 18992], 'chrom': 'Mito'},
"YKR095W-A" : {'stop': [625976], 'start': [625902], 'chrom': 'XI'},"YFR031C-A" : {'stop': [221414], 'start': [221268], 'chrom': 'VI'},"YBR230C" : {'stop': [680039], 'start': [679943], 'chrom': 'II'},
"YOL121C" : {'stop': [92830], 'start': [92441], 'chrom': 'XV'},"YNL138W-A" : {'stop': [366157], 'start': [366036], 'chrom': 'XIV'},"Q0110" : {'stop': [37722], 'start': [36955], 'chrom': 'Mito'},
"YLR128W" : {'stop': [398626], 'start': [398533], 'chrom': 'XII'},"YDR059C" : {'stop': [569723], 'start': [569634], 'chrom': 'IV'},"YMR242C" : {'stop': [754219], 'start': [753743], 'chrom': 'XIII'},
"YPR132W" : {'stop': [795394], 'start': [795030], 'chrom': 'XVI'},"YNL050C" : {'stop': [534965], 'start': [534875], 'chrom': 'XIV'},"YNL112W" : {'stop': [415913], 'start': [414912], 'chrom': 'XIV'},
"YBR191W" : {'stop': [606668], 'start': [606281], 'chrom': 'II'},"YBR062C" : {'stop': [366584], 'start': [366503], 'chrom': 'II'},"YOR122C" : {'stop': [552874], 'start': [552666], 'chrom': 'XV'},
"YGL033W" : {'stop': [435749], 'start': [435680], 'chrom': 'VII'},"YML073C" : {'stop': [124157], 'start': [123743], 'chrom': 'XIII'},"YCR031C" : {'stop': [178213], 'start': [177907], 'chrom': 'III'},
"YMR201C" : {'stop': [667017], 'start': [666934], 'chrom': 'XIII'},"YNL339C" : {'stop': [6079], 'start': [5932], 'chrom': 'XIV'},"YBR082C" : {'stop': [407122], 'start': [407028], 'chrom': 'II'},
"YML094W" : {'stop': [82373], 'start': [82291], 'chrom': 'XIII'},"YBL018C" : {'stop': [186427], 'start': [186353], 'chrom': 'II'},"YGL251C" : {'stop': [31578], 'start': [31427], 'chrom': 'VII'},
"YDL108W" : {'stop': [267806], 'start': [267726], 'chrom': 'IV'},"YJR112W-A" : {'stop': [637858], 'start': [637810], 'chrom': 'X'},"YKR094C" : {'stop': [618742], 'start': [618375], 'chrom': 'XI'},
"YNR053C" : {'stop': [722302], 'start': [721771], 'chrom': 'XIV'},"YBR078W" : {'stop': [393510], 'start': [393181], 'chrom': 'II'},"YIL133C" : {'stop': [99385], 'start': [99096], 'chrom': 'IX'},
"YML024W" : {'stop': [226289], 'start': [225892], 'chrom': 'XIII'},"YLR367W" : {'stop': [857057], 'start': [856575], 'chrom': 'XII'},"Q0060" : {'stop': [16434, 18953], 'start': [13987, 16471], 'chrom': 'Mito'},
"YDR381C-A" : {'stop': [1238824], 'start': [1238631], 'chrom': 'IV'},"YBL111C" : {'stop': [4215], 'start': [4117], 'chrom': 'II'},"YJL189W" : {'stop': [76324], 'start': [75939], 'chrom': 'X'},
"YIL052C" : {'stop': [257026], 'start': [256555], 'chrom': 'IX'},"YNL012W" : {'stop': [609874], 'start': [609791], 'chrom': 'XIV'},"YML133C" : {'stop': [3890], 'start': [3792], 'chrom': 'XIII'},
"YER007C-A" : {'stop': [166874], 'start': [166772], 'chrom': 'V'},"YCL002C" : {'stop': [111633], 'start': [111558], 'chrom': 'III'},"YDR139C" : {'stop': [733775], 'start': [733703], 'chrom': 'IV'},
"YBR219C" : {'stop': [663002], 'start': [662582], 'chrom': 'II'},"YMR116C" : {'stop': [500151], 'start': [499879], 'chrom': 'XIII'},"Q0105" : {'stop': [37722, 39140, 40840, 42507, 43296], 'start': [36955, 37737, 39218, 41091, 42559], 'chrom': 'Mito'},
"YHL050C" : {'stop': [2670], 'start': [1898], 'chrom': 'VIII'},"YGL137W" : {'stop': [250086], 'start': [249887], 'chrom': 'VII'},"YLR061W" : {'stop': [263594], 'start': [263206], 'chrom': 'XII'},
"YLR306W" : {'stop': [744287], 'start': [744154], 'chrom': 'XII'},"YGL030W" : {'stop': [439323], 'start': [439094], 'chrom': 'VII'},"YIL073C" : {'stop': [225899], 'start': [225810], 'chrom': 'IX'},
"YPL075W" : {'stop': [413012], 'start': [412262], 'chrom': 'XVI'},"YFR045W" : {'stop': [242081], 'start': [242010], 'chrom': 'VI'},"YER074W" : {'stop': [306791], 'start': [306326], 'chrom': 'V'},
"YBL059W" : {'stop': [110948], 'start': [110880], 'chrom': 'II'},"YNL069C" : {'stop': [494973], 'start': [494525], 'chrom': 'XIV'},"YGR029W" : {'stop': [543721], 'start': [543639], 'chrom': 'VII'},
"YJL024C" : {'stop': [396570], 'start': [396494], 'chrom': 'X'},"YFL034C-A" : {'stop': [64920], 'start': [64600], 'chrom': 'VI'},"YHR141C" : {'stop': [382747], 'start': [382307], 'chrom': 'VIII'},
"YOL047C" : {'stop': [242504], 'start': [242442], 'chrom': 'XV'},"Q0075" : {'stop': [24905], 'start': [24871], 'chrom': 'Mito'},"YPR010C-A" : {'stop': [582701], 'start': [582559], 'chrom': 'XVI'},
"YHR012W" : {'stop': [129647], 'start': [129529], 'chrom': 'VIII'},"YNL312W" : {'stop': [48401], 'start': [48294], 'chrom': 'XIV'},"YHR123W" : {'stop': [354955], 'start': [354865], 'chrom': 'VIII'},
"YBL059C-A" : {'stop': [110505], 'start': [110421], 'chrom': 'II'},"YFL031W" : {'stop': [76091], 'start': [75840], 'chrom': 'VI'},"YPL241C" : {'stop': [96233], 'start': [96154], 'chrom': 'XVI'},
"YMR194C-B" : {'stop': [652847], 'start': [652776], 'chrom': 'XIII'},"YBL091C-A" : {'stop': [47146], 'start': [47059], 'chrom': 'II'},"YPR153W" : {'stop': [833827], 'start': [833694], 'chrom': 'XVI'},
"Q0045" : {'stop': [16434, 18953, 20507, 21994, 23611, 25317, 26228], 'start': [13987, 16471, 18992, 20985, 22247, 23747, 25343], 'chrom': 'Mito'},"YKL157W" : {'stop': [155654], 'start': [155272], 'chrom': 'XI'},"YJL225C" : {'stop': [4969], 'start': [4582], 'chrom': 'X'},
"YLL066C" : {'stop': [9549], 'start': [9451], 'chrom': 'XII'},"YPR063C" : {'stop': [678279], 'start': [678194], 'chrom': 'XVI'},"YPL143W" : {'stop': [282665], 'start': [282141], 'chrom': 'XVI'},
"YKR004C" : {'stop': [447810], 'start': [447707], 'chrom': 'XI'},"YAL030W" : {'stop': [87500], 'start': [87388], 'chrom': 'I'},"YOL048C" : {'stop': [241025], 'start': [240948], 'chrom': 'XV'},
"YDR367W" : {'stop': [1212978], 'start': [1212878], 'chrom': 'IV'},"YDL012C" : {'stop': [431472], 'start': [431387], 'chrom': 'IV'},"Q0115" : {'stop': [37722, 39140], 'start': [36955, 37737], 'chrom': 'Mito'},
"YGL226C-A" : {'stop': [73137], 'start': [72989], 'chrom': 'VII'},"YNL004W" : {'stop': [623286], 'start': [622945], 'chrom': 'XIV'},"YDR471W" : {'stop': [1402184], 'start': [1401801], 'chrom': 'IV'},
"YHR021C" : {'stop': [148666], 'start': [148117], 'chrom': 'VIII'},"YDL082W" : {'stop': [308792], 'start': [308428], 'chrom': 'IV'},"YJR021C" : {'stop': [469263], 'start': [469184], 'chrom': 'X'},
"YHR001W-A" : {'stop': [107894], 'start': [107832], 'chrom': 'VIII'},"YJL177W" : {'stop': [91411], 'start': [91095], 'chrom': 'X'},"YLR185W" : {'stop': [523028], 'start': [522670], 'chrom': 'XII'},
"YMR230W" : {'stop': [732875], 'start': [732466], 'chrom': 'XIII'},"YMR194W" : {'stop': [651623], 'start': [651161], 'chrom': 'XIII'},"YDL219W" : {'stop': [65377], 'start': [65307], 'chrom': 'IV'},
"YBR189W" : {'stop': [604927], 'start': [604515], 'chrom': 'II'},"YPL081W" : {'stop': [405457], 'start': [404957], 'chrom': 'XVI'},"YHR079C-A" : {'stop': [262440], 'start': [262355], 'chrom': 'VIII'},
"YGR183C" : {'stop': [859473], 'start': [859261], 'chrom': 'VII'},"YLR329W" : {'stop': [786712], 'start': [786616], 'chrom': 'XII'},"YLR199C" : {'stop': [548763], 'start': [548678], 'chrom': 'XII'},
"YHR016C" : {'stop': [138408], 'start': [138241], 'chrom': 'VIII'},"YER117W" : {'stop': [397281], 'start': [396811], 'chrom': 'V'},"YLR426W" : {'stop': [987212], 'start': [987142], 'chrom': 'XII'},
"YCL012C" : {'stop': [101700], 'start': [101634], 'chrom': 'III'},"YBR048W" : {'stop': [333386], 'start': [332876], 'chrom': 'II'},"YPL283C" : {'stop': [5988], 'start': [5841], 'chrom': 'XVI'},
"YNL130C" : {'stop': [380781], 'start': [380690], 'chrom': 'XIV'},"YNL162W" : {'stop': [331837], 'start': [331326], 'chrom': 'XIV'},"YLR406C" : {'stop': [931698], 'start': [931350], 'chrom': 'XII'},
"YDR318W" : {'stop': [1103892], 'start': [1103810], 'chrom': 'IV'},"YPL031C" : {'stop': [493020], 'start': [492919], 'chrom': 'XVI'},"YPR098C" : {'stop': [729481], 'start': [729386], 'chrom': 'XVI'},
"YAL003W" : {'stop': [142619], 'start': [142254], 'chrom': 'I'},"YML026C" : {'stop': [223781], 'start': [223381], 'chrom': 'XIII'},"YGL076C" : {'stop': [365432, 365985], 'start': [364965, 365527], 'chrom': 'VII'},
"YMR079W" : {'stop': [425153], 'start': [424998], 'chrom': 'XIII'},"YDR305C" : {'stop': [1073401], 'start': [1073313], 'chrom': 'IV'},"YPL218W" : {'stop': [138864], 'start': [138726], 'chrom': 'XVI'},
"YPL109C" : {'stop': [345596], 'start': [345445], 'chrom': 'XVI'},"Q0120" : {'stop': [37722, 39140, 40840], 'start': [36955, 37737, 39218], 'chrom': 'Mito'},"YBR186W" : {'stop': [602216], 'start': [602104], 'chrom': 'II'},
"YKL180W" : {'stop': [109883], 'start': [109578], 'chrom': 'XI'},"YHR010W" : {'stop': [127112], 'start': [126552], 'chrom': 'VIII'},"YDR381W" : {'stop': [1237608], 'start': [1236843], 'chrom': 'IV'},
"YGL103W" : {'stop': [311526], 'start': [311016], 'chrom': 'VII'},"YPR028W" : {'stop': [623710], 'start': [623578], 'chrom': 'XVI'},"YER056C-A" : {'stop': [270148], 'start': [269752], 'chrom': 'V'},
"YKL002W" : {'stop': [437905], 'start': [437838], 'chrom': 'XI'},"YCL005W-A" : {'stop': [107110, 107287], 'start': [107034, 107192], 'chrom': 'III'},"YDL083C" : {'stop': [307765], 'start': [307334], 'chrom': 'IV'},
"YBR255C-A" : {'stop': [727011], 'start': [726918], 'chrom': 'II'},"YML034W" : {'stop': [211570], 'start': [211445], 'chrom': 'XIII'},"YOR182C" : {'stop': [678790], 'start': [678380], 'chrom': 'XV'},
"YJR094W-A" : {'stop': [608581], 'start': [608307], 'chrom': 'X'},"YEL003W" : {'stop': [148282], 'start': [148195], 'chrom': 'V'},"YNL038W" : {'stop': [557684], 'start': [557611], 'chrom': 'XIV'},
"YDL136W" : {'stop': [218007], 'start': [217603], 'chrom': 'IV'},"YDL115C" : {'stop': [255044], 'start': [254975], 'chrom': 'IV'},"YKL081W" : {'stop': [283421], 'start': [283096], 'chrom': 'XI'},
"YDR092W" : {'stop': [630173], 'start': [629906], 'chrom': 'IV'},"YDR500C" : {'stop': [1450846], 'start': [1450458], 'chrom': 'IV'},"YNL044W" : {'stop': [545370], 'start': [545292], 'chrom': 'XIV'},
"YPR170W-B" : {'stop': [883486], 'start': [883388], 'chrom': 'XVI'},"YER014C-A" : {'stop': [184677], 'start': [184170], 'chrom': 'V'},"YHR097C" : {'stop': [298484], 'start': [298361], 'chrom': 'VIII'},
"YLR287C-A" : {'stop': [713155], 'start': [712726], 'chrom': 'XII'},"YML025C" : {'stop': [225338], 'start': [225240], 'chrom': 'XIII'},"YLR316C" : {'stop': [766129, 766249], 'start': [766074, 766182], 'chrom': 'XII'},
"YNL246W" : {'stop': [185586], 'start': [185492], 'chrom': 'XIV'},"YDL079C" : {'stop': [314336], 'start': [314045], 'chrom': 'IV'}
} |
983,538 | 46d519c7b0a27785c69021eb385bdd9f420c1a9b | """
Copyright 2012-2018 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import io
import unittest
import urllib.error
from typing import cast, IO
from hqlib import metric_source
class JenkinsOWASPDependencyReportUnderTest(metric_source.JenkinsOWASPDependencyReport):
# pylint: disable=too-few-public-methods
""" Override the url_open method to return a fixed HTML fragment. """
contents = ""
html = (
'<tr>\n'
'<tr>\n'
' <td class="pane">\n'
' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
' </td>\n'
' <td class="pane">\n'
' <table cellpadding="0" cellspacing="0" tooltip="High:1 - Normal:2" width="100%">\n'
' </tr>\n'
' </table>\n'
' </td>\n'
'</tr>\n'
'<tr>\n'
' <td class="pane">\n'
' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
' </td>\n'
' <td class="pane">\n'
' <table cellpadding="0" cellspacing="0" tooltip="Normal:2" width="100%">\n'
' </tr>\n'
' </table>\n'
' </td>\n'
'</tr>\n'
'<tr>\n'
' <td class="pane">\n'
' <a href="file.-92822313/">Microsoft.AspNet.WebApi.Core.nuspec</a>\n'
' </td>\n'
' <td class="pane">\n'
' <table cellpadding="0" cellspacing="0" tooltip="High:1 " width="100%">\n'
' </tr>\n'
' </table>\n'
' </td>\n'
'</tr>\n'
'\n'
'<tr>\n'
' <td class="pane">\n'
' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
' </td>\n'
' <td class="pane">\n'
' <table cellpadding="0" cellspacing="0" tooltip="High:1width="100%">\n'
' </tr>\n'
' </table>\n'
' </td>\n'
'</tr>\n'
' <tr>\n'
' <td class="pane">\n'
' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
' </td>\n'
' <td class="pane">\n'
' <table cellpadding="0" cellspacing="0" tooltip="Normal:2" width="100%">\n'
' </tr>\n'
' </table>\n'
' </td>\n'
'</tr>\n'
' <tr>\n'
' <td class="pane">\n'
' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
' </td>\n'
' <td class="pane">\n'
' <table cellpadding="0" cellspacing="0" tooltip="Normal:2" width="100%">\n'
' </tr>\n'
' </table>\n'
' </td>\n'
'</tr>')
    def url_open(self, url: str, log_error: bool = True) -> IO:  # pylint: disable=unused-argument
        """ Ignore the url and serve the fixed class-level HTML fragment as a file-like object. """
        return cast(IO, io.StringIO(self.html))
    def _get_soup(self, url: str):
        """ Get a beautiful soup of the HTML at the url.

        Simulates a download failure (HTTPError) when the test has put the
        marker string 'raise' into ``self.contents``; otherwise delegates to
        the real implementation.
        """
        if 'raise' in self.contents:
            raise urllib.error.HTTPError(None, None, None, None, None)
        else:
            return super()._get_soup(url)
class JenkinsOWASPDependencyReportTest(unittest.TestCase):
    """ Unit tests for the Jenkins OWASP dependency report class. """

    def setUp(self):
        # Clear the memoized lookups so each test parses self.html afresh.
        JenkinsOWASPDependencyReportUnderTest._api.cache_clear()
        JenkinsOWASPDependencyReportUnderTest.nr_warnings.cache_clear()
        self.__jenkins = JenkinsOWASPDependencyReportUnderTest('http://jenkins/', 'username', 'password')
        # Canned dependency-check report HTML: one row per dependency file whose
        # tooltip attribute carries the warning counts (e.g. "High:1 - Normal:2").
        # Includes duplicate rows and a malformed tooltip to exercise the parser.
        self.html = (
            '<tr>\n'
            '<tr>\n'
            ' <td class="pane">\n'
            ' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
            ' </td>\n'
            ' <td class="pane">\n'
            ' <table cellpadding="0" cellspacing="0" tooltip="High:1 - Normal:2" width="100%">\n'
            ' </tr>\n'
            ' </table>\n'
            ' </td>\n'
            '</tr>\n'
            '<tr>\n'
            ' <td class="pane">\n'
            ' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
            ' </td>\n'
            ' <td class="pane">\n'
            ' <table cellpadding="0" cellspacing="0" tooltip="Normal:2" width="100%">\n'
            ' </tr>\n'
            ' </table>\n'
            ' </td>\n'
            '</tr>\n'
            '<tr>\n'
            ' <td class="pane">\n'
            ' <a href="file.-92822313/">Microsoft.AspNet.WebApi.Core.nuspec</a>\n'
            ' </td>\n'
            ' <td class="pane">\n'
            ' <table cellpadding="0" cellspacing="0" tooltip="High:1 " width="100%">\n'
            ' </tr>\n'
            ' </table>\n'
            ' </td>\n'
            '</tr>\n'
            '\n'
            '<tr>\n'
            ' <td class="pane">\n'
            ' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
            ' </td>\n'
            ' <td class="pane">\n'
            ' <table cellpadding="0" cellspacing="0" tooltip="High:1width="100%">\n'
            ' </tr>\n'
            ' </table>\n'
            ' </td>\n'
            '</tr>\n'
            ' <tr>\n'
            ' <td class="pane">\n'
            ' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
            ' </td>\n'
            ' <td class="pane">\n'
            ' <table cellpadding="0" cellspacing="0" tooltip="Normal:2" width="100%">\n'
            ' </tr>\n'
            ' </table>\n'
            ' </td>\n'
            '</tr>\n'
            ' <tr>\n'
            ' <td class="pane">\n'
            ' <a href="file.-1840927159/">Microsoft.AspNet.WebApi.Cors.nuspec</a>\n'
            ' </td>\n'
            ' <td class="pane">\n'
            ' <table cellpadding="0" cellspacing="0" tooltip="Normal:2" width="100%">\n'
            ' </tr>\n'
            ' </table>\n'
            ' </td>\n'
            '</tr>')

    def test_high_priority_warnings(self):
        """ Test retrieving high priority warnings. """
        self.__jenkins.contents = self.html
        self.assertEqual(3, self.__jenkins.nr_warnings(('job',), 'high'))

    def test_normal_priority_warnings(self):
        """ Test retrieving normal priority warnings. """
        self.__jenkins.contents = self.html
        self.assertEqual(4, self.__jenkins.nr_warnings(('job',), 'normal'))

    def test_low_priority_warnings(self):
        """ Test retrieving low priority warnings. """
        self.__jenkins.contents = self.html
        self.assertEqual(0, self.__jenkins.nr_warnings(('job',), 'low'))

    def test_url(self):
        """ Test the url for a OWASP dependency report. """
        self.assertEqual(['http://jenkins/job/job_name/lastSuccessfulBuild/dependency-check-jenkins-pluginResult/'],
                         self.__jenkins.metric_source_urls('job_name'))

    def test_http_error(self):
        """ Test that the default is returned when a HTTP error occurs. """
        self.__jenkins.contents = 'raise'
        self.assertEqual(-1, self.__jenkins.nr_warnings(('job',), 'normal'))

    def test_datetime(self):
        """ Test that the age of the job is returned. """
        self.assertEqual(datetime.datetime.min, self.__jenkins.datetime(*('job',)))
|
983,539 | 0a338969dd013a180ba5d1e020d4d9e3c14d4116 | import re
from taggit_suggest.models import TagKeyword, TagRegex
from taggit.models import Tag
def _suggest_keywords(content):
    """
    Suggest by keywords
    """
    matched_tag_ids = set()
    for keyword in TagKeyword.objects.all():
        # Match on the stem when one is defined, otherwise on the full keyword.
        needle = keyword.stem if keyword.stem else keyword.keyword
        if needle in content:
            matched_tag_ids.add(keyword.tag_id)
    return matched_tag_ids
def _suggest_regexes(content):
    """
    Suggest by regular expressions
    """
    # Tag ids of every stored pattern that matches somewhere in the content.
    return {entry.tag_id
            for entry in TagRegex.objects.all()
            if re.search(entry.regex, content)}
def suggest_tags(content):
    """
    Suggest tags based on text content
    """
    # Union of keyword-based and regex-based suggestions.
    tag_ids = _suggest_keywords(content) | _suggest_regexes(content)
    return Tag.objects.filter(id__in=tag_ids)
|
983,540 | 8538ea4aa8d214358ff58d528ba9b10e64540b6e | from compundInterestCalculator import Interest
# Report the 10-period interest at each rate from the initial 5% up to 10%,
# raising the rate by one percentage point after each report.
interest = Interest(1000, 0.05)
rate, max_rate = 5, 10
while rate <= max_rate:
    print(interest.calculate_interest(10))
    interest.change_rate(rate / 100)
    rate += 1
|
983,541 | 4959fe7bf4562131afffd3c7a7a38a2d71aeec1f | # import math
# # import Ba
# import random
#
# list1 = []
# list2 = []
# list3 = []
#
# for i in range(1,30,2):
#
# list1.append(i)
# for i in range(31, 60, 2):
#
# list2.append(i)
# for i in range(61, 90, 2):
#
# list3.append(i)
#
# list4 = list1+list2+list3
#
# print(list4)
#
# print(len(list4))
# print("hfjdak hffuaf h")
# print(random.sample(list4, 5))
import bisect
# list1 = [10, 20, 30]
# list1 = []
#
# for i in range(10,1,-1):
# bisect.insort(list1, i)
# # bisect.insort(list1, 15)
#
# print(list1)
# dict1 = {1: 1, 2: 9, 3: 4}
# sorted_tuples = sorted(dict1.items(), key=lambda item: item[1])
# print(sorted_tuples) # [(1, 1), (3, 4), (2, 9)]
# sorted_dict = {k: v for k, v in sorted_tuples}
#
# print(sorted_dict) # {1: 1, 3: 4, 2: 9}
# def Merge(dict1, dict2, dict3):
# res = {**dict1, **dict2, **dict3}
# return res
#
#
# # Driver code
# dict1 = {'a': 10, 'b': 8}
# dict2 = {'d': 6, 'c': 4}
# dict4 = {'e': 7, 'f': 9}
# dict3 = Merge(dict1, dict2, dict4)
# for k in dict3.keys():
# print(k)
# import itertools
# import collections
#
# d = collections.OrderedDict((('foo', 'bar'), (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')))
# x = itertools.islice(d.items(), 0, 2)
#
# # for key, value in x:
# # print (key, value)
#
# d = {1: 1, 2: 9, 3: 4}
#
# w = list(d.keys())[:2]
# print(w)
# list1 = [10, 20, 30]
#
#
# def addNextUser(restOfUserList):
# newUser = restOfUserList[0]
# restOfUserList.remove(restOfUserList[0])
# for i in restOfUserList:
# print(i)
# return newUser
#
#
# print("NEw User",addNextUser(list1))
# Python code to demonstrate addition of tuple to a set.
# s = set()
# t = ('f', 'o')
#
# # adding tuple t to set s.
# s.add(t)
#
# print(s)
# range() with a negative step already yields the wanted sequence;
# materialize it directly instead of appending element by element.
power = list(range(-10, -90, -5))  # -10, -15, ..., -85
print(power)
print("Power in ra")
983,542 | db3e3b10d21aef3cc557a3aa25b32daf79f28f8d | ### Mortgage Calculator App ###
print("|~~~~~~~~~~Mortgage Calculator App~~~~~~~~~|")
print('\n')
interest =(float(input('Enter your present loan interest rate: %'))/100)/12
years =float(input('Enter the term of loan (years): '))*12
amount =float(input('Enter the amount of loan after put down-payment: $'))
numerator = interest*((1+interest)**years)
denominator = (1+interest)**years-1
f = float("{0:.2f}".format(amount*numerator/denominator))
print('\n')
print("Principal Borrowed:%7.2f"% amount)
print("Monthly Mortgage Payment: $",f)
print("\nThank you for using the app ^.^")
|
983,543 | afcacce0d2deec6e477ec48d071e91f5121d342d | from aiohttp import web
import asyncio
import random
import os
async def handle(request):
    """Mock geocoding endpoint: sleep up to 1s, then return one random
    OpenCage/OSM-style result with a fixed Porto Alegre street address."""
    # Simulate network latency of 0 or 1 second.
    await asyncio.sleep(random.randint(0, 1))
    mock_osm_response = {
        'rate': {
            # Pretend the API quota is nowhere near exhausted.
            'remaining': 9999,
        },
        'results': [
            {
                'geometry': {
                    # NOTE(review): 'lng' is drawn from -90..90 although
                    # longitudes span -180..180 -- confirm this is intended.
                    'lat': random.uniform(-90, 90),
                    'lng': random.uniform(-90, 90),
                },
                'components': {
                    '_type': 'street',
                    'country': 'Brasil',
                    'country_code': 'br',
                    'state': 'RS',
                    'city': 'Porto Alegre',
                    'suburn': 'Montserrat',
                    'postcode': '90480-003',
                    'road': 'Avenida Carlos Gomes',
                    'house_number': random.randint(1, 10000),
                }
            }
        ]
    }
    return web.json_response(mock_osm_response)
async def init(host, port):
    """Start an aiohttp server on host:port; every GET is answered by handle."""
    app = web.Application()
    # Route both the bare root and any single path segment to the same handler.
    app.router.add_route('GET', '/', handle)
    app.router.add_route('GET', '/{any}', handle)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, host, port)
    await site.start()
# Bring the mock server up on the address configured via environment variables
# and keep serving until the process is killed.
loop = asyncio.get_event_loop()
host = os.environ.get('MOCK_SERVER_HOST')
port = os.environ.get('MOCK_SERVER_PORT')
# Environment values arrive as strings; the TCP listener expects a numeric
# port, so convert when one is set (numeric strings keep working as before).
if port is not None:
    port = int(port)
loop.run_until_complete(init(host, port))
loop.run_forever()
|
983,544 | ad8eca567fc6d4f4d8581bcf52cd0b6275511944 | #code_in_python
#binary_search_tree
class Node:
    """A binary-search-tree node; duplicate values are rejected on insert."""

    def __init__(self, val):
        self.val = val
        self.leftChild = None
        self.rightChild = None

    def insert(self, data):
        """Insert data below this node; return False when already present."""
        if data == self.val:
            return False
        if data < self.val:
            if self.leftChild is None:
                self.leftChild = Node(data)
                return True
            return self.leftChild.insert(data)
        if self.rightChild is None:
            self.rightChild = Node(data)
            return True
        return self.rightChild.insert(data)

    def search(self, data):
        """Return True when data occurs somewhere in this subtree."""
        if data == self.val:
            return True
        branch = self.leftChild if data < self.val else self.rightChild
        return branch.search(data) if branch else False
# Exercise the tree: three successful inserts, then a lookup that misses.
obj = Node(5)
for value in (6, 10, 11):
    print(obj.insert(value))
print(obj.search(1))
|
983,545 | 0dd70450d111b824f5cfd7d2d96a16e49a6cf3bb | from __future__ import absolute_import
from __future__ import division
import math
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
#from tensorflow.python import debug as tf_debug
import numpy as np
import cv2
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
import os
import signal
from voc_utils import VOCLoader
from coco_utils import COCOLoader
import ssd
from utilities import *
slim = tf.contrib.slim
#################
# PARAMETERS #
#################
input_ckpt_path = 'data/model.ckpt-906808'
output_idx = '10'
output_root = os.path.join('data','train',output_idx)
if not os.path.exists(output_root):
os.makedirs(output_root)
output_graph_path = os.path.join(output_root, 'output_graph.pb')
output_labels_path = os.path.join(output_root, 'labels.txt')
output_ckpt_path = os.path.join(output_root, 'model.ckpt')
voc_loader = VOCLoader(os.getenv('VOC_ROOT')) #17125
train_loader = COCOLoader(os.getenv('COCO_ROOT'),'train2014') #66843
valid_loader = COCOLoader(os.getenv('COCO_ROOT'),'val2014')
log_root = '/tmp/mobilenet_logs/'
if not os.path.exists(log_root):
os.makedirs(log_root)
bottleneck_root = 'data/bottlenecks'
if not os.path.exists(bottleneck_root):
os.makedirs(bottleneck_root)
categories = train_loader.list_image_sets() # same
num_classes = len(categories)
train_batch_size = 64
valid_batch_size = 1 # must remain at 1.
MODEL_INPUT_WIDTH = 224
MODEL_INPUT_HEIGHT = 224
MODEL_INPUT_DEPTH = 3
train_iters = int(10e3)
#split_ratio = 0.85
# Learning Rate Params
init_learning_rate = 1e-3
min_learning_rate = 1e-4
num_samples = len(voc_loader.list_all()) + len(train_loader.list_all()) # = 83968
steps_per_epoch = num_samples / train_batch_size # or thereabout.
epochs_per_decay = 0.5
net_decay_steps = train_iters / (epochs_per_decay * steps_per_epoch) # of decay steps in training run
decay_factor = (min_learning_rate / init_learning_rate) ** (1./net_decay_steps)
steps_per_decay = steps_per_epoch * epochs_per_decay
steps_per_valid = 10
steps_per_save = 100
tf.logging.set_verbosity(tf.logging.INFO)
##############
# SSD PARAMS #
##############
box_ratios = [1.0, 1.0, 2.0, 3.0, 1.0/2, 1.0/3]
num_boxes = len(box_ratios)
num_outputs = num_boxes * (num_classes + 4)
##################
def dwc(inputs, num_out, scope, stride=1, padding='SAME', output_activation_fn=tf.nn.elu):
    """MobileNet-style depthwise-separable conv block: a 3x3 depthwise conv
    followed by a 1x1 pointwise conv producing num_out channels."""
    # Depthwise stage; num_outputs=None keeps one filter per input channel.
    dc = slim.separable_conv2d(inputs,
                               num_outputs=None,
                               stride=stride,
                               padding=padding,
                               depth_multiplier=1,
                               kernel_size=[3, 3],
                               scope=scope+'/dc')
    # Pointwise stage mixes channels and applies the requested activation.
    pc = slim.conv2d(dc,
                     num_out,
                     kernel_size=[1, 1],
                     activation_fn=output_activation_fn,
                     scope=scope+'/pc')
    return pc
def ssd_ops(feature_tensors, gt_box_tensor, gt_split_tensor, gt_label_tensor, num_classes, is_training=True, reuse=None):
    """Build the SSD detection heads on top of the backbone feature maps.

    Appends three extra downsampled feature maps, adds a localization and a
    classification head per map, wires the training losses against the
    ground-truth boxes, and decodes predictions.  Returns a dict with the
    'box'/'cls'/'val' prediction tensors and the mean 'acc' over all heads.
    """
    with tf.variable_scope('SSD', reuse=reuse) as sc:
        with slim.arg_scope([slim.conv2d, slim.separable_convolution2d],
                            activation_fn=tf.nn.elu,
                            #weights_regularizer=slim.l2_regularizer(4e-5), -- removing regularizer as per Mobilenet Paper
                            normalizer_fn = slim.batch_norm,
                            normalizer_params={
                                'is_training' : is_training,
                                'decay' : 0.99,
                                'fused' : True,
                                'reuse' : reuse,
                                'scope' : 'BN'
                            }
                            ):
            feature_tensors = list(feature_tensors) # copy list in case used outside
            # extra features
            depths = [512, 384, 256]
            for i in range(3):
                with tf.variable_scope('feat_%d' % i):
                    feats = dwc(feature_tensors[-1], depths[i], scope='f_dwc', padding='VALID')
                    feature_tensors.append(feats)
            # bbox predictions
            output_tensors = []
            grid_dims = []
            for i, t in enumerate(feature_tensors):
                h,w = t.get_shape().as_list()[1:3]
                grid_dims.append((h,w))
                with tf.variable_scope('box_%d' % i):
                    ## Separate Localization/Classification Prediction
                    ## From Feature Map
                    loc = t
                    loc = dwc(loc, 256, scope='b_dwc_loc_1')
                    loc = dwc(loc, num_boxes * 4, scope='b_dwc_loc_2')
                    loc = tf.reshape(loc, (-1, h*w*num_boxes, 4))
                    cls = t
                    cls = dwc(cls, 256, scope='b_dwc_cls_1')
                    cls = dwc(cls, num_boxes * num_classes, scope='b_dwc_cls_2')
                    cls = tf.reshape(cls, (-1, h*w*num_boxes, num_classes))
                    ## Coupled Localization/Classification Prediction
                    #logits = t
                    #logits = dwc(logits, 512, scope='b_dwc_1')
                    #logits = dwc(logits, num_outputs, scope='b_dwc_2', output_activation_fn=None)
                    #logits = tf.reshape(logits, (-1, h*w*num_boxes, num_classes+4))
                    #loc,cls = tf.split(logits, [4, num_classes], axis=2)
                    output_tensors.append((loc,cls))
            d_boxes = []
            net_acc = []
            n = len(output_tensors)
            # Default-box scales are interpolated linearly between s_min and
            # s_max across the feature maps (cf. the SSD paper).
            s_min = 0.1
            s_max = 0.9
            scales = []  # NOTE(review): never used -- candidate for removal.
            def s(i):
                return s_min + (s_max - s_min) / (n-1) * (i)
            with tf.name_scope('train'):
                for i, logits in enumerate(output_tensors):
                    grid_dim = grid_dims[i]
                    s_k = s(i)
                    s_kn = s(i+1)
                    w = np.sqrt(s_kn/s_k)
                    d_box = np.reshape(ssd.default_box(grid_dim, box_ratios, scale=s_k, wildcard=w), (-1,4))
                    d_box = tf.constant(d_box, tf.float32)
                    iou, sel, cls, delta = ssd.create_label_tf(gt_box_tensor, gt_split_tensor, gt_label_tensor, d_box)
                    acc = ssd.ops(logits, iou, sel, cls, delta, num_classes = num_classes, is_training = is_training)
                    d_boxes.append(d_box)
                    net_acc.append(acc)
                acc = tf.reduce_mean(net_acc)
            with tf.name_scope('pred'):
                pred_box, pred_cls, pred_val = ssd.pred(output_tensors, d_boxes, num_classes=num_classes)
            return {
                'box' : tf.identity(pred_box, name='pred_box'),
                'cls' : tf.identity(pred_cls, name='pred_cls'),
                'val' : tf.identity(pred_val, name='pred_val'),
                'acc' : acc,
            }
def get_or_create_bottlenecks(sess, bottleneck_tensors, image, loader, anns, batch_size, distorter=None):
    """Fetch cached backbone activations ("bottlenecks") for a batch of
    annotations, computing and caching to disk any that are missing.

    With batch_size <= 0 every annotation is processed purely to populate the
    on-disk cache and two empty lists are returned; otherwise returns the
    per-scale bottleneck batches followed by boxes, per-sample counts, labels.
    """
    # NOTE(review): 'all' shadows the builtin of the same name.
    all = (batch_size <= 0)
    if not all:
        anns = np.random.choice(anns, batch_size, replace=False)
    n = len(bottleneck_tensors)
    btls = [[] for _ in range(n)]
    boxs = []
    lbls = []
    spls = []
    for i, ann in enumerate(anns):
        if all and i%100==0:
            print '%d ) %s' % (i, ann)
        ### GRAB DATA ###
        btl_file = os.path.join(bottleneck_root, str(ann) + '_btl.npz')
        img_file, box, lbl = loader.grab(ann)
        if all or not os.path.exists(btl_file):
            # TODO : currently disabled btl
            # BGR->RGB flip plus [0,1] scaling before running the backbone.
            image_in = cv2.imread(img_file)[...,::-1]/255.0
            btl = sess.run(bottleneck_tensors, feed_dict={image : image_in})
            d = {str(i) : btl[i] for i in range(n)}
            np.savez(btl_file, **d)
            if not all:
                if distorter is not None and np.random.random() > 0.5:
                    # apply distortion (50% chance; recomputes the bottlenecks)
                    image_in = cv2.imread(img_file)[...,::-1]/255.0
                    image_in, box, lbl = distorter.apply(sess, image_in, box, lbl)
                    btl = sess.run(bottleneck_tensors, feed_dict={image : image_in})
                for i in range(n):
                    btls[i].append(btl[i])# for i in range(n))
        else:
            # Cache hit: load the previously stored activations.
            btl = np.load(btl_file, allow_pickle=True)
            for i in range(n):
                btls[i].append(btl[str(i)])# for i in range(n))
            #btls[i].append(btl[str(i)] for i in range(n))
        boxs.append(box)
        lbls.append(lbl)
        spls.append(len(lbl))
        #################
        ### RUN DISTORTION ###
        #d_im, d_bb, d_lbl = sess.run([d_image, d_bbox, d_label], feed_dict={image : image_in, bbox : bbox_in, label : label_in})
        #btl = sess.run(bottleneck_tensor, feed_dict={image : d_im})
        #lbl = get_label(d_bb, d_lbl, w,h)
        ######################
    if not all:
        btls = [np.concatenate(b, axis=0) for b in btls]
        boxs = np.concatenate(boxs, axis=0)
        lbls = np.concatenate(lbls, axis=0)
        res = btls + [boxs, spls, lbls]
        # no need to concatenate spls
        return res
    else:
        return [], []
# Set by sigint_handler so the training loop can exit gracefully on Ctrl-C.
stop_request = False
def sigint_handler(signal, frame):
    """SIGINT handler: flag the training loop to stop at its next iteration."""
    global stop_request
    stop_request = True
def main(_):
    """Restore the MobileNet backbone, train the SSD heads on cached
    bottlenecks drawn from VOC/COCO, and export the frozen inference graph."""
    global stop_request
    signal.signal(signal.SIGINT, sigint_handler)
    with tf.Graph().as_default():
        ####################
        # Select the model #
        ####################
        network_fn = nets_factory.get_network_fn(
            'mobilenet',
            num_classes=1001,
            is_training=False,
            width_multiplier=1.0
        )
        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = 'mobilenet'
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name,
            is_training=False)
        eval_image_size = network_fn.default_image_size
        image = tf.placeholder(tf.float32, [None,None,3], name='input')
        images = tf.expand_dims(image_preprocessing_fn(image, eval_image_size, eval_image_size), 0)
        ####################
        # Define the model #
        ####################
        #distorter = Distorter(image)
        logits, _ = network_fn(images)
        ###############
        # Restoration #
        ###############
        variables_to_restore = slim.get_variables_to_restore()
        gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.65)
        config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
        with tf.Session(config=config) as sess:
            #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
            ### DATA PROVIDERS ###
            anns_voc = voc_loader.list_all() # use for training
            anns_train = train_loader.list_all()
            anns_valid = valid_loader.list_all()
            data_provider = [(train_loader,anns_train),(voc_loader,anns_voc),(valid_loader,anns_valid)]
            ### DEFINE TENSORS ###
            bottleneck_names = [ # source bottleneck
                'MobileNet/conv_ds_6/pw_batch_norm/Relu:0',
                'MobileNet/conv_ds_12/pw_batch_norm/Relu:0',
                'MobileNet/conv_ds_14/pw_batch_norm/Relu:0'
            ]
            bottleneck_tensors = [sess.graph.get_tensor_by_name(b) for b in bottleneck_names]
            def create_input_tensors(input_size=None):
                # Placeholders default to the live backbone tensors so the graph
                # also works without precomputed bottlenecks.
                feature_tensors = [tf.placeholder_with_default(b, shape=([None]+b.get_shape().as_list()[1:])) for b in bottleneck_tensors]
                gt_boxes = tf.placeholder(tf.float32, [None, 4]) # ground truth boxes -- aggregated
                gt_splits = tf.placeholder(tf.int32, [input_size]) # # ground truth boxes per sample
                gt_labels = tf.placeholder(tf.int32, [None]) # ground truth labels -- aggregated
                return feature_tensors + [gt_boxes, gt_splits, gt_labels]
            # Train Inputs
            t_input_tensors = create_input_tensors(input_size=train_batch_size)
            # Sample training batches from VOC/COCO proportionally to size.
            t_select_ratio = np.array([float(len(anns_voc)), float(len(anns_train)), 0.0])
            t_select_ratio /= sum(t_select_ratio)
            t_ft_1, t_ft_2, t_ft_3, t_gb, t_gs, t_gl = t_input_tensors
            # Validation Inputs
            v_input_tensors = create_input_tensors(input_size=valid_batch_size)
            v_select_ratio = [0.0,0.0,1.0] # only select validation
            v_ft_1, v_ft_2, v_ft_3, v_gb, v_gs, v_gl = v_input_tensors
            def feed_dict(is_training=True):
                # Build one feed dict from a randomly selected data provider.
                select_ratio = t_select_ratio if is_training else v_select_ratio
                batch_size = train_batch_size if is_training else valid_batch_size
                input_tensors = t_input_tensors if is_training else v_input_tensors
                loader,anns = data_provider[np.random.choice(3, p=select_ratio)]
                input_values = get_or_create_bottlenecks(sess, bottleneck_tensors, image, loader, anns, batch_size)
                return {t:v for (t,v) in zip(input_tensors, input_values)}
            ### DEFINE MODEL ###
            t_ops = ssd_ops([t_ft_1, t_ft_2, t_ft_3], t_gb, t_gs, t_gl, num_classes, reuse=None, is_training=True)
            v_ops = ssd_ops([v_ft_1, v_ft_2, v_ft_3], v_gb, v_gs, v_gl, num_classes, reuse=True, is_training=False)
            t_loss = tf.losses.get_total_loss()
            v_loss = tf.reduce_sum(tf.losses.get_losses(loss_collection='valid_loss'))
            with tf.name_scope('evaluation'):
                tf.summary.scalar('t_loss', t_loss)
                tf.summary.scalar('train_accuracy', t_ops['acc'])
                tf.summary.scalar('v_loss', v_loss, collections=['valid_summary'])
                tf.summary.scalar('valid_accuracy', v_ops['acc'], collections=['valid_summary'])
            global_step = slim.get_or_create_global_step()
            learning_rate = tf.train.exponential_decay(init_learning_rate, global_step, steps_per_decay, decay_factor)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            opt = tf.train.AdamOptimizer(learning_rate = learning_rate)
            # Only the SSD heads are trained; the backbone stays frozen.
            train_vars = slim.get_trainable_variables(scope='SSD')
            with tf.control_dependencies(update_ops):
                train_op = opt.minimize(t_loss, global_step=global_step, var_list=train_vars)
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, input_ckpt_path) # -- only restores mobilenet weights
            total_saver = tf.train.Saver() # -- save all
            #total_saver.restore(sess, output_ckpt_path) # restore all
            run_id = 'run_%02d' % len(os.walk(log_root).next()[1])
            run_log_root = os.path.join(log_root, run_id)
            train_writer = tf.summary.FileWriter(os.path.join(run_log_root, 'train'), sess.graph)
            valid_writer = tf.summary.FileWriter(os.path.join(run_log_root, 'valid'), sess.graph)
            train_summary = tf.summary.merge_all()
            valid_summary = tf.summary.merge_all('valid_summary')
            ### START TRAINING ###
            for i in range(train_iters):
                if stop_request:
                    break
                s,_ = sess.run([train_summary,train_op], feed_dict=feed_dict(is_training=True))
                train_writer.add_summary(s, i)
                if (i % steps_per_valid) == 0: # -- evaluate
                    l, a, s = sess.run([v_loss, v_ops['acc'], valid_summary], feed_dict=feed_dict(is_training=False))
                    valid_writer.add_summary(s, i)
                    print('%d ) Loss : %.3f, Accuracy : %.2f' % (i, l, a))
                if i>0 and (i % steps_per_save) == 0: # -- save checkpoint
                    total_saver.save(sess, output_ckpt_path, global_step=global_step)
            if (i > steps_per_save): # didn't terminate prematurely
                # Freeze the graph down to the three prediction outputs.
                output_graph_def = graph_util.convert_variables_to_constants(
                    sess, sess.graph.as_graph_def(), [v_ops[s].name[:-2] for s in ['box', 'cls', 'val']]) # strip :0
                with gfile.FastGFile(output_graph_path, 'wb') as f:
                    f.write(output_graph_def.SerializeToString())
                with gfile.FastGFile(output_labels_path, 'w') as f:
                    f.write('\n'.join(categories) + '\n')
|
983,546 | f9ce274ba894f7de961ca67ead7fb9e903425f52 | #! /local/python/2.7/bin/python
import sys, optparse, itertools, warnings, traceback, os.path
import HTSeq
class UnknownChrom( Exception ):
    """Raised when a read maps to a chromosome absent from the GFF features."""
    pass
def invert_strand( iv ):
    """Return a copy of the genomic interval iv with its strand flipped.

    Raises ValueError for any strand other than '+' or '-'.
    """
    iv2 = iv.copy()
    if iv2.strand == "+":
        iv2.strand = "-"
    elif iv2.strand == "-":
        iv2.strand = "+"
    else:
        # Parenthesized raise form is valid under both Python 2 and 3,
        # unlike the old "raise ValueError, msg" syntax.
        raise ValueError("Illegal strand")
    return iv2
#def count_reads_in_features( sam_filename, gff_filename, stranded, overlap_mode, feature_type, id_attribute, quiet, minaqual, samout ):
def count_reads_in_features( sam_filename, gff_filename, stranded,
        overlap_mode, feature_type, id_attribute, quiet, minaqual, samout, custom_stat ):
    """htseq-count core: tally reads per GFF feature and print the table.

    Reads the GFF features into a GenomicArrayOfSets, then walks the SAM file
    (single- or paired-end), assigns each read/pair to a feature according to
    overlap_mode, and prints per-feature counts plus the standard special
    counters.  The 'MB'-marked sections additionally write detailed
    single/paired statistics to the custom_stat file when one is given.
    """
    def write_to_samout( r, assignment ):
        # Annotate the original SAM line with its feature assignment (XF tag).
        if samoutfile is None:
            return
        if not pe_mode:
            r = (r,)
        for read in r:
            if read is not None:
                samoutfile.write( read.original_sam_line.rstrip() +
                    "\tXF:Z:" + assignment + "\n" )

    if quiet:
        warnings.filterwarnings( action="ignore", module="HTSeq" )

    if samout != "":
        samoutfile = open( samout, "w" )
    else:
        samoutfile = None
    # MB
    if custom_stat != "":
        custom_stat_file=open(custom_stat,"a")
    else:
        custom_stat_file = None
    # endMB

    features = HTSeq.GenomicArrayOfSets( "auto", stranded != "no" )
    counts = {}

    # Try to open samfile to fail early in case it is not there
    if sam_filename != "-":
        open( sam_filename ).close()

    gff = HTSeq.GFF_Reader( gff_filename )
    i = 0
    try:
        for f in gff:
            if f.type == feature_type:
                try:
                    feature_id = f.attr[ id_attribute ]
                except KeyError:
                    sys.exit( "Feature %s does not contain a '%s' attribute" %
                        ( f.name, id_attribute ) )
                if stranded != "no" and f.iv.strand == ".":
                    sys.exit( "Feature %s at %s does not have strand information but you are "
                        "running htseq-count in stranded mode. Use '--stranded=no'." %
                        ( f.name, f.iv ) )
                features[ f.iv ] += feature_id
                counts[ f.attr[ id_attribute ] ] = 0
            i += 1
            if i % 100000 == 0 and not quiet:
                sys.stderr.write( "%d GFF lines processed.\n" % i )
    except:
        sys.stderr.write( "Error occured in %s.\n" % gff.get_line_number_string() )
        raise
    if not quiet:
        sys.stderr.write( "%d GFF lines processed.\n" % i )

    if len( counts ) == 0 and not quiet:
        sys.stderr.write( "Warning: No features of type '%s' found.\n" % feature_type )

    # Peek at the first alignment to decide single- vs paired-end mode.
    try:
        if sam_filename != "-":
            read_seq = HTSeq.SAM_Reader( sam_filename )
            first_read = iter(read_seq).next()
        else:
            read_seq = iter( HTSeq.SAM_Reader( sys.stdin ) )
            first_read = read_seq.next()
        read_seq = itertools.chain( [ first_read ], read_seq )
        pe_mode = first_read.paired_end
    except:
        sys.stderr.write( "Error occured when reading first line of sam file.\n" )
        raise

    try:
        if pe_mode:
            read_seq_pe_file = read_seq
            read_seq = HTSeq.pair_SAM_alignments( read_seq )
        empty = 0
        ambiguous = 0
        notaligned = 0
        lowqual = 0
        nonunique = 0
        # MB: Creating detailed stats
        if custom_stat_file:
            sam_lines = 0
            skipped = 0
            assigned_reads = 0
            assigned_reads_s = 0
            assigned_reads_p = 0
            assigned_genes = 0
            assigned_genes_s = 0
            assigned_genes_p = 0
            empty_s = 0
            empty_p = 0
            ambiguous_s = 0
            ambiguous_p = 0
            anu_dict = {}
        # endMB
        i = 0
        for r in read_seq:
            i += 1
            if not pe_mode:
                if not r.aligned:
                    notaligned += 1
                    write_to_samout( r, "not_aligned" )
                    continue
                try:
                    if r.optional_field( "NH" ) > 1:
                        write_to_samout( r, "alignment_not_unique" )
                        nonunique += 1
                        continue
                except KeyError:
                    pass
                if r.aQual < minaqual:
                    lowqual += 1
                    write_to_samout( r, "too_low_aQual" )
                    continue
                if stranded != "reverse":
                    iv_seq = ( co.ref_iv for co in r.cigar if co.type == "M" )
                else:
                    iv_seq = ( invert_strand( co.ref_iv ) for co in r.cigar if co.type == "M" )
            else:
                # Paired-end: collect the matched intervals of both mates,
                # flipping the second mate's strand (or the first, in
                # 'reverse' mode).
                if r[0] is not None and r[0].aligned:
                    #for co in r[0].cigar:
                    #sys.stderr.write("ID: %s, %s\n" % (r[0].original_sam_line.split('\t')[0],co.ref_iv))
                    if stranded != "reverse":
                        iv_seq = ( co.ref_iv for co in r[0].cigar if co.type == "M" )
                    else:
                        iv_seq = ( invert_strand( co.ref_iv ) for co in r[0].cigar if co.type == "M" )
                else:
                    iv_seq = tuple()
                if r[1] is not None and r[1].aligned:
                    if stranded != "reverse":
                        iv_seq = itertools.chain( iv_seq,
                            ( invert_strand( co.ref_iv ) for co in r[1].cigar if co.type == "M" ) )
                    else:
                        iv_seq = itertools.chain( iv_seq,
                            ( co.ref_iv for co in r[1].cigar if co.type == "M" ) )
                else:
                    if ( r[0] is None ) or not ( r[0].aligned ):
                        write_to_samout( r, "not_aligned" )
                        notaligned += 1
                        continue
                try:
                    if ( r[0] is not None and r[0].optional_field( "NH" ) > 1 ) or \
                            ( r[1] is not None and r[1].optional_field( "NH" ) > 1 ):
                        nonunique += 1
                        write_to_samout( r, "alignment_not_unique" )
                        # MB: Counting the 'alignment_not_unique' for one or both mates
                        if custom_stat_file:
                            if r[0] is not None and r[1] is not None: # The 2 mates are mapped
                                read_id = r[0].original_sam_line.split('\t')[0]
                                if read_id not in anu_dict: # The read is not indexed yet
                                    anu_dict[read_id] = {}
                                    anu_dict[read_id]['chr1'] = r[0].original_sam_line.split('\t')[2]
                                    anu_dict[read_id]['chr2'] = r[1].original_sam_line.split('\t')[2]
                                    anu_dict[read_id]['start1'] = r[0].original_sam_line.split('\t')[3]
                                    anu_dict[read_id]['start2'] = r[1].original_sam_line.split('\t')[3]
                                    anu_dict[read_id]['al_unique1'] = True
                                    anu_dict[read_id]['al_unique2'] = True
                                else: # Read already indexed
                                    if anu_dict[read_id]['al_unique1']:
                                        if anu_dict[read_id]['chr1'] != r[0].original_sam_line.split('\t')[2] or anu_dict[read_id]['start1'] != r[0].original_sam_line.split('\t')[3]: # At least two positions exists for mate r[0]
                                            anu_dict[read_id]['al_unique1'] = False
                                    if anu_dict[read_id]['al_unique2']:
                                        if anu_dict[read_id]['chr2'] != r[1].original_sam_line.split('\t')[2] or anu_dict[read_id]['start2'] != r[1].original_sam_line.split('\t')[3]: # At least two positions exists for mate r[1]
                                            anu_dict[read_id]['al_unique2'] = False
                            elif r[0] is not None: # Only r[1] is mapped
                                anu_dict[r[0].original_sam_line.split('\t')[0]] = {}
                                anu_dict[r[0].original_sam_line.split('\t')[0]]['al_unique1'] = False
                            else: # Only r[0] is mapped
                                anu_dict[r[1].original_sam_line.split('\t')[0]] = {}
                                anu_dict[r[1].original_sam_line.split('\t')[0]]['al_unique2'] = False
                        # endMB
                        continue
                except KeyError:
                    pass
                if ( r[0] and r[0].aQual < minaqual ) or ( r[1] and r[1].aQual < minaqual ):
                    lowqual += 1
                    write_to_samout( r, "too_low_aQual" )
                    continue
            try:
                if overlap_mode == "union":
                    fs = set()
                    for iv in iv_seq:
                        if iv.chrom not in features.chrom_vectors:
                            raise UnknownChrom
                        for iv2, fs2 in features[ iv ].steps():
                            fs = fs.union( fs2 )
                elif overlap_mode == "intersection-strict" or overlap_mode == "intersection-nonempty":
                    fs = None
                    for iv in iv_seq:
                        if iv.chrom not in features.chrom_vectors:
                            raise UnknownChrom
                        for iv2, fs2 in features[ iv ].steps():
                            if len(fs2) > 0 or overlap_mode == "intersection-strict":
                                if fs is None:
                                    fs = fs2.copy()
                                else:
                                    fs = fs.intersection( fs2 )
                else:
                    sys.exit( "Illegal overlap mode." )
                # NOTE(review): the MB branches below index r[0]/r[1] and so
                # assume paired-end mode -- confirm custom_stat is only used
                # with paired-end input.
                if fs is None or len( fs ) == 0:
                    write_to_samout( r, "no_feature" )
                    empty += 1
                    # MB
                    if custom_stat_file:
                        if r[0] is not None and r[1] is not None:
                            empty_p += 1
                        else:
                            empty_s += 1
                    # endMB
                elif len( fs ) > 1:
                    write_to_samout( r, "ambiguous[" + '+'.join( fs ) + "]" )
                    ambiguous += 1
                    # MB
                    if custom_stat_file:
                        if r[0] is not None and r[1] is not None:
                            ambiguous_p += 1
                        else:
                            ambiguous_s += 1
                    # endMB
                else:
                    write_to_samout( r, list(fs)[0] )
                    counts[ list(fs)[0] ] += 1
                    # MB
                    if custom_stat_file:
                        if counts[ list(fs)[0] ] == 1:
                            assigned_genes += 1
                        assigned_reads += 1
                        if r[0] is not None and r[1] is not None:
                            assigned_reads_p += 1
                        else:
                            assigned_reads_s += 1
                    # endMB
            except UnknownChrom:
                if not pe_mode:
                    rr = r
                else:
                    rr = r[0] if r[0] is not None else r[1]
                # MB
                if custom_stat_file:
                    skipped += 1
                #endMB
                if not quiet:
                    sys.stderr.write( ( "Warning: Skipping read '%s', because chromosome " +
                        "'%s', to which it has been aligned, did not appear in the GFF file.\n" ) %
                        ( rr.read.name, iv.chrom ) )
            if i % 100000 == 0 and not quiet:
                sys.stderr.write( "%d sam %s processed.\n" % ( i, "lines " if not pe_mode else "line pairs" ) )
    except:
        if not pe_mode:
            sys.stderr.write( "Error occured in %s.\n" % read_seq.get_line_number_string() )
        else:
            sys.stderr.write( "Error occured in %s.\n" % read_seq_pe_file.get_line_number_string() )
        raise

    if not quiet:
        sys.stderr.write( "%d sam %s processed.\n" % ( i, "lines " if not pe_mode else "line pairs" ) )

    if samoutfile is not None:
        samoutfile.close()

    # Standard htseq-count output: one line per feature, then the specials.
    for fn in sorted( counts.keys() ):
        print "%s\t%d" % ( fn, counts[fn] )
    print "no_feature\t%d" % empty
    print "ambiguous\t%d" % ambiguous
    print "too_low_aQual\t%d" % lowqual
    print "not_aligned\t%d" % notaligned
    print "alignment_not_unique\t%d" % nonunique
    # MB: Adding stats in the custom_stat file
    if custom_stat_file:
        custom_stat_file.write("Input SAM file line count\t"+"{:,}".format(sum(1 for line in open(sam_filename) if not line.startswith('@')))+"\n\n")
        custom_stat_file.write("SAM lines (pairs or singles) processed\t"+"{:,}".format(i)+"\n\n")
        custom_stat_file.write("Skipped pairs (chr.not found)\t"+"{:,}".format(skipped)+"\n\n")
        custom_stat_file.write("Assigned_genes\t"+"{:,}".format(assigned_genes)+"\n\n")
        custom_stat_file.write("Assigned_reads\t"+"{:,}".format(assigned_reads)+"\n")
        custom_stat_file.write("\tSingle reads\t"+"{:,}".format(assigned_reads_s)+"\n")
        custom_stat_file.write("\tPaired reads\t"+"{:,}".format(assigned_reads_p)+"\n\n")
        custom_stat_file.write("No_features\t"+"{:,}".format(empty)+"\n")
        custom_stat_file.write("\tSingle reads\t"+"{:,}".format(empty_s)+"\n")
        custom_stat_file.write("\tPaired reads\t"+"{:,}".format(empty_p)+"\n\n")
        custom_stat_file.write("Ambiguous\t"+"{:,}".format(ambiguous)+"\n")
        custom_stat_file.write("\tSingle reads\t"+"{:,}".format(ambiguous_s)+"\n")
        custom_stat_file.write("\tPaired reads\t"+"{:,}".format(ambiguous_p)+"\n\n")
        custom_stat_file.write("Alignment_not_unique\t"+"{:,}".format(nonunique)+"\n")
        custom_stat_file.write("\tSAM lines (pairs or singles)\t"+"{:,}".format(len(anu_dict))+"\n")
        # Counting the 'alignment_not_unique' with one or both mates multiply aligned
        simpl = 0
        multipl = 0
        for i in anu_dict:
            if 'al_unique1' in anu_dict[i] and 'al_unique2' in anu_dict[i]:
                if anu_dict[i]['al_unique1'] or anu_dict[i]['al_unique2']:
                    simpl += 1
                else:
                    multipl += 1
            else:
                multipl += 1
        custom_stat_file.write("\tOne_mate_uniquely_mapped\t"+"{:,}".format(simpl)+"\n")
        custom_stat_file.write("\tTwo_mates_multiply_mapped\t"+"{:,}".format(multipl)+"\n")
    # endMB
def main():
    """Command-line entry point for htseq-count.

    Parses the SAM/GFF file arguments plus counting options, then delegates
    to count_reads_in_features().  Any exception is reported on stderr with
    the raising file/line, and the process exits with status 1.
    """
    optParser = optparse.OptionParser(
        usage = "%prog [options] sam_file gff_file",
        description=
            "This script takes an alignment file in SAM format and a " +
            "feature file in GFF format and calculates for each feature " +
            "the number of reads mapping to it. See " +
            "http://www-huber.embl.de/users/anders/HTSeq/doc/count.html for details.",
        epilog =
            "Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
            "Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
            "Public License v3. Part of the 'HTSeq' framework, version %s." % HTSeq.__version__ )
    # How a read overlapping several features is assigned.
    optParser.add_option( "-m", "--mode", type="choice", dest="mode",
        choices = ( "union", "intersection-strict", "intersection-nonempty" ),
        default = "union", help = "mode to handle reads overlapping more than one feature" +
            "(choices: union, intersection-strict, intersection-nonempty; default: union)" )
    optParser.add_option( "-s", "--stranded", type="choice", dest="stranded",
        choices = ( "yes", "no", "reverse" ), default = "yes",
        help = "whether the data is from a strand-specific assay. Specify 'yes', " +
            "'no', or 'reverse' (default: yes). " +
            "'reverse' means 'yes' with reversed strand interpretation" )
    optParser.add_option( "-a", "--minaqual", type="int", dest="minaqual",
        default = 0,
        help = "skip all reads with alignment quality lower than the given " +
            "minimum value (default: 0)" )
    optParser.add_option( "-t", "--type", type="string", dest="featuretype",
        default = "exon", help = "feature type (3rd column in GFF file) to be used, " +
            "all features of other type are ignored (default, suitable for Ensembl " +
            "GTF files: exon)" )
    optParser.add_option( "-i", "--idattr", type="string", dest="idattr",
        default = "gene_id", help = "GFF attribute to be used as feature ID (default, " +
            "suitable for Ensembl GTF files: gene_id)" )
    optParser.add_option( "-o", "--samout", type="string", dest="samout",
        default = "", help = "write out all SAM alignment records into an output " +
            "SAM file called SAMOUT, annotating each line with its feature assignment " +
            "(as an optional field with tag 'XF')" )
    # MB: Option for the detailed stats
    optParser.add_option( "-c", "--custom_stat", type="string", dest="custom_stat",
        default = "", help = "write out customised statistics (such as the assigned reads count) about the counting in a separate file " )
    # endMB
    optParser.add_option( "-q", "--quiet", action="store_true", dest="quiet",
        help = "suppress progress report and warnings" )
    # With no arguments at all, show the help text instead of an error.
    if len( sys.argv ) == 1:
        optParser.print_help()
        sys.exit(1)
    (opts, args) = optParser.parse_args()
    if len( args ) != 2:
        sys.stderr.write( sys.argv[0] + ": Error: Please provide two arguments.\n" )
        sys.stderr.write( " Call with '-h' to get usage information.\n" )
        sys.exit( 1 )
    # Route warnings through the terse one-line formatter below.
    warnings.showwarning = my_showwarning
    try:
        count_reads_in_features( args[0], args[1], opts.stranded,
            opts.mode, opts.featuretype, opts.idattr, opts.quiet, opts.minaqual,
            opts.samout, opts.custom_stat )
    except:
        # NOTE(review): bare 'except' also traps KeyboardInterrupt/SystemExit;
        # kept as-is to preserve behaviour.  Report the exception type and the
        # file/line it was raised at, then exit non-zero.
        sys.stderr.write( "Error: %s\n" % str( sys.exc_info()[1] ) )
        sys.stderr.write( "[Exception type: %s, raised in %s:%d]\n" %
            ( sys.exc_info()[1].__class__.__name__,
              os.path.basename(traceback.extract_tb( sys.exc_info()[2] )[-1][0]),
              traceback.extract_tb( sys.exc_info()[2] )[-1][1] ) )
        sys.exit( 1 )
def my_showwarning(message, category, filename, lineno=None, line=None):
    """warnings.showwarning replacement: emit only 'Warning: <message>'.

    The category/filename/lineno/line parameters are accepted for API
    compatibility with warnings.showwarning but are ignored.
    """
    text = "Warning: %s\n" % message
    sys.stderr.write(text)
if __name__ == "__main__":
main() |
983,547 | b0dece9c805e4c31fdb47b81324d7c96ffc4e0b5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Analyzes new, assigned or reopened bugs of
# developers with pardus mailed address.
#
# Copyright 2009, Semen Cirit <scirit@pardus.org.tr>
#
import MySQLdb
from operator import itemgetter
import os
from sets import Set
import base64
## Configs
## Configs
# Bugzilla MySQL connection settings -- fill these in before running.
db_server = ""
db_user = ""
db_pass = ""
db_name = ""
bugzilla_user_id = 1
##
db = MySQLdb.connect(db_server, db_user, db_pass, db_name)
# filter bugs
c = db.cursor()
# Accumulators filled while scanning the developer list below.
devNames = []
totalOpenedBugs = []
openedBugsLW = []  # NOTE(review): unused in the visible code
fixedBugsLW = []   # NOTE(review): unused in the visible code
activityLW = []    # NOTE(review): unused in the visible code
print "All Opened, Reopened or Assigned Bugs"
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
# Fetch the developer account list (lines of the form 'login:Real Name')
# if it is not cached locally yet.
if not os.path.exists("accounts"):
    os.system("wget http://svn.pardus.org.tr/uludag/trunk/common/accounts")
devFile = open("accounts", "r")
for line in devFile.readlines():
    # Skip comment lines; only 'login:Real Name' entries count.
    if "#" not in line.strip() and ":" in line.strip():
        devName = line.split(":")[1]
        #print devName
        # Map the real name to Bugzilla user ids (LIKE pattern match).
        N_DevBugs = c.execute("SELECT userid FROM `profiles` where realname like '%s'" % ("%"+devName+"%"))
        bugs = {}
        for userid in c.fetchall():
            #print userid
            # Count this user's open (NEW / ASSIGNED / REOPENED) bugs.
            N_bug = c.execute("SELECT * FROM `bugs` where assigned_to = %s and (bug_status = 'NEW' or bug_status = 'ASSIGNED' or bug_status = 'REOPENED')" % userid[0])
            if N_bug != 0:
                devNames.append(devName.decode('utf-8'))
                totalOpenedBugs.append(N_bug)
c.execute("SELECT * FROM `versions`")
openBugsVersionDict = {}
# Remove duplicate versions
differentVersionsSet = Set([])
for version in c.fetchall():
    differentVersionsSet.add(version[0])
# Add versions to dict if there exist open, reopened or assigned bugs
for dversion in differentVersionsSet:
    N_openBug = c.execute("SELECT * FROM `bugs` where (bug_status = 'NEW' or bug_status = 'ASSIGNED' or bug_status = 'REOPENED') and version='%s'" % dversion)
    if N_openBug!=0:
        openBugsVersionDict[dversion] = N_openBug
print "\nAll Opened, Reopened or Assigned Bugs for Different Versions"
print "=============================================================\n"
# draw the pie chart for all open bugs according to different versions
from pylab import *
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10,10))
# Pie slice labels have the form "<version>:<count>".
openBugsVersionA = []
for key in openBugsVersionDict:
    openBugsVersionA.append(key + ":" + str(openBugsVersionDict[key]))
labels = openBugsVersionA
fracs = openBugsVersionDict.values()
explode=(0, 0.05, 0, 0)  # NOTE(review): defined but never passed to pie()
pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
title('All Open Bugs According to Different Versions', bbox={'facecolor':'0.8', 'pad':5})
savefig("all_open_bugs_for_version.png")
# Emit reStructuredText image directives for the generated report.
print(" .. image:: all_open_bugs_for_version.png")
print(" :align: center")
print "\nAll Opened, Reopened or Assigned for All Contributors"
print "=====================================================\n"
# Draw the bar chart of all open bugs of all contributors
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(20,30))
N = len(devNames)
ind = np.arange(N)  # the y locations for the groups (horizontal bars)
width = 0.5  # the width of the bars
devStd =[]
for devName in devNames:
    devStd.append(0.5)
# NOTE(review): the leading positional arguments (0, 0.5) before the data do
# not match the matplotlib bar() signature, and 'elinewiadth' is a typo for
# 'elinewidth' -- confirm against the pyplot.bar API.
rects2 = plt.bar(0,0.5 , totalOpenedBugs , ind+width, width,
                 color='y',
                 xerr=devStd,
                 error_kw=dict(elinewiadth=6, ecolor='yellow'), orientation="horizontal", align="center")
# add some
plt.xlabel('Number of Bugs')
plt.title('All Open Bugs of a Contributor')
plt.yticks(ind+width, devNames )
def autolabel(rects):
    """Annotate each horizontal bar with its integer width (the bug count)."""
    for bar in rects:
        label = '%d' % int(bar.get_width())
        x = 1.05 * bar.get_width()
        y = bar.get_y() + bar.get_height() / 2.
        plt.text(x, y, label, ha='center', va='bottom')
autolabel(rects2)  # annotate each bar with its count
plt.savefig("all_open_bugs.png")
print(" .. image:: all_open_bugs.png")
|
983,548 | 1617e5178248acf1eeeb29ea178838f1a58f8c4e | from django.core.exceptions import ValidationError
def limit_instances(sender, instance, created, *args, **kwargs):
    """post_save-style handler that forbids creating more than one instance.

    Existing instances may be updated freely; only creation of an
    additional record raises ValidationError.
    """
    if not created:
        return
    raise ValidationError("There can only be 1 instance of this model.")
|
def fraction(n):
    """Return n! (the factorial of *n*); n <= 1 yields 1.

    Rewritten iteratively: the original recursed once per unit of *n*,
    hitting Python's recursion limit for large inputs (and recursing
    forever for negative ones).
    """
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
print(fraction(11)) |
983,550 | 5c3c8e93b46c7795a46d8cd8e1fc533283fb9b18 | import torch
import torch.nn as nn
import urllib.request
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2', 'GetResnet']
# CHECKPOINT_1_PATH = '/Users/pytorch-image-classification/cp1.pt'
# CHECKPOINT_2_PATH = '/Users/pytorch-image-classification/cp2.pt'
# CHECKPOINT_3_PATH = '/Users/pytorch-image-classification/cp3.pt'
# CHECKPOINT_4_PATH = '/Users/pytorch-image-classification/cp4.pt'
# Activation checkpoints written by ResNet.forward() so an interrupted run
# can resume from the last completed stage (see forward() below).
CHECKPOINT_1_PATH = 'cp1.pt'
CHECKPOINT_2_PATH = 'cp2.pt'
CHECKPOINT_3_PATH = 'cp3.pt'
CHECKPOINT_4_PATH = 'cp4.pt'
# Official torchvision pretrained-weight download URLs, keyed by
# architecture name (the keys match the constructor names below).
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
class ResNet(nn.Module):
    """torchvision-style ResNet whose forward() checkpoints intermediate
    activations to disk so an interrupted run can resume from the last
    completed stage group.

    Constructor args:
        block: residual block class (BasicBlock/Bottleneck style; must
            expose an ``expansion`` attribute)
        layers: four ints -- number of blocks per residual stage
        num_classes: output size of the final fully-connected layer
        zero_init_residual: zero-init the last BN of each residual branch
        groups / width_per_group: grouped-convolution configuration
        replace_stride_with_dilation: three bools -- replace stride with
            dilation in stages 2-4
        norm_layer: normalisation layer factory (default nn.BatchNorm2d)
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64  # channel count entering the first residual stage
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 downsample (or dilate) by 2.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        # Head: global average pool + linear classifier.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of *blocks* blocks at width *planes*.

        The first block gets a 1x1 downsample projection when the stride or
        channel count changes.  NOTE(review): conv1x1, BasicBlock and
        Bottleneck are not defined in this file -- they must be provided
        elsewhere in the module.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the network, checkpointing after each of four stage groups.

        When a checkpoint file exists its stored activation is loaded and
        the corresponding layers are skipped.  NOTE(review): a recovered
        checkpoint *replaces* the incoming x, so stale cp*.pt files from a
        previous input are silently reused -- delete them between runs.
        """
        import os
        if(os.path.exists(CHECKPOINT_1_PATH)):
            x = torch.load(CHECKPOINT_1_PATH)
            print("Checkpoint 1 recovered")
        else:
            x = self.conv1(x)
            x = self.bn1(x)
            torch.save(x,CHECKPOINT_1_PATH)
            print("First layer done")
        if(os.path.exists(CHECKPOINT_2_PATH)):
            x = torch.load(CHECKPOINT_2_PATH)
            print("Checkpoint 2 recovered")
        else:
            x = self.relu(x)
            x = self.maxpool(x)
            torch.save(x,CHECKPOINT_2_PATH)
            print("Second layer done")
        if(os.path.exists(CHECKPOINT_3_PATH)):
            x = torch.load(CHECKPOINT_3_PATH)
            print("Checkpoint 3 recovered")
        else:
            x = self.layer1(x)
            x = self.layer2(x)
            torch.save(x,CHECKPOINT_3_PATH)
            print("third layer done")
        if(os.path.exists(CHECKPOINT_4_PATH)):
            x = torch.load(CHECKPOINT_4_PATH)
            print("Checkpoint 4 recovered")
        else :
            x = self.layer3(x)
            x = self.layer4(x)
            torch.save(x,CHECKPOINT_4_PATH)
            print("Fourth layer done")
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
class GetResnet():
    """Look up and build ResNet-family constructors by architecture name.

    BUG FIXES vs. the original:
      * the last entry of the literal map used a comma instead of a colon
        (``'resnext101_32x8d', resnext101_32x8d``) -- a syntax error;
      * the map was evaluated in the class body, but the constructor
        functions (resnet18, ...) are defined *later* in this module, so it
        raised NameError at import time.  The map is now built lazily;
      * the methods referenced bare ``model_function_map`` without
        self/cls, which would raise NameError at call time.
    """

    @staticmethod
    def _model_function_map():
        # Built on demand so the module-level constructors below are
        # already defined by the time the map is evaluated.
        return {
            'resnet18': resnet18,
            'resnet34': resnet34,
            'resnet50': resnet50,
            'resnet101': resnet101,
            'resnet152': resnet152,
            'wide_resnet50_2': wide_resnet50_2,
            'wide_resnet101_2': wide_resnet101_2,
            'resnext50_32x4d': resnext50_32x4d,
            'resnext101_32x8d': resnext101_32x8d,
        }

    def get_model_choices(self):
        """Return the list of supported architecture names."""
        return list(self._model_function_map().keys())

    def get_model(self, architecture, pretrained=True):
        """Instantiate the model for *architecture* (KeyError if unknown)."""
        return self._model_function_map()[architecture](pretrained=pretrained)
def load_state_dict_from_url(architecture, url, progress):
    """Return the (pretrained) state dict for *architecture*.

    Uses a local '<architecture>.pth' cache when present; otherwise
    downloads *url* into that cache first.  *progress* is accepted for API
    compatibility but unused.

    BUG FIX: the original used ``os`` which is never imported at module
    level in this file (only locally inside ResNet.forward), so every call
    raised NameError; 'os' is now imported locally.  The duplicated
    ``torch.load`` in both branches is also merged into one call.
    """
    import os  # local import: 'os' is not imported at module level
    architecture_path = architecture + '.pth'
    if os.path.exists(architecture_path):
        print('Found cached ', architecture, ' model. Using it.')
    else:
        print('Requesting model data from the internet.')
        urllib.request.urlretrieve(url, architecture_path)
    return torch.load(architecture_path)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet variant; optionally load its pretrained weights."""
    net = ResNet(block, layers, **kwargs)
    if not pretrained:
        return net
    state_dict = load_state_dict_from_url(arch, model_urls[arch],
                                          progress=progress)
    net.load_state_dict(state_dict)
    return net
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # [2, 2, 2, 2] = residual blocks per stage.  NOTE(review): BasicBlock is
    # not defined in this file; it must be provided elsewhere in the module.
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # [3, 4, 6, 3] BasicBlocks per stage (same layout as ResNet-50 but with
    # the lighter two-conv block).
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # [3, 4, 6, 3] Bottleneck blocks per stage.  NOTE(review): Bottleneck is
    # not defined in this file; it must be provided elsewhere in the module.
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # [3, 4, 23, 3] Bottleneck blocks per stage.
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # [3, 8, 36, 3] Bottleneck blocks per stage.
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 4 -- the "32x4d" cardinality configuration.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 8 -- the "32x8d" cardinality configuration.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling width_per_group (64 -> 128) is what makes the net "wide".
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Same widening as wide_resnet50_2, with the deeper [3, 4, 23, 3] layout.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
|
983,551 | b0ec3e66583e8868c5ced24ed919f8ceead7cfd3 | #!/usr/bin/python3
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
print(basket) # duplicates are removed
if 'orange' in basket: # membership test
    print(True)
else:
    print(False)
if 'crabgrass' in basket:
    print(True)
else:
    print(False)
# The following demonstrates operations on two sets
a = set('abracadabra')
b = set('alacazam')
print(a) # the unique letters of a
print(a-b) # letters in a but not in b
print(a | b) # letters in a or in b
print(a & b) # letters in both a and b
print(a ^ b) # 在 a 或 b 中的字母,但不同时在 a 和 b 中 |
983,552 | f801b85bdebeb6fe21721a2f323b2b7433325a8d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 08:32:34 2020
@author: parasnandwani10
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
#loading data set
iris=load_iris()
X=iris.data    # feature matrix
Y=iris.target  # class labels
#split data into train test (50/50 split, fixed seed for reproducibility)
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,random_state=42,test_size=0.5)
#Build the model
clf=RandomForestClassifier(n_estimators=10)
#Model training
clf.fit(X_train,Y_train)
#Predictions
Y_Pred=clf.predict(X_test)
#check accuracy on the held-out half
print(accuracy_score(Y_Pred,Y_test))
"""
Once we are satisfied with our model we will pickel our model
What is pickling?
Python Object Can be saved as binary file so we can load it at later point
in time an levrage all its properties
"""
import pickle
with open('rf.pkl','wb') as model_pkl:#wb is write binary mode to save file
pickle.dump(clf,model_pkl)#dump the model object with given file |
class Mock(object):
    """Minimal stand-in object used to fake unavailable modules (a common
    pattern for e.g. documentation builds).

    Attribute access rules:
      * ``__file__`` / ``__path__``  -> ``'/dev/null'``
      * Capitalised names            -> a freshly created dummy *type*, so
        the attribute can be subclassed or instantiated
      * anything else                -> a new Mock instance
    Calling a Mock also returns a new Mock.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        elif name[0] == name[0].upper():
            mockType = type(name, (), {})
            # BUG FIX: the original used '==' (a no-op comparison) instead
            # of '=', so the fake type silently kept its default module name.
            mockType.__module__ = __name__
            return mockType
        else:
            return Mock()
|
983,554 | f9b78c3a7caed2a043eefe5710bdc355867d8f4a | import shutil
from genrl.agents import (
BernoulliMAB,
BootstrapNeuralAgent,
FixedAgent,
LinearPosteriorAgent,
NeuralGreedyAgent,
NeuralLinearPosteriorAgent,
NeuralNoiseSamplingAgent,
VariationalAgent,
)
from genrl.trainers import DCBTrainer
from genrl.utils import CovertypeDataBandit
from .utils import write_data
class TestCBAgent:
    """Smoke tests for genrl contextual-bandit agents.

    Each test trains its agent for a handful of steps on (a) a tiny
    Covertype data bandit built from five inlined sample rows and (b) a
    10-arm BernoulliMAB, then removes the trainer's log directory.
    """

    def _test_fn(self, agent_class) -> None:
        """Instantiate *agent_class* on two small bandits and train briefly."""
        bandits = []
        # Five rows of the UCI Covertype dataset (the last value on each
        # row is presumably the class label -- verify against the bandit).
        d = """2596,51,3,258,0,510,221,232,148,6279,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,5
2590,56,2,212,-6,390,220,235,151,6225,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,5
2804,139,9,268,65,3180,234,238,135,6121,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2
2785,155,18,242,118,3090,238,238,122,6211,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,2
2595,45,2,153,-1,391,220,234,150,6172,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,5
"""
        fpath = write_data("covtype.data", d)
        bandits.append(CovertypeDataBandit(path=fpath))
        fpath.unlink()  # data already loaded; drop the temporary file
        bandits.append(BernoulliMAB(bandits=10, arms=10))
        for bandit in bandits:
            agent = agent_class(bandit)
            trainer = DCBTrainer(agent, bandit, log_mode=["stdout"])
            trainer.train(timesteps=10, update_interval=2, update_after=5, batch_size=2)
        shutil.rmtree("./logs")  # clean up the log directory the trainer creates

    def test_linear_posterior_agent(self) -> None:
        self._test_fn(LinearPosteriorAgent)

    def test_neural_linear_posterior_agent(self) -> None:
        self._test_fn(NeuralLinearPosteriorAgent)

    def test_neural_greedy_agent(self) -> None:
        self._test_fn(NeuralGreedyAgent)

    def test_variational_agent(self) -> None:
        self._test_fn(VariationalAgent)

    def test_bootstrap_neural_agent(self) -> None:
        self._test_fn(BootstrapNeuralAgent)

    def test_neural_noise_sampling_agent(self) -> None:
        self._test_fn(NeuralNoiseSamplingAgent)

    def test_fixed_agent(self) -> None:
        self._test_fn(FixedAgent)
|
class Employee():
    """Simple employee record with a name and a salary."""

    def __init__(self, last_name, first_name, salary):
        self.first_name = first_name
        self.last_name = last_name
        self.salary = salary

    def give_raise(self, increment=0):
        """Increase the current salary by *increment*.

        BUG FIX: the original reset self.salary to a hard-coded 5000 before
        adding the increment, silently discarding the salary set in
        __init__.
        """
        self.salary += increment
|
983,556 | a6f35ab163364c6b0e4fc35d9eafdba810b8fb32 | #!/usr/bin/env python3
from aws_cdk import core
from aws.cdk_constructs.ExampleLambdaStack import ExampleLambdaStack
# CDK application entry point: build and synthesise the example stack.
app = core.App()
# 'alpha'-stage instance of the stack, named 'trading-engine-cdk'.
ExampleLambdaStack(app, "trading-engine-cdk", stage='alpha')
app.synth()
|
def getData():
    """Read p067_triangle.txt and return its numbers as a single
    space-separated string (newlines replaced by spaces, ends stripped).

    FIX: the original opened the file without ever closing it; 'with'
    guarantees the handle is released.
    """
    with open("p067_triangle.txt", "rt") as file:
        data = "".join(file)
    return data.replace("\n", " ").strip()
data = getData()
splitData = data.split(" ")
print(splitData)
# maxPath[i] holds the best path sum reaching flat index i of the triangle;
# maxTot tracks the best value seen anywhere.
maxPath = {0:0}
maxTot = 0
row = 0
for i in range(len(splitData)):
    maxPath[i] += int(splitData[i])
    # First flat index of the next row is sum(0..row); crossing it means we
    # have moved one row down.
    rowSum = sum(range(row+1))
    if(i >= rowSum):
        row += 1
    if row + i + 1 < len(splitData):
        # Propagate the running sum to the two children (flat indices
        # row+i and row+i+1), keeping the larger candidate for each.
        if row + i in maxPath:
            maxPath[row + i] = max([maxPath[row + i],maxPath[i]])
        else:
            maxPath[row + i] = maxPath[i]
        if row + i + 1 in maxPath:
            maxPath[row + i + 1] = max([maxPath[row + i + 1],maxPath[i]])
        else:
            maxPath[row + i + 1] = maxPath[i]
    if maxPath[i] > maxTot:
        maxTot = maxPath[i]
print(maxTot) |
983,558 | dbe682d9ca94b62149c845762ebf60878de26da0 | # -*- coding: utf-8 -*-
"""
Python script to verify 16 bit adder using analysis provided by Dr. Aslan
@author: sergi0
"""
import numpy as np #import numpy lib
import pandas as pd
import matplotlib.pyplot as plt
n = int(input("Enter input bit size (default 16): ")) #ask user for number of bits
N = int(input("Enter number of testvectors: ")) #ask user for the number of test vectors to be performed
k = int(input("Enter the number of inputs to generate (default 2): ")) #ask user for number of inputs to generate
# NOTE(review): '**' is right-associative, so this computes 2**(n**rand),
# not (2**n)*rand -- confirm this stimulus distribution is intended.
test_vectors = np.uint32(2**n**np.random.rand(N,k)) # generate numpy array values (N,k)
#print(test_vectors) # print test vectors (optional)
# Write the stimulus as 4-digit hex for the HDL testbench to consume.
np.savetxt('in_vec.txt', test_vectors, delimiter = ' ', fmt = "%.4X")
input("Run the simulation and verify out_vec.txt generated, then press enter to continue")
# out_vec.txt columns: operand A, operand B, adder output.
AA = pd.read_csv("out_vec.txt", delimiter = ' ', header = None, skiprows = 2)
Values = AA.values # this will get the three column numpy array
arrayA = Values[:,0] #A vector values
arrayB = Values[:,1] #B vector values
Out = Values[:,2] #output values
N = np.arange(0,arrayA.size) #determines number of testvectors tested
test = abs(arrayA+arrayB-Out); #non-zero entries highlight discrepancies
plt.plot(N,test)
plt.show()
#k = 0
#j = 0
#for i in (test):
# if (i<1e-16):
# #print("Passed")
# k = k+1
# else:
# j = j+1#
#
#print(f"Total number of testvectors \t: {len(test)}")
#print(f"Total number of Passes \t\t: {k}")
#print(f"Total number of Fails \t\t: {j}")
#print(f"Error {%} \t\t\t: %{100*j/(j+k)}")
|
983,559 | dc4adb5c8ab523fa210f9bb7c75ffcfa3cdb5990 | BLOCK_HEIGHT = 80
BLOCK_WIDTH = 110  # block sprite width in pixels (height defined above)
# RGB colour tuples.
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
ORANGE = (255, 165, 0)
YELLOW = (255, 255, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255) |
983,560 | ad908622631bd7b74c1cd9e56db59024714fea21 | import os,sys
import string
import json
from optparse import OptionParser
import csv
sys.path.append('../lib/')
import csvutil
__version__="1.0"
__status__ = "Dev"
def main():
    """Print (study_id, case_id) for every case in the configured study list
    that has both a Primary-Tumor and a Solid-Tissue-Normal sample in the
    two GDC sample sheets."""
    study_list = []
    cat_dict = {}        # case_id -> list of sample categories seen
    file_dict = {}       # case_id -> list of file ids
    caseid2studyid = {}  # case_id -> study/project id
    fileid2cat = {}      # case_id -> {file_id: category}
    # Load the study ids of interest (first column of studies.csv).
    in_file = "generated/misc/studies.csv"
    data_frame = {}
    csvutil.load_sheet(data_frame,in_file, "\t")
    f_list = data_frame["fields"]
    for row in data_frame["data"]:
        study_list.append(row[0])
    # Primary-tumor sample sheet.
    in_file = "downloads/sample_list_from_gdc/gdc_sample_sheet.primary_tumor.tsv"
    data_frame = {}
    csvutil.load_sheet(data_frame,in_file, "\t")
    f_list = data_frame["fields"]
    for row in data_frame["data"]:
        study_id = row[f_list.index("Project ID")]
        file_id = row[f_list.index("File ID")]
        case_id = row[f_list.index("Case ID")]
        if case_id not in cat_dict:
            caseid2studyid[case_id] = study_id
            cat_dict[case_id] = []
            file_dict[case_id] = []
            fileid2cat[case_id] = {}
        cat_dict[case_id].append("Primary-Tumor")
        file_dict[case_id].append(file_id)
        fileid2cat[case_id][file_id] = "Primary-Tumor"
    # Solid-tissue-normal sample sheet.
    in_file = "downloads/sample_list_from_gdc/gdc_sample_sheet.solid_tissue_normal.tsv"
    data_frame = {}
    csvutil.load_sheet(data_frame,in_file, "\t")
    f_list = data_frame["fields"]
    for row in data_frame["data"]:
        study_id = row[f_list.index("Project ID")]
        case_id = row[f_list.index("Case ID")]
        # NOTE(review): the next line overrides the header-based lookup with
        # fixed columns 0 and 5 -- confirm which lookup is intended.
        file_id, case_id = row[0], row[5]
        if case_id not in cat_dict:
            caseid2studyid[case_id] = study_id
            cat_dict[case_id] = []
            file_dict[case_id] = []
            fileid2cat[case_id] = {}
        cat_dict[case_id].append("Solid-Tissue-Normal")
        file_dict[case_id].append(file_id)
        fileid2cat[case_id][file_id] = "Solid-Tissue-Normal"
    # A case qualifies when both distinct categories were observed.
    for case_id in cat_dict:
        if len(sorted(set(cat_dict[case_id]))) == 2:
            study_id = caseid2studyid[case_id]
            if study_id not in study_list:
                continue
            print study_id, case_id
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
983,561 | 1da1aef7683add3517b639604fa557d8b33c3f3e | import cv2 as cv
import os
img_prefix='../Version1/images/'
def Img_Size(img_prefix):
    """Count the pixel sizes of every image under *img_prefix*.

    Returns a dict mapping 'HxW' strings to the number of images with that
    size.  Progress is printed every 10 files.  NOTE(review): an unreadable
    file makes cv.imread return None and this will raise -- confirm whether
    such files should be skipped instead.
    """
    files = os.listdir(img_prefix)
    sizes = dict()
    total_num = len(files)
    for i, file in enumerate(files):
        if i % 10 == 0:
            print('processing {}/{}'.format(i, total_num))
        img_path = os.path.join(img_prefix, file)
        img = cv.imread(img_path)
        size = str(img.shape[0]) + 'x' + str(img.shape[1])
        # idiom: dict.get replaces the 'not in sizes.keys()' membership dance
        sizes[size] = sizes.get(size, 0) + 1
    return sizes
if __name__=='__main__':
    result=Img_Size(img_prefix)
    # Print one 'HxW : count' line per distinct image size.
    for key in result.keys():
        print('{} : {}'.format(key,result[key]))
|
983,562 | 7fee25daa815d590ccc3a9f69270375cf486c221 | # from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
import json
import time
class Scraper:
    """super class for all classes requiring selenium webdrivers to scrap i-learn
    e.g. Home_Page, Course
    """

    def __init__(self, starting_url):
        """Open a Firefox driver at *starting_url* and log in using the
        'username'/'password' values stored in data/secret.json."""
        self.login_page_title = "CAS – Central Authentication Service"
        with open('data/secret.json', 'r') as f:
            secret = json.load(f)
        self.login_credentials = {
            "username": secret.get("username"),
            "password": secret.get("password"),
        }
        self.driver = webdriver.Firefox()
        self.driver.get(starting_url)
        # TODO: add more error handling to login
        self.login()

    def login(self):
        """Fill and submit the CAS login form if we are on the login page."""
        # do not run without verifying page is for i-learn login
        if self.driver.title == self.login_page_title:
            # Form field ids match the credential keys ('username'/'password').
            for key, value in self.login_credentials.items():
                field = self.driver.find_element_by_id(key)
                field.send_keys(value)
            submit_btn = self.driver.find_element_by_class_name("btn-login")
            submit_btn.click()

    def sleep_until_div(self, div_id):
        """sleeps selenium web-driver until a div is found with the desired div_id present"""
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.ID, div_id))
        )
        # wait for other divs to load inside courses
        time.sleep(2)
|
983,563 | 04783a299a66117f9fe662dd427e1f1be609f8b2 | from django.db import models
"""
Consulta
{'bin': {'issuer': {'name': 'BANCO DO BRASIL SA'}, 'product': {'name': 'Grafite'}, 'allowedCaptures': [{'name': 'POS', 'code': 1}, {'name': 'TEF', 'code': 2}, {'name': 'INTERNET', 'code': 3}, {'name': 'TELEMARKETING', 'code': 4}, {'name': 'ATM', 'code': 5}], 'usages': [{'name': 'Crédito Elo à vista ', 'code': 0}, {'name': 'Elo Parcelado Loja', 'code': 0}, {'name': 'Débito Elo à Vista', 'code': 0}], 'services': []}}
"""
class TabelaBin(models.Model):
    """One cached card-BIN lookup record, flattened from the API response
    whose shape is shown in the module-level comment above."""
    # BIN (first digits of the card number); unique, hence the primary key.
    number_bin = models.IntegerField(primary_key=True)
    name_issuer = models.CharField(max_length=100)   # e.g. 'BANCO DO BRASIL SA'
    name_product = models.CharField(max_length=100)  # e.g. 'Grafite'
    # NOTE(review): the API returns *lists* for allowedCaptures/usages, but
    # this model stores a single name/code pair -- confirm how it is filled.
    allowedCaptures_name = models.CharField(max_length=100)
    allowedCaptures_code = models.IntegerField()
    usages_name = models.CharField(max_length=100)
    usages_code = models.IntegerField()
    services_name = models.CharField(max_length=100)
    services_isExchangeableOffer = models.BooleanField(default=False)
|
983,564 | fabce21acb43e9b7e3f9afcdb4547893e7b0656c | # -*- encoding:utf-8 -*-
__author__ = 'Gh'
__date__ = '2017/5/25 下午3:51' |
983,565 | 96afbf03172e4e8cfc5c0fe687eb21a0ec69faec | from datetime import datetime, timedelta
import core
from .models import TemperatureOverTimeChart
class ChartData(object):
    """Query helpers that shape model data for the temperature charts."""

    @classmethod
    def get_tempF(cls):
        """Return (timestamp, tempF) value pairs for every recorded sample."""
        return TemperatureOverTimeChart.objects.values('timestamp', 'tempF')
983,566 | d76abe3a2507ff0339293e006ef6852e69733dc2 | """Rename client_id to endpoint_id
Revision ID: c8c1daae3ec3
Revises: a1d83bc371bb
Create Date: 2016-05-01 13:42:08.635303
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'c8c1daae3ec3'
down_revision = 'a1d83bc371bb'
branch_labels = None
depends_on = None
def upgrade():
    """Rename column client_id -> endpoint_id on reaction_links and events.

    batch_alter_table is used so the rename also works on backends (e.g.
    SQLite) that cannot ALTER COLUMN in place.
    """
    with op.batch_alter_table("reaction_links") as batch_op:
        batch_op.alter_column('client_id', new_column_name='endpoint_id')
    with op.batch_alter_table("events") as batch_op:
        batch_op.alter_column('client_id', new_column_name='endpoint_id')
def downgrade():
    # NOTE(review): left empty, so this migration is not reversible -- the
    # column rename is never undone.
    pass
|
983,567 | d7f393dad48f3fb65815e37ca477fcc7a77bbc99 | from .services import CartService
services = [CartService, ]
|
983,568 | 531337cf9c81f37b30692e90bb7e80f6a6dd3664 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.action.schema import SToolBar
from pyface.tasks.task_layout import TaskLayout, PaneItem, Splitter, Tabbed
from traits.api import on_trait_change, Button, Float, Str, Int, Bool, Event, HasTraits
from traitsui.api import Item, VGroup, UItem, HGroup
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pychron_traits import PacketStr
from pychron.entry.labnumber_entry import LabnumberEntry
from pychron.entry.tasks.actions import (
SavePDFAction,
DatabaseSaveAction,
PreviewGenerateIdentifiersAction,
GenerateIdentifiersAction,
ClearSelectionAction,
RecoverAction,
SyncMetaDataAction,
ManualEditIdentifierAction,
EditMaterialAction,
)
from pychron.entry.tasks.labnumber.panes import (
LabnumbersPane,
IrradiationPane,
IrradiationEditorPane,
IrradiationCanvasPane,
LevelInfoPane,
ChronologyPane,
FluxHistoryPane,
IrradiationMetadataEditorPane,
)
from pychron.envisage.browser.base_browser_model import BaseBrowserModel
from pychron.envisage.browser.record_views import SampleRecordView
from pychron.envisage.tasks.base_task import BaseManagerTask
from pychron.globals import globalv
from pychron.pychron_constants import DVC_PROTOCOL
from pychron.regex import PACKETREGEX
ATTRS = (
("sample", ""),
("material", ""),
("project", ""),
("principal_investigator", ""),
("weight", 0),
(
"j",
0,
),
("j_err", 0),
)
MANUAL_EDIT_VIEW = okcancel_view(
Item("edit_identifier_entry", label="Identifier"), title="Manual Edit Identifier"
)
class ClearSelectionView(HasTraits):
    """Modal dialog model for choosing which irradiation-position fields
    should be reset to their defaults.

    Each Bool trait mirrors a name in the module-level ATTRS table; the
    fields left checked are returned (paired with their reset value) by
    attributes().
    """
    sample = Bool(True)
    material = Bool(True)
    weight = Bool(True)
    project = Bool(True)
    principal_investigator = Bool(True)
    j = Bool(True)
    j_err = Bool(True)
    # Convenience buttons that toggle every checkbox at once.
    select_all = Button("Select All")
    clear_all = Button("Clear All")
    def _select_all_fired(self):
        # Traits button handler: check every field.
        self._apply_all(True)
    def _clear_all_fired(self):
        # Traits button handler: uncheck every field.
        self._apply_all(False)
    def _apply_all(self, v):
        # Set every checkbox named in ATTRS to v (True/False).
        for a, _ in ATTRS:
            setattr(self, a, v)
    def attributes(self):
        # (name, reset_value) pairs for each checked field; the caller
        # (clear_selection) assigns reset_value to perform the clear.
        return [(a, v) for a, v in ATTRS if getattr(self, a)]
    def traits_view(self):
        """Return the TraitsUI OK/Cancel view for this dialog."""
        v = okcancel_view(
            VGroup(
                HGroup(UItem("select_all"), UItem("clear_all")),
                VGroup(
                    Item("sample"),
                    Item("material"),
                    Item("project"),
                    Item("principal_investigator"),
                    Item("weight"),
                    Item("j", label="J"),
                    Item("j_err", label="J Err."),
                ),
            ),
            title="Clear Selection",
        )
        return v
class LabnumberEntryTask(BaseManagerTask, BaseBrowserModel):
name = "Package"
id = "pychron.entry.irradiation.task"
edit_identifier_entry = Str
clear_sample_button = Button
refresh_needed = Event
dclicked = Event
principal_investigator = Str
tool_bars = [
SToolBar(SavePDFAction(), DatabaseSaveAction(), image_size=(16, 16)),
SToolBar(
GenerateIdentifiersAction(),
PreviewGenerateIdentifiersAction(),
image_size=(16, 16),
),
SToolBar(ClearSelectionAction()),
SToolBar(
RecoverAction(),
SyncMetaDataAction(),
ManualEditIdentifierAction(),
EditMaterialAction(),
),
]
invert_flag = Bool
selection_freq = Int
estimate_j_button = Button
j = Float
j_err = Float
note = Str
weight = Float
sample_search_str = Str
packet = PacketStr
set_packet_event = Event
use_increment_packet = Bool
include_recent = False
_suppress_load_labnumbers = True
# def __init__(self, *args, **kw):
# super(LabnumberEntryTask, self).__init__(*args, **kw)
# self.db.create_session()
def prepare_destroy(self):
self.db.close_session()
def _opened_hook(self):
self.db.create_session(force=True)
def _closed_hook(self):
self.db.close_session()
def activated(self):
self.debug("activated labnumber")
if self.manager.verify_database_connection(inform=True):
if self.db.connected:
self.manager.activated()
self.load_principal_investigators()
self.load_projects(include_recent=False)
def find_associated_identifiers(self):
ns = [ni.name for ni in self.selected_samples]
self.info("find associated identifiers {}".format(",".join(ns)))
self.manager.find_associated_identifiers(self.selected_samples)
def sync_metadata(self):
self.info("sync metadata")
self.manager.sync_metadata()
def generate_status_report(self):
self.info("generate status report")
self.manager.generate_status_report()
def recover(self):
self.info("recover")
self.manager.recover()
def clear_selection(self):
self.info("clear selection")
cs = ClearSelectionView()
info = cs.edit_traits()
if info.result:
for s in self.manager.selected:
for attr, value in cs.attributes():
setattr(s, attr, value)
self.manager.refresh_table = True
# def get_igsns(self):
# self.info('Get IGSNs')
#
# # if not igsn_repo.url:
# # self.warning_dialog('No IGSN URL set in preferences. '
# # 'The url is required before proceeding. ')
# # return
#
# self.manager.get_igsns()
def transfer_j(self):
self.info("Transferring J Data")
self.manager.transfer_j()
def manual_edit_material(self):
if not self.manager.selected:
self.information_dialog(
"Please select an existing irradiation position to edit"
)
return
self.manager.edit_material()
# if self.manager.edit_material():
# self.refresh_needed = True
def manual_edit_identifier(self):
if not self.manager.selected:
self.information_dialog(
"Please select an existing irradiation position to edit"
)
return
if not self.confirmation_dialog(
"Please be very careful editing identifiers. Serious unintended consequences "
"may result from changing an identifier. This function should only be used by "
"users with a deep understanding of how pychron handles irradiations. \n\nAre "
"you sure you want to continue?"
):
return
self.info("Manual edit identifier")
self.edit_identifier_entry = self.manager.selected[0].identifier
info = self.edit_traits(view=MANUAL_EDIT_VIEW, kind="livemodal")
if info and self.edit_identifier_entry:
self.manager.selected[0].identifier = self.edit_identifier_entry
# def import_irradiation(self):
# self.info('Import irradiation')
# self.manager.import_irradiation()
# def import_analyses(self):
# self.info('Import analyses')
# self.manager.import_analyses()
# def generate_tray(self):
# # p='/Users/ross/Sandbox/entry_tray'
# p = self.open_file_dialog()
# if p is not None:
# gm = GraphicModel()
#
# # op='/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_no_spokes.txt'
#
# gm.srcpath = p
# # gm.xmlpath=p
# # p = make_xml(p,
# # default_radius=radius,
# # default_bounds=bounds,
# # convert_mm=convert_mm,
# # use_label=use_label,
# # make=make,
# # rotate=rotate)
# #
# # # p = '/Users/ross/Sandbox/graphic_gen_from_csv.xml'
# # gm.load(p)
# gcc = GraphicGeneratorController(model=gm)
# info = gcc.edit_traits(kind='livemodal')
# if info.result:
# if self.confirmation_dialog(
# 'Do you want to save this tray to the database. Saving tray as "{}"'.format(gm.name)):
# self.manager.save_tray_to_db(gm.srcpath, gm.name)
def save_pdf(self):
if globalv.irradiation_pdf_debug:
p = "/Users/ross/Sandbox/irradiation.pdf"
else:
p = self.save_file_dialog()
if p:
self.debug("saving pdf to {}".format(p))
if self.manager.save_pdf(p):
self.view_pdf(p)
def make_irradiation_book_pdf(self):
if globalv.entry_labbook_debug:
p = "/Users/ross/Sandbox/irradiation.pdf"
else:
p = self.save_file_dialog()
if p:
self.manager.make_labbook(p)
self.view_pdf(p)
def generate_identifiers(self):
self.manager.generate_identifiers()
def preview_generate_identifiers(self):
self.manager.preview_generate_identifiers()
def import_irradiation_load_xls(self):
if globalv.entry_irradiation_import_from_file_debug:
path = self.open_file_dialog()
else:
path = "/Users/ross/Sandbox/template.xls"
if path:
self.manager.import_irradiation_load_xls(path)
# def make_irradiation_load_template(self):
# path = self.save_file_dialog()
# if path:
# # p = '/Users/ross/Sandbox/irrad_load_template.xls'
# path = add_extension(path, '.xls')
# self.manager.make_irradiation_load_template(path)
#
# self.information_dialog('Template saved to {}'.format(path))
# # self.view_xls(path)
# def import_sample_from_file(self):
# path = self.open_file_dialog(default_directory=paths.root_dir,
# wildcard_args=('Excel', ('*.xls', '*.xlsx')))
# # path = '/Users/ross/Desktop/sample_import.xls'
# if path:
# # from pychron.entry.loaders.xls_sample_loader import XLSSampleLoader
# #
# from pychron.entry.sample_loader import XLSSampleLoader
# sample_loader = XLSSampleLoader()
# sample_loader.load(path)
# sample_loader.do_import()
#
# # spnames = []
# # if self.selected_projects:
# # spnames = [ni.name for ni in self.selected_projects]
# #
# # self.load_projects(include_recent=False)
# #
# # if spnames:
# # sel = [si for si in self.projects if si.name in spnames]
# # self.selected_projects = sel
# #
# # self._load_associated_samples()
# def import_sample_metadata(self):
# self.warning('Import sample metadata Deprecated')
# path = '/Users/ross/Programming/git/dissertation/data/minnabluff/lithologies.xls'
# path = '/Users/ross/Programming/git/dissertation/data/minnabluff/tables/TAS.xls'
# path = '/Users/ross/Programming/git/dissertation/data/minnabluff/tables/environ.xls'
# if not os.path.isfile(path):
# path = self.open_file_dialog()
#
# if path:
# self.manager.import_sample_metadata(path)
def export_irradiation(self):
from pychron.entry.export.export_selection_view import ExportSelectionView
pref = self.application.preferences
connection = {
attr: pref.get("pychron.massspec.database.{}".format(attr))
for attr in ("name", "host", "password", "username")
}
es = ExportSelectionView(
irradiations=self.manager.irradiations,
default_massspec_connection=connection,
)
info = es.edit_traits(kind="livemodal")
if info.result:
if not es.selected:
self.warning_dialog("Please select Irradiation(s) to export")
else:
from pychron.entry.export.export_util import do_export
do_export(
self.manager.dvc, es.export_type, es.destination_dict, es.selected
)
def _manager_default(self):
dvc = self.application.get_service(DVC_PROTOCOL)
dvc.connect()
dvc.create_session()
return LabnumberEntry(application=self.application, dvc=dvc)
# def _importer_default(self):
# return ImportManager(db=self.manager.db,
# connect=False)
def _default_layout_default(self):
return TaskLayout(
left=Splitter(
PaneItem("pychron.labnumber.irradiation"),
Tabbed(
# PaneItem('pychron.labnumber.extractor'),
PaneItem("pychron.labnumber.editor")
),
orientation="vertical",
),
right=Splitter(
PaneItem("pychron.entry.level"),
PaneItem("pychron.entry.chronology"),
PaneItem("pychron.entry.irradiation_canvas"),
PaneItem("pychron.entry.flux_history"),
orientation="vertical",
),
)
def create_central_pane(self):
return LabnumbersPane(model=self.manager)
def create_dock_panes(self):
return [
IrradiationPane(model=self.manager),
ChronologyPane(model=self.manager),
LevelInfoPane(model=self.manager),
FluxHistoryPane(model=self.manager),
IrradiationEditorPane(model=self),
IrradiationMetadataEditorPane(model=self),
IrradiationCanvasPane(model=self.manager),
]
# ===========================================================================
# GenericActon Handlers
# ===========================================================================
def save_as(self):
self.save()
def save(self):
self.save_to_db()
def save_to_db(self):
self.manager.save()
# private
def _increment_packet(self):
m = PACKETREGEX.search(self.packet)
if m:
v = m.group("prefix")
if not v:
v = ""
a = m.group("number")
self.packet = "{}{:02n}".format(v, int(a) + 1)
# handlers
def _estimate_j_button_fired(self):
self.manager.estimate_j()
@on_trait_change("selection_freq, invert_flag")
def _handle_selection(self):
if self.selection_freq:
self.manager.select_positions(self.selection_freq, self.invert_flag)
@on_trait_change("j,j_err, note, weight")
def _handle_j(self, obj, name, old, new):
if new:
self.manager.set_selected_attr(new, name)
@on_trait_change("set_packet_event")
def _handle_packet(self):
if not self.manager.selected:
self.warning_dialog(
"Please select an Irradiation Position before trying to set the Packet"
)
return
for s in self.manager.selected:
s.packet = self.packet
self.manager.refresh_table = True
if self.use_increment_packet:
self._increment_packet()
def _sample_search_str_changed(self, new):
if len(new) >= 3:
sams = self.db.get_samples(name_like=new)
self._set_sample_records(sams)
def _selected_samples_changed(self, new):
if new:
ni = new[0]
self.manager.set_selected_attrs(
(
ni.name,
ni.material,
ni.grainsize,
ni.project,
ni.principal_investigator,
),
(
"sample",
"material",
"grainsize",
"project",
"principal_investigator",
),
)
def _load_associated_samples(self, names=None):
if names is None:
if self.selected_projects:
names = [ni.name for ni in self.selected_projects]
# load associated samples
sams = self.db.get_samples(projects=names)
self._set_sample_records(sams)
def _set_sample_records(self, sams):
sams = [SampleRecordView(si) for si in sams]
self.samples = sams
self.osamples = sams
def _dclicked_fired(self):
self.selected_samples = []
def _clear_sample_button_fired(self):
self.selected_samples = []
@on_trait_change("extractor:update_irradiations_needed")
def _update_irradiations(self):
self.manager.updated = True
def _principal_investigator_changed(self, new):
if new:
self._load_projects_for_principal_investigators(pis=[new])
def _selected_projects_changed(self, old, new):
if new:
names = [ni.name for ni in new]
self.debug("selected projects={}".format(names))
self._load_associated_samples(names)
else:
self.samples = []
def _prompt_for_save(self):
self.manager.push_changes()
if self.manager.dirty:
message = "You have unsaved changes. Save changes to Database?"
ret = self._handle_prompt_for_save(message)
if ret == "save":
return self.manager.save()
return ret
return True
# ============= EOF =============================================
|
983,569 | 7adbfe11be3dc12381980215a3339f5a96904c18 | #Uva 11264: Coin Collector(ACEPTADO)
def main():
    """UVa 11264 (Coin Collector): for each test case, print the maximum
    number of coins that can be collected.

    stdin format: number of test cases, then per case the coin count and
    one line of coin denominations.
    """
    testCases = int(input())
    numCoins = 0
    for case in range(testCases):
        numCoins = int(input())
        coins = [int(i) for i in input().split()]
        maxCoins = 2 # there will always be at least two collectable coins
        suma = coins[0]
        # greedy: take coin i only while the running sum stays strictly
        # below the next denomination
        for i in range(1,numCoins-1):
            if(suma + coins[i] < coins[i + 1]):
                suma += coins[i]
                maxCoins += 1
        print(maxCoins)
if __name__ == '__main__':
    main()
983,570 | c5e5eb5a77f4b34d96d5715beefe01282b97b9a2 | """
@author Jenish Kevadia
Script imports methods from 'HWO1_Jenish_Kevadia.py' script and implements test cases for the type of triangle
"""
import unittest
from HW01_Jenish_Kevadia import classify_triangle, check_input
class TestTriangle(unittest.TestCase):
    """Unit tests for classify_triangle from HW01_Jenish_Kevadia."""
    def test_classify_triangle(self):
        """Valid integer sides map to the expected triangle type."""
        self.assertEqual(classify_triangle(3, 3, 3), ('Equilateral'))
        self.assertEqual(classify_triangle(2, 4, 2), ('Isosceles'))
        self.assertEqual(classify_triangle(3, 4, 5), ('Scalene and Right triangle'))
    def test_classify_triangle_string(self):
        """String input is expected to classify as Isosceles.

        NOTE(review): this presumes classify_triangle tolerates or coerces
        a non-numeric side -- confirm against its implementation.
        """
        self.assertEqual(classify_triangle('a', 1, 2), ('Isosceles'))
    def test_classify_triangle_blank(self):
        """Blank input expected to classify as Scalene.

        NOTE(review): presumes blank input is tolerated -- confirm.
        """
        self.assertEqual(classify_triangle(1, '', 4), ('Scalene'))
    def test_classify_triangle_negative(self):
        """Negative side expected to classify as Scalene.

        NOTE(review): presumes negative sides are not rejected -- confirm.
        """
        self.assertEqual(classify_triangle(4, 6, -2), ('Scalene'))
if __name__ == "__main__":
""" Run test cases on startup """
unittest.main(exit=False, verbosity=2) |
983,571 | 7d06c9fea9904683577470260aa6f4564a955b6c | from __future__ import annotations # Reference: Reading on Type Annotations
from datetime import date # Python library for working with dates (and times)
from typing import List # Python library for expressing complex types
class Tweet:
    """A tweet, like in Twitter.

    === Attributes ===
    content: the contents of the tweet.
    userid: the id of the user who wrote the tweet.
    created_at: the date the tweet was written.
    likes: the number of likes this tweet has received.
    """
    content: str
    userid: str
    created_at: date
    likes: int

    def __init__(self, who: str, when: date, what: str) -> None:
        """Initialize a new Tweet.

        >>> t = Tweet('Rukhsana', date(2017, 9, 16), 'Hey!')
        >>> t.userid
        'Rukhsana'
        >>> t.created_at
        datetime.date(2017, 9, 16)
        >>> t.content
        'Hey!'
        >>> t.likes
        0
        """
        self.content = what
        self.userid = who
        self.created_at = when
        self.likes = 0  # a new tweet starts with no likes

    def like(self, n: int) -> None:
        """Record the fact that this tweet received <n> likes.

        These likes are in addition to the ones <self> already has.

        >>> t = Tweet('Rukhsana', date(2017, 9, 16), 'Hey!')
        >>> t.like(3)
        >>> t.likes
        3
        """
        # BUG FIX: the original did ``self.likes += 1`` regardless of <n>,
        # contradicting the docstring and its doctest.
        self.likes += n

    def edit(self, new_content: str) -> None:
        """Replace the contents of this tweet with the new message.

        >>> t = Tweet('Rukhsana', date(2017, 9, 16), 'Hey!')
        >>> t.edit('Rukhsana is cool')
        >>> t.content
        'Rukhsana is cool'
        """
        self.content = new_content
class User:
    """A Twitter user.

    === Attributes ===
    userid: the userid of this Twitter user.
    bio: the bio of this Twitter user.
    tweets: the tweets that this user has made.
    """
    # Attribute types
    userid: str
    bio: str
    tweets: List[Tweet]

    def __init__(self, id_: str, bio: str) -> None:
        """Initialize this User with an empty tweet history.

        >>> david = User('David', 'is cool')
        >>> david.tweets
        []
        """
        self.tweets = []
        self.bio = bio
        self.userid = id_

    def tweet(self, message: str) -> None:
        """Record that this User made a tweet with the given content.

        The new tweet is stamped with today's date (date.today()).
        """
        new_tweet = Tweet(self.userid, date.today(), message)
        self.tweets.append(new_tweet)
|
983,572 | c03831cf730b993cca544d660300547b9e3cdb35 | def ev(n):
j = 0
for i in range(0, len(n)):
if n[i] > 0:
j += 1
print(j)
# Read one line of whitespace-separated integers from stdin and report
# how many of them are positive (via ev above).
n = list(map(int, input().split()))
ev(n)
|
983,573 | 40ca6769e3220983b08190ecc2c49e7bd378f26b | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class RandomPay(Page):
pass
page_sequence = [
RandomPay,
]
|
983,574 | 2f2dede969bf0182cd6894266f18933d349ee1fe | import json
import time
from dspipe import Pipe
import pandas as pd
from utils import govinfo_download
import bs4
n_downloads = 8
"""
Download html information for each item in the collection
Uses: https://api.govinfo.gov/docs/
"""
sleep_time = 1
def compute(f0, f1):
    """Download the plain-text rendition of one govinfo package.

    f0: path to a package-info JSON file (must contain download.txtLink).
    f1: output path for the extracted text.

    NOTE(review): on a missing or unexpected link this calls exit(), which
    terminates the whole worker process (this runs under dspipe.Pipe) --
    confirm that aborting, rather than skipping the item, is intended.
    """
    with open(f0) as FIN:
        js = json.load(FIN)
    try:
        url = js["download"]["txtLink"]
    except KeyError:
        print(f"No text link for {f0}")
        exit()
    # Only the /htm rendition is expected; anything else aborts.
    if url[-4:] != "/htm":
        print("Expected HTM LINK", url)
        exit()
    raw = govinfo_download(url, as_json=False)
    # Strip the HTML markup, keeping only the document text.
    text = bs4.BeautifulSoup(raw, "lxml").text
    with open(f1, "w") as FOUT:
        FOUT.write(text)
    print(f0, len(text))
    # Throttle requests to the API (module-level sleep_time).
    time.sleep(sleep_time)
P = Pipe(
f"data/package_info",
"data/htm_text",
output_suffix=".txt",
input_suffix=".json",
shuffle=True,
)
P(compute, n_downloads)
|
983,575 | 5cc0262ad028ca05999d7ab07c8b2da4f88f6f5d | #static_exception.py
class Sum:
    """Namespace class grouping static arithmetic helpers."""

    @staticmethod
    def getSum(*args):
        """Return the sum of all positional (numeric) arguments.

        Returns 0 when called with no arguments.
        """
        # Use the builtin sum(); the original accumulated manually into a
        # local named ``sum``, shadowing the builtin.
        return sum(args)
def main():
    """Print the sum of a fixed sample of numbers using Sum.getSum."""
    print("Sum",Sum.getSum(1,2,3,4,5))
# NOTE: runs on import as well; the script has no __main__ guard.
main()
|
983,576 | 3695120f9eeb8fee9a395f33dca7edf6af29f183 | x = float(input("digite o valor de x: "))
if((x<=-1) or (x>=1)):
print(round(x,2))
elif((x>-1) and (x<0) or (x>0) and (x<1)):
print("1")
elif(x==0):
print("2") |
983,577 | 7756188344279991fbc36e67a4f4ef5902bc74d2 | # -*- coding: iso-8859-1 -*-
#import re
import requests
def extraire_ip(url):
    """Fetch *url* over HTTP and return the response body as text."""
    return requests.get(url).text
#==============================================================================
# M A I N
#==============================================================================
if __name__ == "__main__" :
external_ip = extraire_ip("http://myip.dnsdynamic.org")
print 'external_ip: ', external_ip
|
983,578 | d21746fb37fb04e05f53a288560458dde4b344d1 | class TradeSystemBaseError(Exception):
"""
TradeSystem全体で利用する例外の基底クラス
"""
pass |
983,579 | 1acd30a241146d21f7690c725199b578eb565667 |
from unittest.mock import patch, MagicMock
from switchControl import Direction
import unittest
from unittest import skip
from unittest.mock import Mock
from flask import Flask
import json
from gpioWrapper import GPIO
from sectionControl.sectionController import build_controller
class AppTest(unittest.TestCase):
    """Flask blueprint tests for the section controller, with the
    SectionControl hardware layer patched out and a mocked DAO."""
    @patch('sectionControl.SectionControl')
    def setUp(self, SectionControl):
        self.SectionControl = SectionControl
        self.section_control = SectionControl.return_value
        self.app = Flask(__name__)
        self.client = self.app.test_client()
        self.section_dao = Mock()
        self.app.register_blueprint(build_controller(self.section_dao))
    def test_should_initialize_section_control(self):
        self.SectionControl.assert_called_with(GPIO, self.section_dao)
    def test_should_set_section(self):
        rv = self.client.put('/sections/1', data=json.dumps({"enabled": True}), content_type='application/json')
        self.assertEqual(204, rv.status_code)
        self.section_control.set_section.assert_called_with(1, {"enabled": True})
    def test_should_return_bad_request_if_enabled_field_is_missing(self):
        rv = self.client.put('/sections/1', data=json.dumps({}), content_type='application/json')
        self.assertEqual(400, rv.status_code)
        # BUG FIX: the original referenced ``assert_not_called`` without
        # calling it, so the assertion never actually ran.
        self.section_control.set_section.assert_not_called()
    def test_should_add_section(self):
        self.section_control.register_section.return_value = 4
        rv = self.client.post('/sections/atPin/18')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(b"4\n", rv.data)
        self.section_control.register_section.assert_called_with(18)
    def test_should_return_all_sections(self):
        self.section_control.get_sections.return_value = [1,2,3]
        rv = self.client.get('/sections')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(b'{"data":[1,2,3]}\n', rv.data)
        self.section_control.get_sections.assert_called_with()
|
983,580 | a137135333c0f9db5a63b63f8d2c1510bf2fd843 | for _ in range(int(input())):
n=int(input())
a=list(map(int,input().split()))
count=0
for i in range(1,n):
if a[i]==a[i-1]:
continue
count+=(abs(a[i]-a[i-1])-1)
print(count)
|
983,581 | e139b1846b9b7d6c091afaa68ab67c56294cd146 | import atexit
import ctypes
from ctypes import c_int, c_uint, c_uint32, c_long, Structure, CFUNCTYPE, POINTER
from ctypes.wintypes import DWORD, BOOL, HWND, HHOOK, MSG, WPARAM, LPARAM
import threading
LPMSG = POINTER(MSG)
MOUSEEVENTF_ABSOLUTE = 0x8000
MOUSEEVENTF_MOVE = 0x0001
MOUSEEVENTF_MOVE_NOCOALESCE = 0x2000
MOUSEEVENTF_LEFTDOWN = 0x0002
MOUSEEVENTF_LEFTUP = 0x0004
user32 = ctypes.WinDLL('user32', use_last_error = True)
class MSLLHOOKSTRUCT(Structure):
_fields_ = [("x", c_long), ("y", c_long),
('data', c_uint32), ("flags", DWORD),
("time", c_int), ('extrainfo', c_uint32), ]
LowLevelMouseProc = CFUNCTYPE(c_int, WPARAM, LPARAM, POINTER(MSLLHOOKSTRUCT))
SetWindowsHookEx = user32.SetWindowsHookExA
#SetWindowsHookEx.argtypes = [c_int, LowLevelMouseProc, c_int, c_int]
SetWindowsHookEx.restype = HHOOK
CallNextHookEx = user32.CallNextHookEx
#CallNextHookEx.argtypes = [c_int , c_int, c_int, POINTER(MSLLHOOKSTRUCT)]
CallNextHookEx.restype = c_int
UnhookWindowsHookEx = user32.UnhookWindowsHookEx
UnhookWindowsHookEx.argtypes = [HHOOK]
UnhookWindowsHookEx.restype = BOOL
GetMessage = user32.GetMessageW
GetMessage.argtypes = [LPMSG, c_int, c_int, c_int]
GetMessage.restype = BOOL
TranslateMessage = user32.TranslateMessage
TranslateMessage.argtypes = [LPMSG]
TranslateMessage.restype = BOOL
DispatchMessage = user32.DispatchMessageA
DispatchMessage.argtypes = [LPMSG]
NULL = c_int(0)
class TranslateInjectedMouse(threading.Thread):
daemon=True
def run(self):
def low_level_mouse_handler(nCode, wParam, lParam):
#print("handler")
lParam.contents.flags &= 0x11111100
return CallNextHookEx(NULL, nCode, wParam, lParam)
WH_MOUSE_LL = c_int(14)
mouse_callback = LowLevelMouseProc(low_level_mouse_handler)
self.mouse_hook = SetWindowsHookEx(WH_MOUSE_LL, mouse_callback, user32._handle, NULL)
# Register to remove the hook when the interpreter exits. Unfortunately a
# try/finally block doesn't seem to work here.
atexit.register(UnhookWindowsHookEx, self.mouse_hook)
msg = LPMSG()
while not GetMessage(msg, NULL, NULL, NULL):
TranslateMessage(msg)
DispatchMessage(msg)
def stop(self):
UnhookWindowsHookEx(self.mouse_hook)
if __name__ == '__main__':
# this is all you need to translate in background
t = TranslateInjectedMouse()
t.start()
# below this is test code to create clicks
import time
mouse_event = user32.mouse_event
mouse_event(MOUSEEVENTF_MOVE, 0, 100, 0, 0)
'''
mouse_event(MOUSEEVENTF_MOVE, ctypes.c_int(100), ctypes.c_int(100), 0, 0)
time.sleep(3)
mouse_event(MOUSEEVENTF_MOVE, ctypes.c_int(200), ctypes.c_int(100), 0, 0)
time.sleep(3)
mouse_event(MOUSEEVENTF_MOVE, ctypes.c_int(300), ctypes.c_int(100), 0, 0)
time.sleep(3)
mouse_event(MOUSEEVENTF_MOVE, ctypes.c_int(300), ctypes.c_int(200), 0, 0)
time.sleep(3)
'''
#time.sleep(10)
# while True:
# try:
# time.sleep(1)
# mouse_event(MOUSEEVENTF_MOVE,200,200,0,0)
#
# mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
#
# mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
# except KeyboardInterrupt:
# if t.is_alive():
# t.stop()
# else:
# break |
983,582 | 4db0d315b2e4911258a17827f19fcce10f290ab0 | import fasttext
import config
from multiprocessing import Pool
from paramters import Parameters
from itertools import product
from utilities import remove_folder
def get_embedding_name(model, dim, lr, windows, epoch):
    """Build the on-disk path for a word2vec model trained with the given
    hyperparameters (rooted at config.root/w2v)."""
    suffix = '-'.join(str(part) for part in (model, dim, lr, windows, epoch))
    return '{}/w2v/w2v-{}'.format(config.root, suffix)
def train_embedding(model, dim, lr, windows, epoch, first=False):
if first: remove_folder('{}/w2v'.format(config.root))
if model == 'skip':
func = fasttext.skipgram
elif model == 'cbow':
func = fasttext.cbow
w2v_model_name = get_embedding_name(model, dim, lr, windows, epoch)
print('training embedding {}'.format(w2v_model_name))
func(config.line_corpus, w2v_model_name, dim=dim, lr=lr, ws=windows, epoch=epoch, thread=60)
print('embedding end: {}'.format(w2v_model_name))
if __name__ == '__main__':
cpu_number = 60
pool = Pool(processes=cpu_number)
P = Parameters
for arg in product(P.models, P.dimensons, P.learning_rates, P.ws, P.epochs):
train_embedding(*arg)
# pool.starmap(train_embedding, product(P.models, P.dimensons, P.learning_rates, P.ws, P.epochs))
|
983,583 | f1b0af6a7f8615db16c742b7fbbc914a91ec3da5 | '''
Created by Deepti Mahesh
5/10/2019
S: total records
bucket_index: initial hash function, ie modulo 2
new_index: new hash function (next one) initialized to modulo 4
bucket_count: initialized as 2
Main mem = 2
Buffer size = 20
'''
import sys
from random import random
import time as tik
import os
def insert_hash(num):
'''
Insert into buckets unless bucket > 75% full
'''
global S, count_total, repetitions
hash_val = get_hash_val(num)
flag, a = 1, 1
#create bucket
if hash_val not in buckets:
buckets[hash_val] = [[]]
for i in range(count_arr[hash_val]):
if num not in buckets[hash_val][i]:
flag = 0
if a == 1:
if flag == 0:
lentemp = count_arr[hash_val]
S += 1
temp = lentemp - 1
if len(buckets[hash_val][temp]) >= (5.0):
temp, lentemp, count_total = temp + 1, lentemp + 1, \
count_total + 1
count_arr[hash_val] = lentemp
# empty array append
buckets[hash_val].append([])
buckets[hash_val][temp].append(num)
repetitions = append_rep(num)
density = (S * 20.0) / count_total
if density >= (75.0*100)/100:
make_new()
def append_rep(num):
'''
Append to_print
'''
global repetitions
repetitions.append(num)
if len(repetitions) >= (5.0):
repetitions = print_result(repetitions)
return repetitions
def get_hash_val(n, flag = 0):
'''
Compute Hash value to sort record into bucket
'''
if flag == 0:
result = n % bucket_index
if result < index:
result = n % new_index
return result
else:
return n % new_index
def print_result(buff):
    """Print every buffered value on its own line, then hand back a fresh
    empty buffer for the caller to reuse."""
    for item in buff:
        print(item)
    return []
def update_global():
'''
'''
global index, bucket_index, new_index, bucket_count
index = index + 1
if bucket_count == new_index:
bucket_index, new_index, index = bucket_index*2, bucket_index*2, 0
def to_replace():
global count_total, count_arr
global index
arr, not_replace = [], []
leny = count_arr[index]
for i in range(leny):
for value in buckets[index][i]:
arr.append(value)
count_total -= int(count_arr[index])
not_replace.append(count_total)
return arr
def make_new():
'''
Create new bucket
'''
global bucket_count, count_total
global index, bucket_index, new_index
bucket_count += 1
replace_array = to_replace()
buckets[index], count_arr[index], count_total = [[]], 1, count_total + 1
buckets[bucket_count - 1] = [[]]
count_arr[bucket_count - 1], count_total = 1, count_total + 1
count_total += 1
for value in replace_array:
hash_val = get_hash_val(value, flag = 1)
flag, a, b = 1, 1, 3
if hash_val not in buckets:
buckets[hash_val] = [[]]
count_arr[hash_val] = 1
count_total += 1
if a == 1:
for j in range(count_arr[hash_val]):
if value not in buckets[hash_val][j]:
flag = 0
if flag == 0:
if a == 1:
temp = count_arr[hash_val] - 1
if b != 0:
temp = count_arr[hash_val] - 1
if len(buckets[hash_val][temp]) >= 5.0:
count_arr[hash_val] += 1
count_total, temp = count_total + 1, temp + 1
buckets[hash_val].append([])
buckets[hash_val][temp].append(value)
update_global()
#Start of main
input_buffer, repetitions = [], []
S = 0
bucket_count = 2
#Initialize dictionary
buckets, count_arr = {}, {0: 1, 1: 1}
count_total = 2
index = 0
bucket_index, new_index = 2, 4
with open(sys.argv[1]) as filey:
for line in filey:
num = line.strip()
input_buffer.append(int(num))
if len(input_buffer) >= 5.0:
for val in input_buffer:
insert_hash(val)
input_buffer = []
for val in input_buffer:
insert_hash(val)
input_buffer = []
repetitions = print_result(repetitions) |
983,584 | c3bc4daac28d69cb1df3bad3cf9edeea3bbbcdf1 | #!/usr/bin/env python
#
#
from gozerbot.generic import geturl
from gozerbot.examples import examples
from gozerbot.plugins import plugins
from gozerbot.thr import start_new_thread
import random, os, time
plugins.regplugins()
donot = ['reboot', 'cycle', 'loglevel', 'quit', 'email', 'meet', 'nick', \
'part', 'cc', 'chat', 'join', ' nick', 'update', 'install', \
'reconnect', 'jump', 'nonfree', 'relay', 'rss', 'fleet', 'sendraw', \
'upgrade', 'alarm', 'remind', 'intro', 'host', 'ip', 'alarm', 'tests', \
'unload', 'delete', 'dfwiki', 'dig', 'silent', 'reconnect', 'switch', 'op',
'dict', 'slashdot', 'films', 'latest', 'weather', 'coll', 'web', 'mail', \
'markov', 'probe', 'sc']
def dowebtest(nrloop):
a = examples.getexamples()
teller = 0
while 1:
nrloop -= 1
if nrloop == 0:
break
random.shuffle(a)
for z in a:
teller += 1
no = 0
for zz in donot:
if z.find(zz) != -1:
no = 1
break
print z
try:
print geturl('http://localhost:8088/dispatch?command=%s' % z)
except IOError:
pass
except:
os._exit(0)
for i in range(100):
start_new_thread(dowebtest, (10, ))
try:
while 1:
time.sleep(1)
except:
os._exit(0)
|
983,585 | 20f9c7b8870ac88e865c9d419e7a890ce7d1fea3 | # -*- coding: utf-8 -*-
"""
Plot the x-ray spectra downloaded from the [Siemens simulator]
(https://w9.siemens.com/cms/oemproducts/Home/X-rayToolbox/spektrum/)
"""
import matplotlib.pylab as plt
import os
import scipy
import numpy as np
from scipy.integrate import trapz
# http://stackoverflow.com/a/11249430/323100
Spectrapath = '/afs/psi.ch/project/EssentialMed/Dev/Spectra'
Spectra = [
(os.path.join(Spectrapath, 'Xray-Spectrum_040kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_046kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_053kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_060kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_070kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_080kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_090kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_100kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_100kV.txt')),
(os.path.join(Spectrapath, 'Xray-Spectrum_120kV.txt'))]
Data = [(np.loadtxt(FileName)) for FileName in Spectra]
Energy = [int(open(FileName).readlines()[2].split()[4]) for FileName in Spectra]
Mean = [float(open(FileName).readlines()[5].split()[3]) for FileName in Spectra]
for i in range(len(Spectra)):
plt.plot(Data[i][:, 0], Data[i][:, 1],
label=str(Energy[i]) + 'kV, Mean=' +
str(round(Mean[i], 3)) + 'keV')
plt.legend(loc='best')
plt.title('X-ray spectra')
plt.xlabel('Energy [kV]')
plt.ylabel('Photons')
plt.savefig('plot.pdf')
plt.figure(figsize=[22, 5])
for counter, spectra in enumerate(Spectra):
plt.subplot(1, len(Spectra), counter + 1)
plt.plot(Data[counter][:, 0], Data[counter][:, 1])
Integral = scipy.integrate.trapz(Data[counter][:, 1], Data[counter][:, 0])
print 'The integral for', Energy[counter], 'kV is', \
str(round(Integral / 1e6, 3)) + 'e6 photons'
plt.title(str(Energy[counter]) + 'kV\n' +
str(round(Integral / 1e6, 3)) + 'e6 photons')
plt.xlim([0, 150])
plt.ylim([0, 3e6])
# Turn off y-ticks for subplots 2-end (counter >0)
if counter:
plt.setp(plt.gca().get_yticklabels(), visible=False)
plt.tight_layout()
plt.show()
|
class Solution:
    def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
        """Return True if every point in `coordinates` lies on one straight line.

        Uses the integer cross product (x1-x0)*(y-y0) == (y1-y0)*(x-x0) for
        each point, which is exact for integer inputs.  This replaces the old
        slope-division approach, which suffered float rounding and needed a
        special case for vertical lines (and stored an x coordinate in a
        variable called `slope` for that case).
        """
        (x0, y0), (x1, y1) = coordinates[0], coordinates[1]
        dx, dy = x1 - x0, y1 - y0
        for x, y in coordinates[2:]:
            # Non-zero cross product => the point is off the line.
            if dx * (y - y0) != dy * (x - x0):
                return False
        return True
983,587 | bcf17ed3bbe1ae84ef06a17f3efdf27495a2b5c1 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for linker-specific test cases.
The custom dynamic linker can only be tested through a custom test case
for various technical reasons:
- It's an 'invisible feature', i.e. it doesn't expose a new API or
behaviour, all it does is save RAM when loading native libraries.
- Checking that it works correctly requires several things that do not
fit the existing GTest-based and instrumentation-based tests:
- Native test code needs to be run in both the browser and renderer
process at the same time just after loading native libraries, in
a completely asynchronous way.
- Each test case requires restarting a whole new application process
with a different command-line.
- Enabling test support in the Linker code requires building a special
APK with a flag to activate special test-only support code in the
Linker code itself.
Host-driven tests have also been tried, but since they're really
sub-classes of instrumentation tests, they didn't work well either.
To build and run the linker tests, do the following:
ninja -C out/Debug content_linker_test_apk
build/android/test_runner.py linker
"""
import logging
import os
import re
import StringIO
import subprocess
import tempfile
import time
from pylib import constants
from pylib import android_commands
from pylib import flag_changer
from pylib.base import base_test_result
ResultType = base_test_result.ResultType
_PACKAGE_NAME='org.chromium.content_linker_test_apk'
_ACTIVITY_NAME='.ContentLinkerTestActivity'
_COMMAND_LINE_FILE='/data/local/tmp/content-linker-test-command-line'
# Path to the Linker.java source file.
_LINKER_JAVA_SOURCE_PATH = \
'content/public/android/java/src/org/chromium/content/app/Linker.java'
# A regular expression used to extract the browser shared RELRO configuration
# from the Java source file above.
_RE_LINKER_BROWSER_CONFIG = \
re.compile(r'.*BROWSER_SHARED_RELRO_CONFIG\s+=\s+' + \
'BROWSER_SHARED_RELRO_CONFIG_(\S+)\s*;.*',
re.MULTILINE | re.DOTALL)
# Logcat filters used during each test. Only the 'chromium' one is really
# needed, but the logs are added to the TestResult in case of error, and
# it is handy to have the 'content_android_linker' ones as well when
# troubleshooting.
_LOGCAT_FILTERS = [ '*:s', 'chromium:v', 'content_android_linker:v' ]
#_LOGCAT_FILTERS = [ '*:v' ] ## DEBUG
# Regular expression used to match status lines in logcat.
re_status_line = re.compile(r'(BROWSER|RENDERER)_LINKER_TEST: (FAIL|SUCCESS)')
# Regular expression used to mach library load addresses in logcat.
re_library_address = re.compile(
r'(BROWSER|RENDERER)_LIBRARY_ADDRESS: (\S+) ([0-9A-Fa-f]+)')
def _GetBrowserSharedRelroConfig():
  """Return the Linker's browser-process shared RELRO configuration.

  Parses the Java Linker source file to extract the value assigned to
  BROWSER_SHARED_RELRO_CONFIG.

  Return:
    None in case of error (e.g. could not locate the source file).
    'NEVER' if the browser process shall never use shared RELROs.
    'LOW_RAM_ONLY' if it uses them only on low-end devices.
    'ALWAYS' if it always uses a shared RELRO.
  """
  source_path = os.path.join(constants.DIR_SOURCE_ROOT,
                             _LINKER_JAVA_SOURCE_PATH)
  if not os.path.exists(source_path):
    logging.error('Could not find linker source file: ' + source_path)
    return None
  with open(source_path) as f:
    matches = _RE_LINKER_BROWSER_CONFIG.findall(f.read())
  if not matches:
    logging.error(
        'Can\'t find browser shared RELRO configuration value in ' + \
        source_path)
    return None
  config = matches[0]
  if config not in ('NEVER', 'LOW_RAM_ONLY', 'ALWAYS'):
    logging.error('Unexpected browser config value: ' + config)
    return None
  logging.info('Found linker browser shared RELRO config: ' + config)
  return config
def _WriteCommandLineFile(adb, command_line, command_line_file):
  """Write |command_line| into |command_line_file| on the device.

  FlagChanger is deliberately not used here: its implementation assumes the
  device has 'su', and thus does not work at all with production devices.
  """
  shell_cmd = 'echo "%s" > %s' % (command_line, command_line_file)
  adb.RunShellCommand(shell_cmd)
def _CheckLinkerTestStatus(logcat):
  """Parse |logcat| for both a browser and a renderer status line.

  Args:
    logcat: A string to parse. Can include line separators.
  Returns:
    A tuple (found, browser_ok, renderer_ok): found is True only once both
    status lines have been seen, in which case the other two are booleans
    reflecting the SUCCESS/FAIL status of each process; otherwise they are
    both None.
  """
  statuses = {}
  for match in re_status_line.finditer(logcat):
    process_type, status = match.groups()
    assert process_type in ('BROWSER', 'RENDERER'), \
        'Invalid process type ' + process_type
    # Later lines for the same process overwrite earlier ones, as before.
    statuses[process_type] = (status == 'SUCCESS')
    if len(statuses) == 2:
      return (True, statuses['BROWSER'], statuses['RENDERER'])
  # Didn't find both status lines.
  return (False, None, None)
def _WaitForLinkerTestStatus(adb, timeout):
  """Wait up to |timeout| seconds until the full linker test status lines appear
  in the logcat being recorded with |adb|.
  Args:
    adb: An AndroidCommands instance. This assumes adb.StartRecordingLogcat()
      was called previously.
    timeout: Timeout in seconds.
  Returns:
    ResultType.TIMEOUT in case of timeout, ResulType.PASS if both status lines
    report 'SUCCESS', or ResulType.FAIL otherwise.
  """
  # NOTE(review): this function is a stub -- it contains only the docstring
  # and therefore always returns None.  The behaviour it describes is
  # implemented by _StartActivityAndWaitForLinkerTestStatus() below; confirm
  # this is dead code before relying on (or removing) it.
def _StartActivityAndWaitForLinkerTestStatus(adb, timeout):
  """Force-start an activity and wait up to |timeout| seconds until the full
  linker test status lines appear in the logcat, recorded through |adb|.
  Args:
    adb: An AndroidCommands instance.
    timeout: Timeout in seconds.
  Returns:
    A (status, logs) tuple, where status is a ResultType constant, and logs
    is the final logcat output as a string.
  """
  # 1. Start recording logcat with appropriate filters.
  adb.StartRecordingLogcat(clear=True, filters=_LOGCAT_FILTERS)
  try:
    # 2. Force-start activity.
    adb.StartActivity(package=_PACKAGE_NAME,
                      activity=_ACTIVITY_NAME,
                      force_stop=True)
    # 3. Poll once per second, up to |timeout| times, until the test status
    # shows up in the logcat.
    num_tries = 0
    max_tries = timeout
    found = False
    while num_tries < max_tries:
      time.sleep(1)
      num_tries += 1
      found, browser_ok, renderer_ok = _CheckLinkerTestStatus(
          adb.GetCurrentRecordedLogcat())
      if found:
        break
  finally:
    logs = adb.StopRecordingLogcat()
  # BUGFIX: decide on |found| instead of the try counter -- a status line that
  # only appeared on the very last poll (num_tries == max_tries) used to be
  # misreported as a timeout.
  if not found:
    return ResultType.TIMEOUT, logs
  if browser_ok and renderer_ok:
    return ResultType.PASS, logs
  return ResultType.FAIL, logs
class LibraryLoadMap(dict):
  """A helper class to pretty-print a map of library names to load addresses."""
  def __str__(self):
    # items() instead of the Python 2-only iteritems() -- works on 2 and 3.
    items = ['\'%s\': 0x%x' % (name, address)
             for name, address in self.items()]
    return '{%s}' % (', '.join(items))

  def __repr__(self):
    return 'LibraryLoadMap(%s)' % self.__str__()
class AddressList(list):
  """A helper class to pretty-print a list of load addresses."""

  def __str__(self):
    return '[%s]' % ', '.join('0x%x' % addr for addr in self)

  def __repr__(self):
    return 'AddressList(' + str(self) + ')'
def _ExtractLibraryLoadAddressesFromLogcat(logs):
  """Extract the names and addresses of shared libraries loaded in the
  browser and renderer processes.
  Args:
    logs: A string containing logcat output.
  Returns:
    A tuple (browser_libs, renderer_libs), where each item is a map of
    library names (strings) to library load addresses (ints), for the
    browser and renderer processes, respectively.
  """
  libs_by_process = {'BROWSER': LibraryLoadMap(),
                     'RENDERER': LibraryLoadMap()}
  for match in re_library_address.finditer(logs):
    process_type, lib_name, lib_address = match.groups()
    assert process_type in libs_by_process, 'Invalid process type'
    libs_by_process[process_type][lib_name] = int(lib_address, 16)
  return libs_by_process['BROWSER'], libs_by_process['RENDERER']
def _CheckLoadAddressRandomization(lib_map_list, process_type):
  """Check that a map of library load addresses is random enough.
  Args:
    lib_map_list: a list of dictionaries that map library names (string)
      to load addresses (int). Each item in the list corresponds to a
      different run / process start.
    process_type: a string describing the process type.
  Returns:
    (status, logs) tuple, where <status> is True iff the load addresses are
    randomized, False otherwise, and <logs> is a string containing an error
    message detailing the libraries that are not randomized properly.
  """
  # Collect, for each library, its list of load addresses.
  lib_addr_map = {}
  for lib_map in lib_map_list:
    # items() instead of the Python 2-only iteritems() -- works on 2 and 3.
    for lib_name, lib_address in lib_map.items():
      lib_addr_map.setdefault(lib_name, AddressList()).append(lib_address)
  logging.info('%s library load map: %s', process_type, lib_addr_map)
  # For each library, check the randomness of its load addresses.
  # Consider that if there is more than one pair of identical addresses in
  # the list, then randomization is broken.
  bad_libs = {}
  for lib_name, lib_address_list in lib_addr_map.items():
    if len(set(lib_address_list)) < len(lib_address_list) - 1:
      bad_libs[lib_name] = lib_address_list
  if bad_libs:
    return False, '%s libraries failed randomization: %s' % \
        (process_type, bad_libs)
  return True, '%s libraries properly randomized: %s' % \
      (process_type, lib_addr_map)
class LinkerTestCaseBase(object):
  """Base class for linker test cases."""
  def __init__(self, is_low_memory=False):
    """Create a test case.
    Args:
      is_low_memory: True to simulate a low-memory device, False otherwise.
    """
    self.is_low_memory = is_low_memory
    if is_low_memory:
      test_suffix = 'ForLowMemoryDevice'
    else:
      test_suffix = 'ForRegularDevice'
    class_name = self.__class__.__name__
    # e.g. 'LinkerSharedRelroTest.ForRegularDevice' -- used in console output
    # and in the reported test results below.
    self.qualified_name = '%s.%s' % (class_name, test_suffix)
    self.tagged_name = self.qualified_name
  def _RunTest(self, adb):
    """Run the test, must be overriden.
    Args:
      adb: An AndroidCommands instance to the device.
    Returns:
      A (status, log) tuple, where <status> is a ResultType constant, and <log>
      is the logcat output captured during the test in case of error, or None
      in case of success.
    """
    return ResultType.FAIL, 'Unimplemented _RunTest() method!'
  def Run(self, device):
    """Run the test on a given device.
    Args:
      device: Name of target device where to run the test.
    Returns:
      A base_test_result.TestRunResults() instance.
    """
    margin = 8
    # GTest-style '[ RUN ]' / '[ OK ]' console output (Python 2 print).
    print '[ %-*s ] %s' % (margin, 'RUN', self.tagged_name)
    logging.info('Running linker test: %s', self.tagged_name)
    adb = android_commands.AndroidCommands(device)
    # Create command-line file on device.
    command_line_flags = ''
    if self.is_low_memory:
      command_line_flags = '--low-memory-device'
    _WriteCommandLineFile(adb, command_line_flags, _COMMAND_LINE_FILE)
    # Run the test.
    status, logs = self._RunTest(adb)
    result_text = 'OK'
    if status == ResultType.FAIL:
      result_text = 'FAILED'
    elif status == ResultType.TIMEOUT:
      result_text = 'TIMEOUT'
    print '[ %*s ] %s' % (margin, result_text, self.tagged_name)
    results = base_test_result.TestRunResults()
    results.AddResult(
        base_test_result.BaseTestResult(
            self.tagged_name,
            status,
            logs))
    return results
  def __str__(self):
    return self.tagged_name
  def __repr__(self):
    return self.tagged_name
class LinkerSharedRelroTest(LinkerTestCaseBase):
  """A linker test case to check the status of shared RELRO sections.
  The core of the checks performed here are pretty simple:
  - Clear the logcat and start recording with an appropriate set of filters.
  - Create the command-line appropriate for the test-case.
  - Start the activity (always forcing a cold start).
  - Every second, look at the current content of the filtered logcat lines
    and look for instances of the following:
      BROWSER_LINKER_TEST: <status>
      RENDERER_LINKER_TEST: <status>
    where <status> can be either FAIL or SUCCESS. These lines can appear
    in any order in the logcat. Once both browser and renderer status are
    found, stop the loop. Otherwise timeout after 30 seconds.
    Note that there can be other lines beginning with BROWSER_LINKER_TEST:
    and RENDERER_LINKER_TEST:, but are not followed by a <status> code.
  - The test case passes if the <status> for both the browser and renderer
    process are SUCCESS. Otherwise its a fail.
  """
  def _RunTest(self, adb):
    # Wait up to 30 seconds until the linker test status is in the logcat.
    # The whole run (activity start + polling + log collection) is delegated
    # to the module-level helper; Run() already wrote the command-line file.
    return _StartActivityAndWaitForLinkerTestStatus(adb, timeout=30)
class LinkerLibraryAddressTest(LinkerTestCaseBase):
  """A test case that verifies library load addresses.
  The point of this check is to ensure that the libraries are loaded
  according to the following rules:
  - For low-memory devices, they should always be loaded at the same address
    in both browser and renderer processes, both below 0x4000_0000.
  - For regular devices, the browser process should load libraries above
    0x4000_0000, and renderer ones below it.
  """
  def _RunTest(self, adb):
    result, logs = _StartActivityAndWaitForLinkerTestStatus(adb, timeout=30)
    # Return immediately in case of timeout.
    if result == ResultType.TIMEOUT:
      return result, logs
    # Collect the library load addresses in the browser and renderer processes.
    browser_libs, renderer_libs = _ExtractLibraryLoadAddressesFromLogcat(logs)
    logging.info('Browser libraries: %s', browser_libs)
    logging.info('Renderer libraries: %s', renderer_libs)
    # Check that the same libraries are loaded into both processes:
    browser_set = set(browser_libs.keys())
    renderer_set = set(renderer_libs.keys())
    if browser_set != renderer_set:
      logging.error('Library set mistmach browser=%s renderer=%s',
                    browser_libs.keys(), renderer_libs.keys())
      return ResultType.FAIL, logs
    # And that there are not empty.
    if not browser_set:
      logging.error('No libraries loaded in any process!')
      return ResultType.FAIL, logs
    # Check that the renderer libraries are loaded at 'low-addresses'. i.e.
    # below 0x4000_0000, for every kind of device.
    memory_boundary = 0x40000000
    bad_libs = []
    # items() instead of the Python 2-only iteritems() -- works on 2 and 3.
    for lib_name, lib_address in renderer_libs.items():
      if lib_address >= memory_boundary:
        bad_libs.append((lib_name, lib_address))
    if bad_libs:
      logging.error('Renderer libraries loaded at high addresses: %s', bad_libs)
      return ResultType.FAIL, logs
    browser_config = _GetBrowserSharedRelroConfig()
    if not browser_config:
      return ResultType.FAIL, 'Bad linker source configuration'
    if browser_config == 'ALWAYS' or \
        (browser_config == 'LOW_RAM_ONLY' and self.is_low_memory):
      # The libraries must all be loaded at the same addresses. This also
      # implicitly checks that the browser libraries are at low addresses.
      addr_mismatches = []
      for lib_name, lib_address in browser_libs.items():
        lib_address2 = renderer_libs[lib_name]
        if lib_address != lib_address2:
          addr_mismatches.append((lib_name, lib_address, lib_address2))
      if addr_mismatches:
        logging.error('Library load address mismatches: %s',
                      addr_mismatches)
        return ResultType.FAIL, logs
    # Otherwise, check that libraries are loaded at 'high-addresses'.
    # Note that for low-memory devices, the previous checks ensure that they
    # were loaded at low-addresses.
    else:
      bad_libs = []
      for lib_name, lib_address in browser_libs.items():
        if lib_address < memory_boundary:
          bad_libs.append((lib_name, lib_address))
      if bad_libs:
        logging.error('Browser libraries loaded at low addresses: %s', bad_libs)
        return ResultType.FAIL, logs
    # Everything's ok.
    return ResultType.PASS, logs
class LinkerRandomizationTest(LinkerTestCaseBase):
  """A linker test case to check that library load address randomization works
  properly between successive starts of the test program/activity.
  This starts the activity several time (each time forcing a new process
  creation) and compares the load addresses of the libraries in them to
  detect that they have changed.
  In theory, two successive runs could (very rarely) use the same load
  address, so loop 5 times and compare the values there. It is assumed
  that if there are more than one pair of identical addresses, then the
  load addresses are not random enough for this test.
  """
  def _RunTest(self, adb):
    # Perform 5 cold starts, collecting the per-process library load maps
    # from the logcat of each run.
    max_loops = 5
    browser_lib_map_list = []
    renderer_lib_map_list = []
    logs_list = []
    for loop in range(max_loops):
      # Start the activity.
      result, logs = _StartActivityAndWaitForLinkerTestStatus(adb, timeout=30)
      if result == ResultType.TIMEOUT:
        # Something bad happened. Return immediately.
        return result, logs
      # Collect library addresses.
      browser_libs, renderer_libs = _ExtractLibraryLoadAddressesFromLogcat(logs)
      browser_lib_map_list.append(browser_libs)
      renderer_lib_map_list.append(renderer_libs)
      logs_list.append(logs)
    # Check randomization in the browser libraries.
    logs = '\n'.join(logs_list)
    browser_status, browser_logs = _CheckLoadAddressRandomization(
        browser_lib_map_list, 'Browser')
    renderer_status, renderer_logs = _CheckLoadAddressRandomization(
        renderer_lib_map_list, 'Renderer')
    browser_config = _GetBrowserSharedRelroConfig()
    if not browser_config:
      return ResultType.FAIL, 'Bad linker source configuration'
    if not browser_status:
      # Poor browser randomization is only fatal when a shared RELRO is in
      # use; otherwise it is logged and ignored (see the note below).
      if browser_config == 'ALWAYS' or \
          (browser_config == 'LOW_RAM_ONLY' and self.is_low_memory):
        return ResultType.FAIL, browser_logs
      # IMPORTANT NOTE: The system's ASLR implementation seems to be very poor
      # when starting an activity process in a loop with "adb shell am start".
      #
      # When simulating a regular device, loading libraries in the browser
      # process uses a simple mmap(NULL, ...) to let the kernel device where to
      # load the file (this is similar to what System.loadLibrary() does).
      #
      # Unfortunately, at least in the context of this test, doing so while
      # restarting the activity with the activity manager very, very, often
      # results in the system using the same load address for all 5 runs, or
      # sometimes only 4 out of 5.
      #
      # This has been tested experimentally on both Android 4.1.2 and 4.3.
      #
      # Note that this behaviour doesn't seem to happen when starting an
      # application 'normally', i.e. when using the application launcher to
      # start the activity.
      logging.info('Ignoring system\'s low randomization of browser libraries' +
                   ' for regular devices')
    if not renderer_status:
      return ResultType.FAIL, renderer_logs
    return ResultType.PASS, logs
class LinkerLowMemoryThresholdTest(LinkerTestCaseBase):
  """This test checks that the definitions for the low-memory device physical
  RAM threshold are identical in the base/ and linker sources. Because these
  two components should absolutely not depend on each other, it's difficult
  to perform this check correctly at runtime inside the linker test binary
  without introducing hairy dependency issues in the build, or complicated
  plumbing at runtime.
  To work-around this, this test looks directly into the sources for a
  definition of the same constant that should look like:
    #define ANDROID_LOW_MEMORY_DEVICE_THRESHOLD_MB <number>
  And will check that the values for <number> are identical in all of
  them."""
  # A regular expression used to find the definition of the threshold in all
  # sources:
  _RE_THRESHOLD_DEFINITION = re.compile(
      r'^\s*#\s*define\s+ANDROID_LOW_MEMORY_DEVICE_THRESHOLD_MB\s+(\d+)\s*$',
      re.MULTILINE)
  # The list of source files, relative to DIR_SOURCE_ROOT, which must contain
  # a line that matches the re above.
  _SOURCES_LIST = [
      'base/android/sys_utils.cc',
      'content/common/android/linker/linker_jni.cc' ]
  def _RunTest(self, adb):
    failure = False
    values = []
    # BUGFIX: the local used to be named 're', shadowing the 're' module --
    # that is why the old "re.match() never works here" comment existed
    # (pattern.match() is also anchored at the string start; findall() with
    # re.MULTILINE is the right call).
    threshold_re = LinkerLowMemoryThresholdTest._RE_THRESHOLD_DEFINITION
    # First, collect all the values in all input sources.
    for source in LinkerLowMemoryThresholdTest._SOURCES_LIST:
      source_path = os.path.join(constants.DIR_SOURCE_ROOT, source)
      if not os.path.exists(source_path):
        logging.error('Missing source file: ' + source_path)
        failure = True
        continue
      with open(source_path) as f:
        source_text = f.read()
      source_values = threshold_re.findall(source_text)
      if not source_values:
        logging.error('Missing low-memory threshold definition in ' + \
                      source_path)
        logging.error('Source:\n%s\n' % source_text)
        failure = True
        continue
      values += source_values
    # Second, check that they are all the same.
    if not failure:
      for value in values[1:]:
        if value != values[0]:
          logging.error('Value mismatch: ' + repr(values))
          failure = True
    if failure:
      return ResultType.FAIL, 'Incorrect low-end memory threshold definitions!'
    return ResultType.PASS, ''
|
983,588 | 8f7deb3680520385a9c66ea7030c113caacf0abe | #Given an array of ints, return the sum of the first 2 elements in the array. If the array length is less than 2, just sum up the elements that exist, returning 0 if the array is length 0.
#sum2([1, 2, 3]) - 3
#sum2([1, 1]) - 2
#sum2([1, 1, 1, 1]) -2
def sum2(nums):
    """Return the sum of the first two elements of nums.

    If nums has fewer than two elements, sum whatever exists
    (0 for an empty list).  Slicing makes the three-way length
    branch of the original unnecessary.
    """
    return sum(nums[:2])
|
983,589 | 825a8f1b8f3674653abe83982541596f3f6e6c7b | import math
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine, euclidean
from skll.metrics import kappa
import seaborn as sns
from utils.utils import Cache
import matplotlib.pylab as plt
def yulesQ(x, y):
    """Yule's Q association coefficient of two binary (0/1) vectors.

    Computed directly as (ad - bc) / (ad + bc), which is algebraically
    identical to the odds-ratio form (OR - 1) / (OR + 1) but does not
    divide by zero when b or c is 0 (the old code crashed on perfectly
    concordant vectors).  Still undefined when both ad and bc are 0.
    """
    a = ((x == 1) & (y == 1)).sum()
    b = ((x == 1) & (y == 0)).sum()
    c = ((x == 0) & (y == 1)).sum()
    d = ((x == 0) & (y == 0)).sum()
    return (a * d - b * c) / (a * d + b * c)
def accuracy(x, y):
    """Simple matching coefficient: agreeing cells over all four 0/1 cells."""
    both_one = ((x == 1) & (y == 1)).sum()
    one_zero = ((x == 1) & (y == 0)).sum()
    zero_one = ((x == 0) & (y == 1)).sum()
    both_zero = ((x == 0) & (y == 0)).sum()
    total = both_one + one_zero + zero_one + both_zero
    return (both_one + both_zero) / total
def jaccard(x, y):
    """Jaccard-style coefficient computed on the (0,0) agreement cell.

    NOTE: the contingency-cell labels are deliberately kept as in the
    original, which counts (0,0) agreements in the numerator (i.e. Jaccard
    of the negated vectors) -- matching ochiai() below.
    """
    both_zero = ((x == 0) & (y == 0)).sum()
    one_zero = ((x == 1) & (y == 0)).sum()
    zero_one = ((x == 0) & (y == 1)).sum()
    return both_zero / (both_zero + one_zero + zero_one)
def sokal(x, y):
    """Sokal-Michener (simple matching) coefficient of two binary vectors.

    Numerically identical to accuracy() above: matches over total cells.
    """
    matches = ((x == 1) & (y == 1)).sum() + ((x == 0) & (y == 0)).sum()
    mismatches = ((x == 1) & (y == 0)).sum() + ((x == 0) & (y == 1)).sum()
    return matches / (matches + mismatches)
def ochiai(x, y):
    """Ochiai coefficient computed on the (0,0) agreement cell.

    NOTE: like jaccard() above, the cell labels are deliberately swapped
    relative to the textbook convention (numerator counts (0,0) matches).
    """
    both_zero = ((x == 0) & (y == 0)).sum()
    one_zero = ((x == 1) & (y == 0)).sum()
    zero_one = ((x == 0) & (y == 1)).sum()
    return both_zero / math.sqrt((both_zero + one_zero) * (both_zero + zero_one))
def kappa_own(x, y):
    """Cohen's kappa computed from the 2x2 contingency table of two
    binary vectors: (observed agreement - chance agreement) / (1 - chance)."""
    tp = ((x == 1) & (y == 1)).sum()
    fp = ((x == 1) & (y == 0)).sum()
    fn = ((x == 0) & (y == 1)).sum()
    tn = ((x == 0) & (y == 0)).sum()
    total = tp + fp + fn + tn
    observed = (tp + tn) / total
    chance = ((tp + fp) * (tp + fn) + (fp + tn) * (fn + tn)) / (total ** 2)
    return (observed - chance) / (1 - chance)
def links(x, y):
    """Count of positions where both binary vectors are 1 (co-occurrences)."""
    co_occurrences = x & y
    return co_occurrences.sum()
def remove_nans(df, to_zero=True):
    """Clean NaNs out of a square (item x item) similarity matrix.

    to_zero=True (default): replace every NaN with 0, in place.
    to_zero=False: first drop rows/columns that are NaN in at least half of
    their entries, then greedily drop the worst remaining offender until the
    matrix is NaN-free.  Rows and columns are always dropped symmetrically.
    """
    if to_zero:
        df[np.isnan(df)] = 0
        return df
    # Keep only rows/cols with fewer than 50% NaNs.  (Note: 'filter' shadows
    # the builtin of the same name.)
    filter = np.isnan(df).sum() < len(df) / 2
    df = df.loc[filter, filter]
    # Greedy pass: repeatedly remove the index with the most remaining NaNs.
    while np.isnan(df).sum().sum() > 0:
        worst = np.isnan(df).sum().argmax()
        df = df.loc[df.index != worst, df.index != worst]
    return df
def pairwise_metric(df, metric, min_periods=1, prefect_fit=1.):
    """Build a symmetric column-by-column matrix by applying `metric` to every
    pair of columns of `df`, skipping non-finite entries pairwise.

    Args:
        df: students x items DataFrame (may contain NaNs).
        metric: callable taking two 1-D arrays and returning a scalar.
        min_periods: minimum overlapping finite entries required per pair,
            otherwise the cell is NaN.
        prefect_fit: (sic) value placed on the diagonal.
    Returns:
        items x items DataFrame, NaNs zeroed by remove_nans().
    """
    # BUGFIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
    # to_numpy() is the supported equivalent.
    mat = df.to_numpy().T
    K = len(df.columns)
    met = np.empty((K, K), dtype=float)
    mask = np.isfinite(mat)
    for i, ac in enumerate(mat):
        for j, bc in enumerate(mat):
            if i > j:
                continue  # symmetric: each pair computed once, mirrored below
            valid = mask[i] & mask[j]
            if valid.sum() < min_periods:
                c = np.nan
            elif i == j:
                c = prefect_fit
            elif not valid.all():
                c = metric(ac[valid], bc[valid])
            else:
                c = metric(ac, bc)
            met[i, j] = c
            met[j, i] = c
    return remove_nans(pd.DataFrame(met, index=df.columns, columns=df.columns))
def similarity_pearson(data, cache=None, min_periods=1):
    """Pearson correlation between items.

    Accepts either a long-format frame with student/item/correct columns
    (pivoted here into a student x item matrix) or an already-pivoted /
    similarity matrix, then correlates its columns.
    """
    if 'student' in data.columns:
        data['correct'] = data['correct'].astype(float)
        data = data.pivot('student', 'item', 'correct')
    return remove_nans(data.corr(min_periods=min_periods))
# --- Cached pairwise similarity wrappers -------------------------------------
# Each wrapper pivots long-format data (student/item/correct) into a
# student x item matrix when needed, then delegates to pairwise_metric() with
# the matching metric function defined above.  @Cache() comes from utils.utils
# (presumably memoizes results keyed by input -- confirm there); the unused
# 'cache' parameter is part of that decorator's calling convention.
@Cache()
def similarity_kappa(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, kappa_own)
@Cache()
def similarity_kappa2(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, kappa)
@Cache()
def similarity_cosine(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, cosine)
@Cache()
def similarity_euclidean(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    # Distance metric: self-distance on the diagonal is 0, not 1.
    return pairwise_metric(data, euclidean, prefect_fit=0.)
@Cache()
def similarity_yulesQ(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, yulesQ)
@Cache()
def similarity_ochiai(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, ochiai)
@Cache()
def similarity_sokal(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, sokal)
@Cache()
def similarity_accuracy(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, accuracy)
@Cache()
def similarity_jaccard(data, cache=None):
    if 'student' in data.columns:
        data = data.pivot('student', 'item', 'correct')
    return pairwise_metric(data, jaccard)
def wsimilarity_links(data, trash_hold=None):
    # Binarize `data` at the threshold (per-column medians by default), then
    # count pairwise co-occurrences with links().
    # NOTE(review): 'wsimilarity' and 'trash_hold' ("threshold") look like
    # typos, but both are public names -- renaming would break callers.
    if trash_hold is None:
        trash_hold = data.median()
    return pairwise_metric(data > trash_hold, links)
def similarity_double_pearson(answers):
    """Second-order similarity: Pearson correlation of the Pearson matrix."""
    return similarity_pearson(similarity_pearson(answers))
def plot_similarity_hist(X, ground_truth, similarity_name, lim=True):
    """Plot distributions of within-concept vs. between-concept similarities.

    Args:
        X: square similarity DataFrame (items x items).
        ground_truth: per-item concept labels aligned with X's axes.
        similarity_name: only its suffix ('links' / 'euclid') selects how the
            values are displayed.
        lim: clamp the x-axis for correlation-like metrics.
    """
    same, different = [], []
    for concept1 in set(ground_truth):
        for concept2 in set(ground_truth):
            values = list(X.loc[ground_truth == concept1, ground_truth == concept2].values.flatten())
            # Drop exact 1s (diagonal / perfect-fit cells).
            values = [x for x in values if x != 1]
            if concept1 == concept2:
                same += values
            elif concept1 > concept2:
                # '>' visits each unordered concept pair exactly once.
                different += values
    if similarity_name.endswith('links'):
        sns.distplot(same)
        if len(different):
            sns.distplot(different)
    elif not similarity_name.endswith('euclid'):
        if lim:
            plt.xlim([-1,1])
        sns.distplot(same)
        if len(different):
            sns.distplot(different)
    else:
        # Euclidean distances are negated so "more similar" points right.
        if len(different):
            if lim:
                plt.xlim([-max(different), 0])
            sns.distplot(-np.array(different))
        sns.distplot(-np.array(same))
|
983,590 | 07bcdae7ab81f5b590c4e05da9aae0a21af306eb | import gensim
# Load a previously trained gensim LDA model plus its dictionary/corpus, then
# visualize it with pyLDAvis: interactive browser view + saved HTML.
# NOTE(review): on pyLDAvis >= 3.x this module was renamed to
# pyLDAvis.gensim_models -- confirm the installed version before upgrading.
import pyLDAvis.gensim
dictionary = gensim.corpora.Dictionary.load('ch6/gensim/id2word.dict')
corpus = gensim.corpora.MmCorpus('ch6/gensim/corpus.mm')
lda = gensim.models.ldamodel.LdaModel.load('ch6/gensim/lda_gensim.model')
lda_prepared = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
# show() blocks while serving the visualization in a browser.
pyLDAvis.show(lda_prepared)
pyLDAvis.save_html(lda_prepared, 'ch8/lda.html')
983,591 | 8eb971f3e33cfbbf16f2815ba6e6c7ba4d4114dc | import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras.optimizers import RMSprop
import os
import numpy as np
# Rock-Paper-Scissors classifier: augment the training images, train a small
# three-block CNN with a 3-way softmax head, then classify every file under
# RPS/val/.

# Raw strings keep the Windows backslashes literal (no escape-sequence risk).
train_dir = r'E:\Code\Projects\Education\Tensorflow_git\Coursera_Specialization\CNN_Course_2\Week_4\RPS\train'
test_dir = r'E:\Code\Projects\Education\Tensorflow_git\Coursera_Specialization\CNN_Course_2\Week_4\RPS\test'

# Augmentation only on the training set; validation images are just rescaled.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   horizontal_flip=True,
                                   height_shift_range=0.2,
                                   width_shift_range=0.2,
                                   zoom_range=0.5)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(300, 300),
                                                    batch_size=128,
                                                    class_mode='categorical')
test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(300, 300),
                                                  batch_size=128,
                                                  class_mode='categorical')

model = tf.keras.models.Sequential([
    layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    # Three output classes; indices follow flow_from_directory's
    # alphabetical folder order (assumed paper/rock/scissors -- see below).
    layers.Dense(3, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),
              metrics=['acc'])
model.fit(train_generator,
          epochs=20,
          validation_data=test_generator,
          workers=15,
          max_queue_size=100)

# Classify every image found under RPS/val/.
root_dir = 'RPS/val/'
for dirpath, _, files in os.walk(root_dir):
    for file in files:
        # BUGFIX: join with the directory actually being walked (dirpath),
        # not the fixed root -- the old code broke for files in subfolders.
        img = tf.keras.preprocessing.image.load_img(os.path.join(dirpath, file),
                                                    color_mode='rgb',
                                                    target_size=(300, 300))
        x = tf.keras.preprocessing.image.img_to_array(img)
        x = np.expand_dims(x, axis=0)  # add the batch dimension
        classes = model.predict(x)
        print(file)
        if np.argmax(classes) == 0:
            print('Paper')
        elif np.argmax(classes) == 1:
            print('Rock')
        else:
            print('Scissors')
983,592 | 6905925f087b638248e0914b438158edc45587f5 | from django.urls import include, path
from django.conf.urls import url
from . import views
from rest_framework import routers
app_name = 'website'
# Set up the router for the API URLs
router = routers.DefaultRouter()
router.register(r'purchase', views.PurchaseViewSet)
router.register(r'concession', views.ConcessionViewSet)
router.register(r'usage', views.UsageViewSet)
urlpatterns = [
path('register/', views.register, name='register'),
path('api/', include(router.urls)),
path('', views.customer_login, name='login'),
path('logout/', views.customer_logout, name='logout'),
path('connect/', views.connect, name='connect'),
path('purchases/', views.purchases, name='purchases'),
path('concessions/', views.concessions, name='concessions'),
path('usage/', views.usage, name='usage'),
url(r'^delete/(?P<pk>[0-9]+)/$', views.disconnect, name='disconnect')
]
|
983,593 | 77eb1de6da129c78ebea4fef1badb53a7012587a | '''
This file will compile origional source sol file to get ABI information, which is necessary for decode transaction
input data from etherscan
'''
import subprocess
import re
import os
import ntpath
import getopt, sys
import argparse
def get_compiler_version(sol_file_path):
    """Extract the solc version declared by a .sol file's pragma line.

    Returns e.g. '0.4.26'; falls back to '0.4.26' when no version pragma is
    found.  Raises Exception when the path does not end in 'sol'.

    BUGFIX: the old token-splitting logic kept the trailing ';' for pragmas
    without a leading '^' ('pragma solidity 0.5.0;' -> '0.5.0;'); matching
    the version number directly fixes that.
    """
    if not sol_file_path.endswith("sol"):
        raise Exception("The input file is not a sol file")
    version = ""
    # Accepts optional range operators such as ^, >=, ~ before the number.
    version_re = re.compile(r"pragma solidity\s*[\^>=<~]*\s*([0-9][0-9a-zA-Z.]*)")
    with open(sol_file_path, 'r') as src_file:
        for line in src_file:
            match = version_re.search(line)
            if match:
                version = match.group(1)
                break
    if version == "":
        version = "0.4.26"  # default when no pragma version is present
    return version
def compile_sol_file_get_abi(sol_file_path, output_path, hash):
    """Compile `sol_file_path` with the pragma-matched solc version and write
    the ABI JSON to <output_path>/<hash>-abi.json.

    `hash` names the output file (e.g. a contract address).
    """
    solc_version = get_compiler_version(sol_file_path)
    subprocess.run(['solc-select', 'use', solc_version])
    abi_file_path = os.path.join(output_path, hash + "-abi.json")
    # Context manager guarantees the handle is closed even if solc fails
    # (the old explicit open()/close() leaked it on exceptions), and the
    # unused ntpath.split() locals are gone.
    with open(abi_file_path, 'w+') as abi_file_obj:
        subprocess.run(['solc', '--abi', sol_file_path], stdout=abi_file_obj)
def compile_sol_file_get_binary(sol_file_path, output_path, hash):
    """Compile `sol_file_path` with the pragma-matched solc version and write
    the runtime binary output to <output_path>/<hash>-binary.txt.
    """
    solc_version = get_compiler_version(sol_file_path)
    subprocess.run(['solc-select', 'use', solc_version])
    binary_file_path = os.path.join(output_path, hash + "-binary.txt")
    # Context manager guarantees the handle is closed even if solc fails.
    with open(binary_file_path, 'w+') as binary_file_obj:
        subprocess.run(['solc', '--bin', sol_file_path], stdout=binary_file_obj)
# args: sol_file_path, output_file, hash
if __name__ == "__main__":
    # Expects exactly three CLI arguments: sol_file_path, output_path, hash.
    # (Removed an unused hard-coded local path left over from debugging.)
    argumentList = sys.argv[1:]
    compile_sol_file_get_binary(argumentList[0], argumentList[1], argumentList[2])
    compile_sol_file_get_abi(argumentList[0], argumentList[1], argumentList[2])
|
983,594 | d19beae5e6b83e9630e737a304e4def32f297140 | '''
@author:KongWeiKun
@file: EncryptUtil.py
@time: 18-1-20 下午1:33
@contact: 836242657@qq.com
'''
import os
import time
'''
加密解密
'''
from Crypto.Cipher import AES
import base64
def createSecretKey(size):
    """Return a random lowercase-hex string of exactly `size` characters.

    BUGFIX: the original hex-encoded os.urandom() bytes by hand with
    ord(), which raises TypeError on Python 3 (iterating bytes yields
    ints).  bytes.hex() produces the same class of value portably.
    """
    return os.urandom(size).hex()[:size]
def aesEncrypt(text, secKey):
    """AES-CBC encrypt `text` with `secKey` and return base64-encoded text.

    Pads with PKCS#7 to a 16-byte boundary and uses the fixed IV
    '0102030405060708' (NetEase-cloud-music API convention).

    Args:
        text: plaintext string.
        secKey: 16-byte key (str or bytes).

    Returns:
        Ciphertext as a base64-encoded str.
    """
    pad = 16 - len(text) % 16
    text = text + pad * chr(pad)  # PKCS#7 padding
    # Use the named constant instead of the magic number 2 (== MODE_CBC),
    # and encode str inputs so this also works under Python 3 pycryptodome,
    # which requires bytes for key, IV and plaintext.
    if isinstance(text, str):
        text = text.encode('utf-8')
    if isinstance(secKey, str):
        secKey = secKey.encode('utf-8')
    encryptor = AES.new(secKey, AES.MODE_CBC, b'0102030405060708')
    ciphertext = base64.b64encode(encryptor.encrypt(text))
    return str(ciphertext, encoding='utf-8')
def rsaEncrypt(text, pubKey, modulus):
    """Textbook RSA: hex((int of reversed text) ** e mod n), zero-padded to 256.

    Args:
        text: plaintext; reversed before conversion per the NetEase protocol.
        pubKey: public exponent as a hex string.
        modulus: RSA modulus as a hex string.

    Returns:
        Lowercase hex string left-padded with zeros to 256 characters.
    """
    text = text[::-1]
    # int.from_bytes replaces the Python-2-only str.encode('hex'); the
    # three-argument pow() performs modular exponentiation directly instead
    # of materializing the full (astronomically large) power before the mod.
    rs = pow(int.from_bytes(text.encode('utf-8'), 'big'),
             int(pubKey, 16), int(modulus, 16))
    return format(rs, 'x').zfill(256)
def timeStamp(timeNum):
timeStamp = float(timeNum/1000)
timeArray = time.localtime(timeStamp)
reTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return reTime |
983,595 | 6f5feeeff0e59c0c76193a6640f3418a15d0892b | # -*- coding: utf-8 -*-
# @Time : 5/3/18 10:24
# @Author : Shun Zheng
from __future__ import print_function
import csv
import sys
import os
import shutil
import time
import random
from collections import Counter, OrderedDict
from collections import defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from mpl_toolkits.axes_grid1 import make_axes_locatable
from torchtext.vocab import pretrained_aliases, Vectors
from sklearn.metrics import average_precision_score, precision_recall_curve, precision_recall_fscore_support, auc
# a temporary hack for using chinese word vectors in the financial domain
class MyVec(Vectors):
    """torchtext Vectors subclass whose default embedding file is the
    in-house financial-domain vector file 'wind_vec.50d.txt'."""
    def __init__(self, name='wind_vec.50d.txt', **kwargs):
        super(MyVec, self).__init__(name, **kwargs)
def build_field_vocab_from_dict(field, vocab_freq_dict, **kwargs):
    """Build `field.vocab` in place from a {token: frequency} dict.

    Args:
        field: torchtext.data.Field whose vocabulary is (re)built.
        vocab_freq_dict: mapping token -> count.
        **kwargs: forwarded to the Vocab constructor (e.g. vectors, min_freq).
    """
    print('Build vocabulary from dict')
    vocab_counter = Counter(vocab_freq_dict)
    # taken from torchtext.data.Field.build_vocab(): special tokens must head
    # the vocabulary, de-duplicated while preserving their order.
    specials = list(OrderedDict.fromkeys(
        tok for tok in [field.unk_token, field.pad_token, field.init_token,
                        field.eos_token]
        if tok is not None))
    field.vocab = field.vocab_cls(vocab_counter, specials=specials, **kwargs)
    print('Total vocabulary size:', len(field.vocab.freqs), 'effective size:', len(field.vocab))
def build_field_vocab_from_file(field, vocab_freq_file, **kwargs):
    """Build `field.vocab` in place from a CSV vocabulary-frequency file.

    Args:
        field: torchtext.data.Field whose vocabulary is (re)built.
        vocab_freq_file: CSV with one "word,frequency" row per token, as
            written by build_field_vocab_from_dataset.
        **kwargs: forwarded to the Vocab constructor (e.g. vectors, min_freq).
    """
    print('Build Field vocabulary from existing vocabulary files', vocab_freq_file)
    # read vocabulary frequency file
    with open(vocab_freq_file, 'r') as fin:
        csv_reader = csv.reader(fin)
        vocab_freq = []
        for row in csv_reader:
            word, freq = row
            # word = word.decode('utf-8')
            # for python3
            freq = int(freq)
            vocab_freq.append((word, freq))
    vocab_counter = Counter(dict(vocab_freq))
    # taken from torchtext.data.Field.build_vocab(): special tokens must head
    # the vocabulary, de-duplicated while preserving their order.
    specials = list(OrderedDict.fromkeys(
        tok for tok in [field.unk_token, field.pad_token, field.init_token,
                        field.eos_token]
        if tok is not None))
    field.vocab = field.vocab_cls(vocab_counter, specials=specials, **kwargs)
    print('Total vocabulary size:', len(field.vocab.freqs), 'effective size:', len(field.vocab))
def build_field_vocab_from_dataset(field, data_set, vocab_freq_file=None, **kwargs):
    """Build `field.vocab` by counting tokens in `data_set`.

    When `vocab_freq_file` is a path string, the resulting word frequencies
    are also dumped to it as CSV rows, least frequent first.
    """
    # build field vocabulary from data_set
    print('Build Field vocabulary from dataset')
    field.build_vocab(data_set, **kwargs)
    print('Total vocabulary size:', len(field.vocab.freqs), 'effective size:', len(field.vocab))
    if isinstance(vocab_freq_file, str):
        print('Dump vocabulary frequencies into', vocab_freq_file)
        # dump vocabulary frequency, sorted ascending by count
        sorted_vocab_freq = sorted(field.vocab.freqs.items(), key=lambda x: x[1])
        with open(vocab_freq_file, 'w') as fout:
            csv_writer = csv.writer(fout)
            for row in sorted_vocab_freq:
                word, freq = row
                # word = word.encode('utf-8')
                # for python3
                csv_writer.writerow([word, freq])
def build_field_vocabulary(field,
                           from_vocab=True,
                           vocab_freq_file=None,
                           vocab_freq_dict=None,
                           data_set=None,
                           **kwargs):
    """
    Build field vocabulary with three options, tried in this order:
    1. from vocabulary frequency file
    2. from vocabulary frequency dict
    3. by counting tokens in dataset and dump into vocab_freq_file accordingly
    Args:
        field: torchtext.data.Field object
        from_vocab: flag of whether to recover from the vocabulary file directly
        vocab_freq_file: the absolute path of the vocabulary file
        vocab_freq_dict: the vocabulary frequency dictionary
        data_set: torchtext.data.Dataset object
        **kwargs: key word arguments to be parsed to torchtext.Vocab class
    Raises:
        Exception: when none of the three options is applicable.
    """
    # Vector names unknown to torchtext's pretrained aliases are treated as
    # local embedding files and loaded via the MyVec hack above.
    if 'vectors' in kwargs and isinstance(kwargs['vectors'], str) and kwargs['vectors'] not in pretrained_aliases:
        print('Read from self-pretrained vectors', kwargs['vectors'])
        kwargs['vectors'] = MyVec(name=kwargs['vectors'])
    if from_vocab and isinstance(vocab_freq_file, str) and os.path.exists(vocab_freq_file):
        build_field_vocab_from_file(field, vocab_freq_file, **kwargs)
    elif from_vocab and isinstance(vocab_freq_dict, dict):
        build_field_vocab_from_dict(field, vocab_freq_dict, **kwargs)
    elif data_set is not None:
        build_field_vocab_from_dataset(field, data_set, vocab_freq_file, **kwargs)
    else:
        raise Exception('Build field vocabulary failed, please check input arguments!')
def random_init_certain_vector(vocab, token='<unk>', mean=0, std=0.5):
    """Re-draw the embedding row of `token` in place from N(mean, std).

    Args:
        vocab: torchtext.vocab.Vocab-like object with `stoi` and `vectors`.
        token: token whose vector is re-initialized.
        mean: mean of the normal distribution.
        std: standard deviation of the normal distribution.
    """
    row = vocab.stoi[token]
    nn.init.normal_(vocab.vectors[row], mean=mean, std=std)
def save_checkpoint(state_dict, is_best, file_path_prefix, file_name_suffix=''):
    """Persist a checkpoint to '<prefix><suffix>'; when `is_best`, also copy
    it to '<prefix>.best'."""
    target = file_path_prefix + file_name_suffix
    torch.save(state_dict, target)
    if is_best:
        shutil.copyfile(target, file_path_prefix + '.best')
def resume_checkpoint(net, model_file_path,
                      strict=False, resume_key='model', print_keys=('epoch', 'dev_f1', 'dev_avg_prec')):
    """Load model weights from a checkpoint file into `net`.

    Args:
        net: torch.nn.Module that receives the state dict.
        model_file_path: checkpoint path (as produced by save_checkpoint).
        strict: passed through to net.load_state_dict().
        resume_key: key in the checkpoint dict that holds the state dict.
        print_keys: metadata keys echoed when present in the checkpoint.

    Returns:
        True on success, False when the checkpoint file does not exist.
    """
    if os.path.exists(model_file_path):
        resume_dict = torch.load(model_file_path)
        print('Resume from previous model checkpoint {}'.format(model_file_path))
        for key in print_keys:
            if key in resume_dict:
                print('{}: {}'.format(key, resume_dict[key]))
        net.load_state_dict(resume_dict[resume_key], strict=strict)
        print('Resume successfully')
        return True
    else:
        # NOTE(review): this prints a Warning object rather than raising or
        # logging it — presumably intentional best-effort behavior; confirm.
        print(Warning('Warning: model resume failed because', model_file_path, 'not found'))
        return False
def id_to_word(word_ids, itos):
    """Map a sequence of vocabulary indices to token strings via `itos`."""
    return [itos[wid] for wid in word_ids]
def show_word_score_heatmap(score_tensor, x_ticks, y_ticks, figsize=(3, 8)):
    """Display a 2-D score tensor as a matplotlib heatmap with a colorbar.

    Args:
        score_tensor: 2-D torch tensor; rows labelled by y_ticks, columns
            by x_ticks.
        x_ticks: column tick labels.
        y_ticks: row tick labels.
        figsize: matplotlib figure size in inches.
    """
    # to make colorbar a proper size w.r.t the image
    def colorbar(mappable):
        ax = mappable.axes
        fig = ax.figure
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="10%", pad=0.1)
        return fig.colorbar(mappable, cax=cax)
    # 'simhei' lets matplotlib render Chinese tick labels;
    # unicode_minus=False avoids broken minus glyphs under that font.
    mpl.rcParams['font.sans-serif'] = ['simhei']
    mpl.rcParams['axes.unicode_minus'] = False
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
    img = ax.matshow(score_tensor.numpy())
    plt.xticks(range(score_tensor.size(1)), x_ticks, fontsize=14)
    plt.yticks(range(score_tensor.size(0)), y_ticks, fontsize=14)
    colorbar(img)
    ax.set_aspect('auto')
    plt.show()
def show_word_scores_heatmap(score_tensor_tup, x_ticks, y_ticks, nrows=1, ncols=1, titles=None, figsize=(8, 8), fontsize=14):
    """Display several score tensors as side-by-side heatmaps sharing ticks.

    Args:
        score_tensor_tup: a 2-D torch tensor or a tuple of them (one per
            subplot; nrows*ncols must cover them).
        x_ticks: column tick labels shared by all subplots.
        y_ticks: row tick labels shared by all subplots.
        nrows: subplot grid rows.
        ncols: subplot grid columns.
        titles: optional per-subplot title strings.
        figsize: matplotlib figure size in inches.
        fontsize: base font size for ticks (titles use fontsize + 2).
    """
    # keep the colorbar thin relative to each subplot
    def colorbar(mappable):
        ax = mappable.axes
        fig = ax.figure
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="1%", pad=0.1)
        return fig.colorbar(mappable, cax=cax)
    # allow passing a single tensor without wrapping it in a tuple
    if not isinstance(score_tensor_tup, tuple):
        score_tensor_tup = (score_tensor_tup, )
    fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
    for idx, ax in enumerate(axs):
        score_tensor = score_tensor_tup[idx]
        img = ax.matshow(score_tensor.numpy())
        plt.sca(ax)
        plt.xticks(range(score_tensor.size(1)), x_ticks, fontsize=fontsize)
        plt.yticks(range(score_tensor.size(0)), y_ticks, fontsize=fontsize)
        if titles is not None:
            plt.title(titles[idx], fontsize=fontsize + 2)
        colorbar(img)
    for ax in axs:
        ax.set_aspect('auto')
    plt.tight_layout(h_pad=1)
    plt.show()
def build_salient_phrase_candidates(sample_decomp_info, num_classes=2, score_threshold=1.1):
    """Collect n-gram phrases whose per-word decomposition scores all exceed
    `score_threshold` for some class, aggregated over all samples.

    Args:
        sample_decomp_info: iterable of (sample_id, word_ids, decomp_scores)
            tuples, where decomp_scores is a (num_words, num_classes)
            list/array of per-word class scores.
        num_classes: number of target classes.
        score_threshold: score every word of a phrase must exceed.

    Returns:
        dict mapping a tuple of word ids to a stats dict with keys
        'sample_ids', 'decompose_scores', 'count', 'average_score'
        (normalized to sum to 1), 'phrase_score' (max normalized score)
        and 'class_id' (argmax class).
    """
    # logic to filter unimportant phrases
    def is_salient_phrase(phrase_scores, num_classes, score_threshold):
        # salient for class cid iff every word's score for cid > threshold
        for cid in range(num_classes):
            x = phrase_scores[:, cid] > score_threshold
            if x.sum() == len(x):
                return True
        return False

    phrase_candidate_dict = defaultdict(lambda: {'sample_ids': [],
                                                 'decompose_scores': [],
                                                 'count': None,
                                                 'average_score': None,
                                                 'phrase_score': None,
                                                 'class_id': None})
    # iterate through all samples to construct phrase candidates
    for decomp_sample in sample_decomp_info:
        sample_id = decomp_sample[0]
        word_ids = decomp_sample[1]
        decomp_scores = np.array(decomp_sample[2])
        # FIX: the original reused `idx` for both the (unused) sample-loop
        # counter and the phrase start position; use a distinct name here.
        for start in range(len(word_ids)):
            ngram_max = len(word_ids) - start
            for ngram_len in range(1, ngram_max + 1):
                sid = start
                eid = start + ngram_len
                tmp_phr_scores = decomp_scores[sid:eid, :]
                if is_salient_phrase(tmp_phr_scores, num_classes, score_threshold):
                    tmp_phr_ids = tuple(word_ids[sid:eid])
                    tmp_phr_total_score = np.prod(tmp_phr_scores, axis=0, keepdims=True)
                    # record salient phrase candidate
                    phrase_candidate_dict[tmp_phr_ids]['sample_ids'].append(sample_id)
                    phrase_candidate_dict[tmp_phr_ids]['decompose_scores'].append(tmp_phr_total_score)
                else:
                    # longer n-grams containing a non-salient word cannot be salient
                    break
    # calculate average scores and associated information for each phrase
    for key in phrase_candidate_dict:
        phr_dict = phrase_candidate_dict[key]
        # expected decomposition score, normalized to a class distribution
        avg_score = np.mean(np.concatenate(phr_dict['decompose_scores'], axis=0), axis=0)
        avg_score = avg_score / avg_score.sum()
        phr_dict['count'] = len(phr_dict['sample_ids'])
        phr_dict['average_score'] = avg_score
        phr_dict['phrase_score'] = np.max(avg_score)
        phr_dict['class_id'] = np.argmax(avg_score)
    return phrase_candidate_dict
def get_salient_phrases(phrase_candidate_dict, word_id2str=None, num_classes=2, min_count=0):
    """Group salient phrase candidates by class and sort each group by score.

    Args:
        phrase_candidate_dict: output of build_salient_phrase_candidates().
        word_id2str: optional index-to-string list for rendering phrases.
        num_classes: number of class buckets to produce.
        min_count: drop candidates occurring in fewer samples than this.

    Returns:
        A list of `num_classes` lists of phrase dicts, each sorted by
        descending 'phrase_score'.
    """
    per_class = [[] for _ in range(num_classes)]
    for phrase_ids, info in phrase_candidate_dict.items():
        if info['count'] < min_count:
            continue
        words = None if word_id2str is None else id_to_word(phrase_ids, word_id2str)
        # shallow copy: values still reference the candidate dict's objects
        entry = {'phrase_ids': phrase_ids,
                 'words': words,
                 'sample_ids': info['sample_ids'],
                 'phrase_score': info['phrase_score']}
        per_class[info['class_id']].append(entry)
    for group in per_class:
        group.sort(key=lambda e: e['phrase_score'], reverse=True)
    return per_class
def get_confusion_matrix(raw_ids, pred_probs, true_labels, threshold=0.5):
    """Partition (id, prob) pairs into TP/FP/FN/TN buckets at `threshold`.

    Args:
        raw_ids: example identifiers.
        pred_probs: predicted positive-class probabilities, aligned with ids.
        true_labels: gold labels, each 0 or 1.
        threshold: probability at/above which a prediction counts positive.

    Returns:
        (tps, fps, fns, tns) lists of (raw_id, prob) pairs.

    Raises:
        ValueError: if a true label is neither 0 nor 1.
    """
    buckets = {(True, 1): [], (True, 0): [], (False, 1): [], (False, 0): []}
    for pos, rid in enumerate(raw_ids):
        prob = pred_probs[pos]
        label = true_labels[pos]
        if label != 0 and label != 1:
            raise ValueError('Value for the label must be 1 or 0')
        predicted_positive = prob >= threshold
        buckets[(predicted_positive, label)].append((rid, prob))
    return (buckets[(True, 1)], buckets[(True, 0)],
            buckets[(False, 1)], buckets[(False, 0)])
def resume_and_evaluate(rel_task, cpt_file_path, rel_dataset_iter):
    """Optionally resume `rel_task` from a checkpoint, then evaluate it on
    `rel_dataset_iter` with binary-classification metrics (threshold 0.5).

    Returns:
        (pred_info, precs, recalls, pr_auc, avg_prec, dec_f1_score,
         dec_prec, dec_recall) where pred_info packs ids, labels and probs.
    """
    # NOTE(review): the format string has one placeholder but two args;
    # cpt_file_path is silently dropped from this message.
    print('{} Resume and evaluate'.format(time.asctime(), cpt_file_path))
    if cpt_file_path is not None:
        rel_task.resume_model_from(cpt_file_path, strict=True)
    eids, pred_probs, true_labels = rel_task.get_prediction_probs_info(rel_dataset_iter)
    example_ids = eids.tolist()
    # keep only the positive-class probability column
    pred_probs = pred_probs[:, 1].numpy()
    true_labels = true_labels.numpy()
    pred_labels = (pred_probs > 0.5).astype(int)
    pred_info = {
        'example_ids': example_ids,
        'true_labels': true_labels,
        'pred_probs': pred_probs,
        'pred_labels': pred_labels
    }
    precs, recalls, threshes = precision_recall_curve(true_labels, pred_probs)
    pr_auc = auc(recalls, precs)
    avg_prec = average_precision_score(true_labels, pred_probs)
    dec_prec, dec_recall, dec_f1_score, _ = precision_recall_fscore_support(true_labels, pred_labels, average='binary')
    print('[Evaluate Results]: prec {:.3f}, recall {:.3f}, f1 {:.3f}, avg prec {:.3f}, pr auc {:.3f}'.format(
        dec_prec, dec_recall, dec_f1_score, avg_prec, pr_auc))
    return pred_info, precs, recalls, pr_auc, avg_prec, dec_f1_score, dec_prec, dec_recall
def retrain_and_evaluate(rel_task, new_train_file, model_store_prefix, rel_dataset_iter,
                         print_loss_freq=1000):
    """Retrain `rel_task` on `new_train_file`, then evaluate its best
    checkpoint on `rel_dataset_iter`.

    Returns:
        The dev F1 score of the best checkpoint.
    """
    print('{} Re-train relation task with {}'.format(time.asctime(), new_train_file))
    # point the task at the new training data and checkpoint name
    rel_task.config['train_file'] = new_train_file
    rel_task.config['model_store_name_prefix'] = model_store_prefix
    rel_task.init_train_set()  # read new training data
    rel_task.init_neural_network()  # initialize neural network, optimizer, loss, dev state
    rel_task.train(print_loss_freq=print_loss_freq)
    # training writes '<prefix>.best' for the best dev checkpoint
    best_cpt_path = os.path.join(rel_task.config['model_dir'],
                                 '{}.best'.format(model_store_prefix))
    dev_eval_results = resume_and_evaluate(rel_task, best_cpt_path, rel_dataset_iter)
    # index 5 of the evaluation tuple is the decision F1 score
    dev_f1_score = dev_eval_results[5]
    print('{} Re-train procedure completes, dev f1 score is {}'.format(time.asctime(), dev_f1_score))
    return dev_f1_score
def plot_multi_pr_curves(plot_tuples, plot_title='Precision Recall Curves',
                         figsize=(12, 8), xlim=(0, 1), ylim=(0, 1),
                         basic_font_size=14):
    """Plot several precision-recall curves with their decision points.

    Args:
        plot_tuples: iterable of (eval_infos, line_name, line_color) where
            eval_infos is the tuple returned by resume_and_evaluate()
            sliced from index 1 onward (precs, recalls, pr_auc, avg_prec,
            dec_prec, dec_recall, dec_f1).
        plot_title: figure title.
        figsize: matplotlib figure size.
        xlim: x-axis (recall) limits.
        ylim: y-axis (precision) limits.
        basic_font_size: base font size; title uses basic_font_size + 2.
    """
    plt.figure(figsize=figsize)
    for eval_infos, line_name, line_color in plot_tuples:
        precs = eval_infos[0]
        recalls = eval_infos[1]
        avg_prec = eval_infos[3]
        f1_score = eval_infos[6]
        plt.step(recalls, precs,
                 label=line_name + ' (AUC {0:.3f}, F1 {1:.3f})'.format(avg_prec, f1_score),
                 color=line_color)
        # mark the operating point chosen at the 0.5 decision threshold
        dec_prec = eval_infos[4]
        dec_recall = eval_infos[5]
        plt.plot(dec_recall, dec_prec, 'o', color=line_color, markersize=8)
        plt.vlines(dec_recall, 0, dec_prec, linestyles='dashed', colors=line_color)
        plt.hlines(dec_prec, 0, dec_recall, linestyles='dashed', colors=line_color)
    plt.legend(fontsize=basic_font_size)
    plt.title(plot_title, fontsize=basic_font_size + 2)
    plt.xlabel('Recall', fontsize=basic_font_size)
    plt.ylabel('Precision', fontsize=basic_font_size)
    plt.xticks(fontsize=basic_font_size)
    plt.yticks(fontsize=basic_font_size)
    plt.xlim(xlim)
    plt.ylim(ylim)
def plot_multi_agg_pr_curves(line_name2pr_list, plot_title='Aggregated Precision-Recall Curve',
                             figsize=(12, 8), xlim=(0, 1), ylim=(0, 1), basic_font_size=14):
    """Plot one aggregated precision-recall curve per named series.

    Args:
        line_name2pr_list: mapping line name -> (prec_list, recall_list).
        plot_title: figure title.
        figsize: matplotlib figure size.
        xlim: x-axis (recall) limits.
        ylim: y-axis (precision) limits.
        basic_font_size: base font size; title uses basic_font_size + 2.
    """
    plt.figure(figsize=figsize)
    for line_name, (prec_list, recall_list) in line_name2pr_list.items():
        plt.step(recall_list, prec_list, label=line_name)
    plt.legend(fontsize=basic_font_size)
    plt.title(plot_title, fontsize=basic_font_size + 2)
    plt.xlabel('Recall', fontsize=basic_font_size)
    plt.ylabel('Precision', fontsize=basic_font_size)
    plt.xticks(fontsize=basic_font_size)
    plt.yticks(fontsize=basic_font_size)
    plt.grid(True)
    plt.xlim(xlim)
    plt.ylim(ylim)
def get_gpu_mem_usage(gpu_id):
    """Return the used/total memory fraction of GPU `gpu_id`.

    Shells out to `nvidia-smi` and parses its CSV output, so the NVIDIA
    driver tools must be on PATH.
    """
    gpu_qargs = ['index', 'gpu_name', 'memory.used', 'memory.total']
    query_cmd = 'nvidia-smi -i {} --query-gpu={} --format=csv,noheader'.format(gpu_id, ','.join(gpu_qargs))
    pipe = os.popen(query_cmd)
    query_res = pipe.readlines()[0].strip('\n')
    items = query_res.split(',')
    # fields look like " 1234 MiB": strip the unit before parsing the number
    mem_used = float(items[-2].strip(' MiB'))
    mem_total = float(items[-1].strip(' MiB'))
    return mem_used / mem_total
def wait_idle_gpu(gpu_id=None, mem_usage_ratio=0.01, sleep_second=2):
    """Poll until GPU `gpu_id` memory usage drops to `mem_usage_ratio`,
    then claim it via apply_gpu_memory().

    Args:
        gpu_id: GPU index; defaults to $CUDA_VISIBLE_DEVICES.
        mem_usage_ratio: maximum used/total fraction considered idle.
        sleep_second: lower bound of the randomized polling interval (s).
    """
    if gpu_id is None:
        gpu_id = os.environ['CUDA_VISIBLE_DEVICES']
    print('{} Choose GPU {}, wait for memory usage ratio <= {}'.format(
        time.asctime(), gpu_id, mem_usage_ratio))
    sys.stdout.flush()
    while True:
        cur_mem_usage = get_gpu_mem_usage(gpu_id)
        if cur_mem_usage <= mem_usage_ratio:
            print('{} Current memory usage {:.5f}, start to bind gpu {}'.format(
                time.asctime(), cur_mem_usage, gpu_id
            ))
            # NOTE(review): always binds device 0 regardless of gpu_id —
            # presumably because CUDA_VISIBLE_DEVICES remaps devices; confirm.
            apply_gpu_memory(gpu_id=0)
            break
        # randomized sleep so multiple waiters don't poll in lockstep
        ss = random.randint(sleep_second, sleep_second + 20)
        time.sleep(ss)
def apply_gpu_memory(gpu_id=0):
    """Eagerly allocate a small tensor on GPU `gpu_id` to mark it as in use.

    Returns:
        The CUDA tensor, so callers can keep it alive if they want the
        allocation to persist (original returned None; callers may ignore it).
    """
    print('{} Choose gpu {}'.format(time.asctime(), os.environ['CUDA_VISIBLE_DEVICES']))
    # FIX: Tensor.to() is NOT in-place — the original discarded the CUDA
    # copy, so the tensor never actually moved to the GPU.
    cuda_device = 'cuda:{}'.format(gpu_id)
    tmp_tensor = torch.FloatTensor(100, 100).to(cuda_device)
    return tmp_tensor
def set_all_random_seed(seed):
    """Seed the python, numpy and torch (CPU + all CUDA devices) RNGs so
    subsequent runs are reproducible."""
    print('Set random seed {}'.format(seed))
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
|
983,596 | 214a6690342af3ad1c2c866362214f6f1c9393f5 | """
Test reading and writing binary and json strings to
Azure blob storage
"""
import os
import pytest
import json
import uuid
import pandas as pd
import pyarrow as pa
from wrattler_data_store.storage import *
@pytest.mark.skipif("WRATTLER_AZURE_STORAGE" not in os.environ.keys(),
                    reason="Relies on Azure backend")
def test_json_round_trip():
    """
    Write a simple json string to the Azure-backed Store and read it back,
    asserting the decoded bytes equal the original string.
    """
    j = '[{"name": "Alice", "occupation": "researcher"}]'
    s = Store("Azure")
    # random hash/frame names so concurrent test runs don't collide
    cell_hash = str(uuid.uuid4())
    frame_name = str(uuid.uuid4())
    s.write(j, cell_hash, frame_name)
    # Store.read returns bytes; decode before comparing with the str input
    result = s.read(cell_hash, frame_name).decode("utf-8")
    assert(result == j)
@pytest.mark.skipif("WRATTLER_AZURE_STORAGE" not in os.environ.keys(),
                    reason="Relies on Azure backend")
def test_arrow_round_trip():
    """
    Serialize a pandas DataFrame to Arrow IPC bytes, round-trip it through
    the Azure-backed Store, and assert the reconstructed frame is equal.
    """
    df = pd.DataFrame({"a": [1, 3, 5], "b": [2, 4, 6]})
    # build an Arrow IPC file in memory from the DataFrame
    batch = pa.RecordBatch.from_pandas(df, preserve_index=False)
    sink = pa.BufferOutputStream()
    writer = pa.RecordBatchFileWriter(sink, batch.schema)
    writer.write_batch(batch)
    writer.close()
    arrow_buffer = sink.getvalue()
    # now write this to the datastore
    s = Store("Azure")
    cell_hash = str(uuid.uuid4())
    frame_name = str(uuid.uuid4())
    s.write(arrow_buffer.to_pybytes(), cell_hash, frame_name)
    # now try and read it back and rebuild the DataFrame
    result = s.read(cell_hash, frame_name)
    reader = pa.ipc.open_file(result)
    df_new = reader.read_pandas()
    assert(pd.DataFrame.equals(df, df_new))
|
983,597 | 669947717be124757c5becc191c015c1dcf341ac | from random import randint
# Toy interactive script: reads a name and prints a coin-flip "verdict".
name = input('Enter your name: ')
# randint(0, 1) is inclusive on both ends: a fair 0/1 flip
x = randint(0, 1)
if (x == 1):
    print(name + ' is gay')
else:
    print(name + ' is not gay')
983,598 | df89b80bbccc98eccb620415e1d6d308f6371e7d | def checkio(l, m=''.join):
c = lambda s: [x[0] for x in s if len(set(x)) == 1 and x[0] != '.'] or 'D'
return m(c(l + map(None, *l) + [m(l)[::4], m(l)[2:-1:2]]))[0]
# Basic sanity checks for checkio(); ported from Python-2 print statements
# to Python 3 print() calls.
# print(checkio(["X.O", "XX.", "XOO"]) == "X")
# print(checkio(["OO.", "XOX", "XOX"]) == "O")
# print(checkio(["OOX", "XXO", "OXX"]) == "D")
# print(checkio(["OXO", "XOX", "OXO"]))
print(checkio(["...", "XXX", "OO."]))
|
983,599 | 3abc7047b45432310f414f34a69482c56d888c12 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os
import base64
import time
import MySQLdb
#import pymysql
import csv
class libdb_mysql:
    "Class for manage mysql database queries"

    # -- constructor --
    def __init__(self):
        # Working-directory path for the credential file; set by open_db().
        self.path = ""
        # When >= 1, every generated SQL statement is printed before running.
        self.debug_mode = 1
        return

    # -- open database --
    def open_db(self):
        """Open a connection to the 'ccmarket' MySQL database.

        When an 'auth.pk' CSV file (columns 'user' and 'password', both
        base64-encoded) exists in the current working directory, its
        credentials replace the hard-coded default account.

        Returns:
            (bbdd, sql): the MySQLdb connection and a DictCursor on it.
        """
        user = ""
        password = ""
        self.path = os.getcwd() + "/"
        pwfile = self.path + "auth.pk"
        database = "ccmarket"
        # default connection, used when no credential file is found
        # bbdd = MySQLdb.connect(host="192.168.2.200", user="root", passwd="weareafamily", db=database)
        bbdd = MySQLdb.connect(host="localhost", user="juanlu", passwd="", db=database)
        #bbdd = pymysql.connect(host="localhost", user="juanlu", passwd="", db=database)
        if os.path.isfile(pwfile):
            csv_list = []
            fp = open(pwfile, "r")
            reader = csv.DictReader(fp)
            for i in reader:
                csv_list.append(i)
            fp.close()
            user = base64.b64decode(csv_list[0]['user'])
            password = base64.b64decode(csv_list[0]['password'])
            if csv_list[0]['user'] != "" \
                    and csv_list[0]['password'] != "":
                # re-connect with the decoded credentials
                # bbdd = MySQLdb.connect(host="192.168.2.200", user=user, passwd=password, db=database)
                bbdd = MySQLdb.connect(host="localhost", user=user, passwd=password, db=database)
                # bbdd = pymysql.connect(host="localhost", user=user, passwd=password, db=database)
        sql = bbdd.cursor(cursorclass=MySQLdb.cursors.DictCursor)
        #sql = bbdd.cursor(pymysql.cursors.DictCursor)
        return bbdd, sql

    # -- insert quotes data on db --
    def generic_row_insert(self, data, table):
        """Insert every dict in `data` as one row of `table`; commits once.

        NOTE(review): SQL is built by string concatenation with no escaping —
        vulnerable to SQL injection; use parameterized queries for any
        untrusted input.
        """
        # -- open database --
        bbdd, sql = self.open_db()
        # -- get and filter data --
        for i in data:
            query = "INSERT INTO " + table + " ("
            for n in i.keys():
                query += n + ","
            query = query[0:-1]
            query += ") VALUES ("
            for n in i.keys():
                query += "'" + str(i[n]) + "',"
            query = query[0:-1]
            query += ");"
            if self.debug_mode >= 1:
                print(query)
            # retry until the insert succeeds (e.g. transient lock errors).
            # NOTE(review): the bare except hides permanent failures and can
            # spin forever; confirm this best-effort behavior is intended.
            op_ok = 0
            while op_ok == 0:
                try:
                    sql.execute(query)
                    op_ok = 1
                except:
                    time.sleep(0.1)
        bbdd.commit()
        bbdd.close()
        return

    # -- make a generic insert with one row --
    def generic_insert(self, data, table):
        """Insert the single dict `data` as one row of `table`.

        NOTE(review): values are concatenated unescaped into the SQL string —
        SQL-injection risk for untrusted input.
        """
        bbdd, sql = self.open_db()
        query = "INSERT INTO " + table + " ("
        for i in data.keys():
            query += i + ", "
        query = query[0:-2]
        query += ") VALUES ("
        for i in data.keys():
            query += "'" + str(data[i]) + "', "
        query = query[0:-2]
        query += ");"
        if self.debug_mode >= 1:
            print(query)
        sql.execute(query)
        bbdd.commit()
        bbdd.close()
        return

    # -- make generic select defined by data content --
    def generic_select(self, data, table):
        """Run a SELECT built from the clause fragments in `data`.

        Each entry of `data` may be:
          * a "SELECT ..." string, which replaces the default "SELECT *";
          * an "ORDER BY ..." string, appended after the WHERE conditions;
          * any other string, treated as a WHERE condition AND-ed together.

        Returns:
            A tuple of row dicts, or [] when there are no results.

        NOTE(review): conditions are concatenated unescaped — SQL-injection
        risk for untrusted input.
        """
        bbdd, sql = self.open_db()
        query = "SELECT * FROM " + table + " WHERE "
        for i in data:
            if i.upper().find("SELECT") == 0:
                query = i + " FROM " + table + " WHERE "
            if i.find("SELECT") < 0 \
                    and i.find("ORDER BY") < 0:
                query += i + " AND "
            if i.find("ORDER BY") == 0:
                # drop the dangling AND before appending the ORDER BY clause
                if query[-5:] == " AND ":
                    query = query[0:-5]
                query += " " + i
        # strip trailing " AND " / empty " WHERE " left by the loop above
        if query[-5:] == " AND ":
            query = query[0:-5]
        if query[-7:] == " WHERE ":
            query = query[0:-7]
        query += ";"
        if self.debug_mode >= 1:
            print(query)
        sql.execute(query)
        tuplas = sql.fetchall()
        bbdd.close()
        if tuplas is None:
            return []
        if len(tuplas) == 0:
            return []
        return tuplas

    # -- make a generic delete defined by data content --
    def generic_delete(self, data, table):
        """DELETE rows of `table` matching the AND-ed conditions in `data`;
        with empty `data` the whole table is emptied.

        NOTE(review): conditions are concatenated unescaped — SQL-injection
        risk for untrusted input.
        """
        bbdd, sql = self.open_db()
        query = "DELETE FROM " + table
        if len(data) > 0:
            query += " WHERE "
            for i in data:
                query += i + " AND "
            query = query[0:-5]
        query += ";"
        if self.debug_mode >= 1:
            print(query)
        sql.execute(query)
        bbdd.commit()
        bbdd.close()
        return

    # -- make a generic update defined by two data content --
    def generic_update(self, data1, data2, table):
        """UPDATE `table`, SETting the column/value pairs of `data1` on rows
        matching the AND-ed conditions in `data2`.

        NOTE(review): values and conditions are concatenated unescaped —
        SQL-injection risk for untrusted input.
        """
        bbdd, sql = self.open_db()
        # -- compose query --
        query = "UPDATE " + table
        if len(data1) > 0:
            query += " SET "
            for i in data1.keys():
                query += "`" + i + "`='" + str(data1[i]) + "', "
            query = query[0:-2]
        if len(data2) > 0:
            query += " WHERE "
            for i in data2:
                query += i + " AND "
            if query[-5:] == " AND ":
                query = query[0:-5]
        query += ";"
        if self.debug_mode >= 1:
            print(query)
        # -- execute query --
        # NOTE(review): the statement is attempted twice with errors silently
        # swallowed — a second attempt of a successful UPDATE re-runs it;
        # confirm this retry behavior is intended.
        count = 0
        while count < 2:
            try:
                sql.execute(query)
            except:
                pass
            count += 1
        bbdd.commit()
        bbdd.close()
        return
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.