| index | blob_id | code |
|---|---|---|
985,500 | 07a02959748eed5b3a8f8ecb2ce6fbe5a4e4688f | import os
import numpy as np
def processData(datawithmissing,windowSize):
train_roadList=[]
for i in range(datawithmissing.shape[0]):
tempdata=datawithmissing[i]
if i==0:
trainLabel=tempdata[tempdata>0]
else:
trainLabel=np.hstack((trainLabel,tempdata[tempdata>0]))
trainPosition=tempdata.nonzero()[0]
for j,pos in enumerate(trainPosition):
tempTrain=np.append(tempdata[pos-windowSize:pos],tempdata[pos+1:pos+windowSize+1])
if j==0:
trainData_temp=tempTrain
else:
trainData_temp=np.vstack((trainData_temp,tempTrain))
train_roadList.append(trainData_temp)
trainData=train_roadList[0]
for train in train_roadList[1:]:
trainData=np.vstack((trainData,train))
return trainData,trainLabel
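# Illustrative sketch of processData on a toy row (assumed values, windowSize=1):
# processData(np.array([[0., 5., 0.]]), 1) collects the windowSize neighbours on
# each side of the observed entry as features ([0., 0.]) and the observed value
# itself as the label ([5.]).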
def processData_test(test_data,windowSize,test_dataLabel):
test_roadList=[]
for i in range(test_data.shape[0]):
tempTestData=test_data[i]
tempTestLabel=test_dataLabel[i]
position=np.nonzero(tempTestLabel)[0]
for j,pos in enumerate(position):
tempTest=np.append(tempTestData[pos-windowSize:pos],tempTestData[pos+1:pos+windowSize+1])
if j==0:
testData_temp=tempTest
else:
testData_temp=np.vstack((testData_temp,tempTest))
test_roadList.append(testData_temp)
testData=test_roadList[0]
for test in test_roadList[1:]:
testData=np.vstack((testData,test))
return testData
n_day = 25
n_road = 317
n_interval = 180
# n_interval=90
# n_interval=60
proPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
dataPath = os.path.join(proPath, 'data')
rushHourPath = os.path.join(dataPath, 'rushHour.csv')
MPath = os.path.join(dataPath, 'maskTensor.csv')
# rushHourPath = os.path.join(dataPath, 'rushHour_10.csv')
# MPath = os.path.join(dataPath, 'mask_10.csv')
# rushHourPath = os.path.join(dataPath, 'rushHour_0.8.csv')
data = np.loadtxt(rushHourPath, dtype=float).reshape((n_day,n_road,n_interval))
M = np.loadtxt(MPath, dtype=int).reshape((n_day,n_road,n_interval))
datawithmissing = data * (1-M)
dataMissing = data * M
datawithMissing_2=datawithmissing[0]
dataMissing_2=dataMissing[0]
for i in range(1,n_day):
datawithMissing_2=np.hstack((datawithMissing_2,datawithmissing[i]))
dataMissing_2=np.hstack((dataMissing_2,dataMissing[i]))
window_size=12
#padding
padding=np.zeros((n_road,window_size))
datawithMissing_2=np.hstack((padding,datawithMissing_2))
datawithMissing_2=np.hstack((datawithMissing_2,padding))
dataMissing_2=np.hstack((padding,dataMissing_2))
dataMissing_2=np.hstack((dataMissing_2,padding))
trainData,trainLabel=processData(datawithMissing_2,window_size)
testData=processData_test(datawithMissing_2,window_size,dataMissing_2)
testLabel=dataMissing_2[dataMissing_2>0]
testLabel=testLabel.reshape(testLabel.size,1)
with open(os.path.join(dataPath,'trainSet_mlp.csv'), 'w')as fw_traindata:
np.savetxt(fw_traindata,trainData,fmt='%.6f')
with open(os.path.join(dataPath, 'trainLabel_mlp.csv'), 'w')as fw_trainlabel:
np.savetxt(fw_trainlabel,trainLabel,fmt='%.6f')
with open(os.path.join(dataPath, 'testSet_mlp.csv'), 'w')as fw_testdata:
np.savetxt(fw_testdata,testData,fmt='%.6f')
with open(os.path.join(dataPath, 'testLabel_mlp.csv'), 'w')as fw_testlabel:
np.savetxt(fw_testlabel,testLabel, fmt='%.6f')
|
985,501 | 7b53eaa67a065a32a12d3b9f172a2e61b2c21160 | # coding: utf-8
# octal to decimal conversion
# raquel ambrozio
numero_octal = raw_input()
expoente = len(numero_octal)
decimal = 0
for i in range(len(numero_octal)):
expoente -= 1
decimal0 = int(numero_octal[i]) * (8 ** expoente)
decimal += decimal0
print "%d * 8^ %d = %d" % (int(numero_octal[i]), expoente, decimal0)
print "%d(8) = %d(10)" % (int(numero_octal), decimal)
|
985,502 | 5f0215c7c3ea9b07f1cd07517b1dcca0c18ca8dc | class Persona:
def __init__(self,nombre,apellido,edad,sexo,nombreUsuario,contra,tele,tipo):
self.nombre=nombre
self.apellido=apellido
self.edad=edad
self.sexo=sexo
self.nombreUsuario=nombreUsuario
self.contra=contra
self.tele=tele
self.tipo=tipo
# GETTER METHODS
def getNombre(self):
return self.nombre
def getApellido(self):
return self.apellido
def getEdad(self):
return self.edad
def getSexo(self):
return self.sexo
def getNomUsuario(self):
return self.nombreUsuario
def getContra(self):
return self.contra
def getTele(self):
return self.tele
def getTipo(self):
return self.tipo
# SETTER METHODS
def setNombre(self, nombre):
self.nombre = nombre
def setApellido(self, apellido):
self.apellido = apellido
def setEdad(self, edad):
self.edad = edad
def setSexo(self,sexo):
self.sexo=sexo
def setNomUsuario(self,nombreUsuario):
self.nombreUsuario=nombreUsuario
def setContra(self,contra):
self.contra=contra
def setTele(self,tele):
self.tele=tele
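# Hypothetical usage (illustrative values only):
# p = Persona("Ana", "Diaz", 30, "F", "adiaz", "secreta", "555-0100", "admin")
# print(p.getNombre())  # -> "Ana"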
|
985,503 | d29976adef476c2e38c80ab07607b17d366db8b2 | # flake8: noqa
import unittest
import repour.server.endpoint.validation as validation
import voluptuous
class TestPrimitives(unittest.TestCase):
def test_nonempty_str(self):
self.assertEqual("asd", validation.nonempty_str("asd"))
self.assertEqual("asd qwe", validation.nonempty_str("asd qwe"))
self.assertEqual(" ", validation.nonempty_str(" "))
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_str("")
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_str(0)
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_str(False)
def test_nonempty_noblank_str(self):
self.assertEqual("asd", validation.nonempty_noblank_str("asd"))
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_noblank_str("")
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_noblank_str("\n")
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_noblank_str("asd qwe")
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_noblank_str(1)
with self.assertRaises(voluptuous.MultipleInvalid):
validation.nonempty_noblank_str(True)
def test_port_num(self):
self.assertEqual(65535, validation.port_num(65535))
with self.assertRaises(voluptuous.MultipleInvalid):
validation.port_num(0)
with self.assertRaises(voluptuous.MultipleInvalid):
validation.port_num(65536)
with self.assertRaises(voluptuous.MultipleInvalid):
validation.port_num("1000")
with self.assertRaises(voluptuous.MultipleInvalid):
validation.port_num(False)
def test_name_str(self):
self.assertEqual("asd", validation.name_str("asd"))
self.assertEqual("ASD", validation.name_str("ASD"))
self.assertEqual("123", validation.name_str("123"))
self.assertEqual("_", validation.name_str("_"))
self.assertEqual("asd-1.5.0", validation.name_str("asd-1.5.0"))
self.assertEqual("_ASD-", validation.name_str("_ASD-"))
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str("")
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str(" ")
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str("-asd-1.5.0")
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str("asd!1.5.0")
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str("%")
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str(0)
with self.assertRaises(voluptuous.MatchInvalid):
validation.name_str(False)
class TestAdjust(unittest.TestCase):
def test_adjust(self):
valid = {"name": "someproject", "ref": "2.2.11.Final"}
self.assertEqual(valid, validation.adjust(valid))
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust({})
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust({"name": "someproject"})
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust({"ref": "2.2.11.Final"})
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust({"name": "someproject", "ref": ""})
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust({"name": "", "ref": "2.2.11.Final"})
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust(
{"name": "someproject", "ref": "2.2.11.Final", "asd": "123"}
)
def test_callback(self):
valid = {
"name": "someproject",
"ref": "2.2.11.Final",
"callback": {"url": "http://localhost/asd"},
}
self.assertEqual(valid, validation.adjust(valid))
valid = {
"name": "someproject",
"ref": "2.2.11.Final",
"callback": {"url": "http://localhost/asd", "method": "POST"},
}
self.assertEqual(valid, validation.adjust(valid))
valid = {
"name": "someproject",
"ref": "2.2.11.Final",
"callback": {"url": "http://localhost/asd", "method": "PUT"},
}
self.assertEqual(valid, validation.adjust(valid))
with self.assertRaises(voluptuous.MultipleInvalid):
validation.adjust(
{
"name": "someproject",
"ref": "2.2.11.Final",
"callback": {"url": "http://localhost/asd", "method": "GET"},
}
)
class TestServerConfig(unittest.TestCase):
def test_server_config(self):
valid = {
"log": {"level": "ERROR", "path": "/home/repour/server.log"},
"bind": {"address": None, "port": 80},
"adjust_provider": {"type": "subprocess", "params": {"cmd": ["/bin/true"]}},
"repo_provider": {
"type": "gitlab",
"params": {
"api_url": "http://gitlab.example.com",
"username": "repour",
"password": "cxz321",
},
},
}
self.assertEqual(valid, validation.server_config(valid))
class TestClone(unittest.TestCase):
def test_clone_validation(self):
valid = {
"type": "git",
"ref": None,
"originRepoUrl": "http://github.com/project-ncl/repour.git",
"targetRepoUrl": "git+ssh://gerrit.com/project-ncl/repour.git",
}
self.assertEqual(valid, validation.clone(valid))
def test_clone_validation_with_git_scp_url(self):
valid = {
"type": "git",
"ref": None,
"originRepoUrl": "git@github.com:project-ncl/repour.git",
"targetRepoUrl": "git+ssh://gerrit.com/project-ncl/repour.git",
}
self.assertEqual(valid, validation.clone(valid))
|
985,504 | 9aa2abc222c3737dcd50de134250340a79ba0ea7 | ##-------------------------- Example 4. Tune Activation Function ---------------------------------
#The activation function controls the non-linearity of individual neurons and when they fire.
#The rectifier (ReLU) is generally the most popular choice today; sigmoid and tanh were the
#earlier defaults and may still be more suitable for some problems.
#Import libraries
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
# Model function
def model_func(activation='relu'):
model=Sequential()
model.add(Dense(12,activation=activation,input_dim=8))
model.add(Dense(8,activation=activation))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])
return model
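# Note: KerasClassifier forwards grid parameters whose names match model_func's
# signature (here 'activation') to build_fn, so one model is compiled and
# cross-validated per candidate value.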
#Random seed
seed=7
np.random.seed(seed)
# Data read
df=np.loadtxt('pima-indians-diabetes.txt',delimiter=',')
# Data split
X=df[:,0:8]
y=df[:,8]
# Model create
model=KerasClassifier(build_fn=model_func,epochs=100,batch_size=10,verbose=0)
# GridSearchCV for activation function
activation=['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']
param_grid=dict(activation=activation)
grid=GridSearchCV(estimator=model,param_grid=param_grid,n_jobs=-1,cv=3)
grid_results=grid.fit(X,y)
#Results
with open('Results_GridSearchCV_tune_Activation.txt','a') as f:
print('Best %f using %s' % (grid_results.best_score_,grid_results.best_params_),file=f)
means=grid_results.cv_results_['mean_test_score']
stds=grid_results.cv_results_['std_test_score']
params=grid_results.cv_results_['params']
for mean,stdev,param in zip(means,stds,params):
with open('Results_GridSearchCV_tune_Activation.txt','a') as f:
print('%f (%f) with: %r' % (mean,stdev,param),file=f)
|
985,505 | f0dcb015c6590200b6ad909342ec31341c665f6e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2018 Wan Li. All Rights Reserved
#
########################################################################
"""
File: pysaver.py
Author: Wan Li
Date: 2018/07/22 14:47:52
"""
import tensorflow as tf
if __name__ == "__main__":
export_dir = "../data/saved/"
# save
tf.reset_default_graph()
vi = tf.placeholder(tf.float32, shape=[1])
v1 = tf.get_variable("v1", shape=[1], initializer = tf.zeros_initializer)
v2 = tf.get_variable("v2", shape=[1], initializer = tf.zeros_initializer)
vo = vi * (v1 - v2)
inc_v1 = v1.assign(v1+1)
dec_v2 = v2.assign(v2-1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
inc_v1.op.run()
dec_v2.op.run()
print(sess.run(vo, feed_dict={vi: [2]}))
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING], signature_def_map= {
"model": tf.saved_model.signature_def_utils.build_signature_def(
inputs= {"x": tf.saved_model.utils.build_tensor_info(vi)},
outputs= {"y": tf.saved_model.utils.build_tensor_info(vo)})
})
builder.save()
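    # Hedged restore sketch (TF1 loader API; the tag must match the one saved above):
    # with tf.Session(graph=tf.Graph()) as sess:
    #     tf.saved_model.loader.load(
    #         sess, [tf.saved_model.tag_constants.SERVING], export_dir)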
|
985,506 | b24b284344cba95c0f31c54733d0d51b5b2f0dff | """ business days module """
import datetime
import holidays
def business_days() -> list[datetime.date]:
    start_date = datetime.date(2021, 5, 1)
    end_date = datetime.date(2021, 6, 1)
    us_holidays = holidays.US()
    delta = end_date - start_date
    print("For start date", start_date, "and end date", end_date)
    print("The delta is", delta)
    print("The business days are:")
    days = []
    for i in range(delta.days + 1):
        day = start_date + datetime.timedelta(days=i)
        # a business day is a weekday (Mon-Fri) that is not a US holiday
        if day not in us_holidays and day.weekday() not in [5, 6]:
            print(day)
            days.append(day)
    return days
business_days()
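# Assuming the holidays package marks Memorial Day (2021-05-31), this prints 21
# dates: the 20 May weekdays that are not Memorial Day plus Tuesday 2021-06-01.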
|
985,507 | 3ac429e5f30ad111368531820df96c4114b22e71 | import networkx as nx
import numpy as np
def sbm(sizes, prob_matrix):
    """Sample a stochastic block model graph: nodes in blocks i and j are
    linked independently with probability prob_matrix[i, j]."""
    def get_comm_label(node):
        # index of the block that contains `node`
        return int(np.searchsorted(last_node_labels, node, side='right'))
    g = nx.Graph()
    N = int(np.sum(sizes))
    last_node_labels = np.cumsum(sizes)
    g.add_nodes_from(range(N))
    for source in range(N):
        for target in range(source + 1, N):
            if np.random.rand() < prob_matrix[get_comm_label(source), get_comm_label(target)]:
                g.add_edge(source, target)
    return g
p_matrix = np.asarray([[0.7, 0.3], [0.3, 0.8]])
g = sbm(sizes=[4, 5], prob_matrix=p_matrix) |
985,508 | b74cfd1fdc81a973281dc119097ae4660410b1f9 | import FWCore.ParameterSet.Config as cms
from RecoEgamma.EgammaIsolationAlgos.eleTrackExtractorBlocks_cff import *
eleIsoDepositTk = cms.EDProducer("CandIsoDepositProducer",
src = cms.InputTag("gedGsfElectrons"),
trackType = cms.string('candidate'),
MultipleDepositsFlag = cms.bool(False),
ExtractorPSet = cms.PSet(EleIsoTrackExtractorBlock)
)
|
985,509 | c1f3ff16569ab776a2dc62d6826db6f385e8e8e9 |
import unittest
import numpy as np
from recursive import split_matrix
class TestSplitMatrix(unittest.TestCase):
def test_triangular(self):
a = np.array([
[1, 2, 3],
[0, 4, 5],
[0, 0, 6]
])
self.assertEqual(split_matrix(a), 0)
a = np.array([
[0, 1, 2, 3],
[0, 4, 5, 6],
[0, 0, 7, 8],
[0, 0, 0, 9]
])
self.assertEqual(split_matrix(a), 1)
for k in range(3, 10):
n = 2*k + 1
a = np.arange(n*n).reshape(n, n)
a = np.triu(a)
m = k - 1
self.assertEqual(split_matrix(a), m, msg=f'k={k}, n={n}, m={m}')
def test_quasi_triangular(self):
a = np.array([
[1, 2, 3],
[0, 4, 5],
[0, 6, 7]
])
self.assertEqual(split_matrix(a), 0)
a = np.array([
[1, 2, 3, 4],
[0, 5, 6, 7],
[0, 8, 9, 10],
[0, 0, 0, 11]
])
self.assertEqual(split_matrix(a), 2)
if __name__ == '__main__':
unittest.main()
|
985,510 | f23a00f25f81cc6f16a881f43715db6aac90607e | import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
matplotlib.rc('text', usetex = True)
import pylab
import sys
import os
import pandas as pd
inputfilename = sys.argv[1]
outputfilename = sys.argv[2]
#inputfilename = "D:/results/sampled/SampledRegression_average_0.txt"
#outputfilename = "D:/results/sampled/SampledRegression_estimate_statistics_single_0.pdf"
#inputfilename = "D:/results/logreg-zero/LogisticModelZero_average_0.txt"
#outputfilename = "D:/results/logreg-zero/LogisticModelZero_estimate_statistics_single_0.pdf"
#inputfilename = "D:/results/invprop_bad_mcmnf_revised/InverseProportionBad_average_0.txt"
#outputfilename = "D:/results/invprop_bad_mcmnf_revised/InverseProportionBad_estimate_statistics_single_0.pdf"
#inputfilename = "D:/results/logreg-zero_mcmnf_revised/LogisticModelZero_average_0.txt"
#outputfilename = "D:/results/logreg-zero_mcmnf_revised/LogisticModelZero_estimate_statistics_single_0.pdf"
#inputfilename = "D:/results/switchingobs_common_mcmnf_revised/SwitchingObservations_average_0.txt"
#outputfilename = "D:/results/switchingobs_common_mcmnf_revised/SwitchingObservations_estimate_statistics_single_0.pdf"
#xhat_UMF, mErr_UMF, DErr_UMF, DErrTheor_UMF, xhat_UT, mErr_UT, DErr_UT, DErrTheor_UT = np.loadtxt(inputfilename, delimiter = ' ', usecols=(0,1,2,3,4,5,6,7,8,9,10), unpack=True, dtype=float)
data = pd.read_csv(inputfilename, delimiter = " ", header=None, dtype=float)
n = int((data.shape[1] - 3) / 4)
f = plt.figure(num=None, figsize=(14, 3.5), dpi=150, facecolor='w', edgecolor='k')
plt.subplots_adjust(left=0.06, bottom=0.07, right=0.98, top=0.95, wspace=0.1)
ax = plt.subplot(111)
ls_m = (0, ())
ls_D = (0, (5, 1))
ls_Dth = (0, (1, 1))
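# dash tuples (offset, (on_len, off_len, ...)): () = solid, (5, 1) = dashed, (1, 1) = dotted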
colors = ['red', 'green', 'blue', 'cyan', 'magenta', 'yellow']
for j in range(n):
#ax.plot(data[[0]], data[[3+j*4+1]], linestyle=ls_m, color=colors[j], linewidth=2.5, alpha=0.7)
ax.plot(data[[0]], data[[3+j*4+2]], linestyle=ls_D, color=colors[j], linewidth=2.5, alpha=0.7)
#ax.plot(data[[0]], data[[3+j*4+3]], linestyle=ls_Dth, color=colors[j], linewidth=2.5, alpha=0.7)
#ax.fill_between(data[0], np.zeros_like(data[2]), data[2], color='black', alpha = 0.2, linewidth=0.0);
#ax.set_ylim(0, max(data[2]))
plt.savefig(outputfilename)
|
985,511 | c64993424b13e222ea226e209bef608f7a571cad | # -*- coding: utf-8 -*-
# @Time : 2019-08-26 22:50
# @Author : Wei Peng
# @FileName: __init__.py
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(Config)
# print(app.config["SECRET_KEY"])
db = SQLAlchemy(app)
print("Begin!")

from app import models, views

# create the tables only after the models have been imported and registered
db.create_all()
db.session.commit()
|
985,512 | d167ba11cba2fa3ccb595289bca9fb671a6594e5 | from aws_cdk import (
aws_ec2 as ec2,
aws_iam as iam,
aws_eks as eks,
core
)
class VpcStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# The code that defines your stack goes here
self.vpc = ec2.Vpc(self, "VPC",
max_azs=3,
cidr="10.10.0.0/16",
            # this configuration creates 2 subnet groups across 3 AZs = 6 subnets.
subnet_configuration=[ec2.SubnetConfiguration(
subnet_type=ec2.SubnetType.PUBLIC,
name="PublicSubnet",
cidr_mask=24
), ec2.SubnetConfiguration(
subnet_type=ec2.SubnetType.PRIVATE,
name="PrivateSubnet",
cidr_mask=24
)],
# nat_gateway_provider=ec2.NatProvider.gateway(),
nat_gateways=2,
gateway_endpoints={
"S3": ec2.GatewayVpcEndpointOptions(
service=ec2.GatewayVpcEndpointAwsService.S3
)
})
self.vpc.add_flow_log("FlowLogS3", destination=ec2.FlowLogDestination.to_s3(
), traffic_type=ec2.FlowLogTrafficType.REJECT)
props["vpc"] = self.vpc
|
985,513 | 69e6da22b4d79b23de17b0fd05cc05c503376c4c | '''
directorPython demoPointCloud.py --interactive
Listens to a point cloud topics and draws it in a UI
'''
from director import segmentation
from director.consoleapp import ConsoleApp
from director import drcargs
from director import vtkAll as vtk
import vtkRosPython as vtkRos
from director import applogic
from director import visualization as vis
from director.timercallback import TimerCallback
reader = vtkRos.vtkRosPointCloudSubscriber()
reader.Start("/velodyne/point_cloud_filtered")
print(reader)
import time
app = ConsoleApp()
view = app.createView()
def spin():
polyData = vtk.vtkPolyData()
reader.GetPointCloud(polyData)
frame_id = reader.GetFrameId()
sec = reader.GetSec()
nsec = reader.GetNsec()
message = str(polyData.GetNumberOfPoints()) + " points, "
message += frame_id + ", " + str(sec) + "." + str(nsec)
print(message)
vis.updatePolyData(polyData,'point cloud')
quitTimer = TimerCallback(targetFps=5.0)
quitTimer.callback = spin
quitTimer.start()
if app.getTestingInteractiveEnabled():
view.show()
app.showObjectModel()
app.start()
|
985,514 | 8f287ea0f91f8bf411f17e717dab91a0f5dfae1c | class Person:
def say_hi( self) :
print( ' Hello, how are you?' )
p = Person( )
p. say_hi( )
# 前面两 行同 样可以写 作
# Person( ) . say_hi( )
|
985,515 | 4fed0034328f7259d3c4e2db3789c01e903ed7a4 | import pickle
import segmentation_models_pytorch as smp
from tqdm import tqdm
class Validator:
def __init__(self, model, optimizer, loader, imgsize):
self.loader = loader
self.model = model
self.optimizer = optimizer
self.imgsize = imgsize
self.results = {"thresholds": [], "iou": [], "f-score": [],
"pred_pixels": [], "label_pixels": []}
@staticmethod
def iou_metric(pred,label,thr):
f = smp.utils.metrics.IoUMetric(eps=1., threshold=thr, activation=None)
return f(pred,label)
@staticmethod
def fscore_metric(pred,label,thr):
f = smp.utils.metrics.FscoreMetric(eps=1., threshold=thr, activation=None)
return f(pred,label)
def run(self, steps, device="cpu"):
self.model.eval()
self.model.to(device)
thresholds = [t/float(steps) for t in range(steps)]
self.results["thresholds"] = thresholds
with tqdm(self.loader) as iterator:
for image, label in iterator:
image = image.to(device)
label = label.to(device)
pred = self.model.predict(image)
for k in range(len(pred)):
self._eval_prediction(pred[k], label[k], steps)
def _eval_prediction(self, pred, label, steps):
score = [Validator.fscore_metric(pred, label, t/float(steps)).item()
for t in range(steps)]
iou = [Validator.iou_metric(pred, label, t/float(steps)).item()
for t in range(steps)]
ppix = [(pred>t/float(steps)).sum().item()/(self.imgsize*self.imgsize)
for t in range(steps)]
lpix = [label.sum().item()/(self.imgsize*self.imgsize)]
self.results["f-score"].append(score)
self.results["iou"].append(iou)
self.results["pred_pixels"].append(ppix)
self.results["label_pixels"].append(lpix)
def write_to_file(self, filename):
with open(filename, "wb") as file:
pickle.dump(self.results, file)
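# Hedged usage sketch (model and val_loader are assumed objects; optimizer is
# stored but not used by run()):
# v = Validator(model, optimizer=None, loader=val_loader, imgsize=256)
# v.run(steps=20, device="cuda")
# v.write_to_file("validation_results.pkl")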
|
985,516 | e6b029737a1987b71aa540d682a59ffd2d312578 | import glob
import time
import math
import tkinter
from tkinter import *
from tkinter.messagebox import showinfo
# Define functions------------------------------------------------------------------------------------------------------
def save():
    # create the CSV with a header row on first use
    if not glob.glob("Drehen.csv"):
        with open("Drehen.csv", "w") as ergebnisliste:
            header = ["Datum", "Außendurchmesser", "Innendurchmesser", "Schnittgeschwindigkeit", "Drehzahl",
                      "Anlauf / Überlauf", "Werkstücklänge", "Anzahl der Schnitte", "Vorschub", "Hauptnutzungszeit"]
            ergebnisliste.write(";".join(header) + "\n")
    lt = time.localtime()
    year, month, day = lt[0:3]
    datum = str(f"{day:02d}.{month:02d}.{year:4d}")
    # append one result row; column order matches the header
    with open("Drehen.csv", "a") as ergebnisliste:
        ergebnisliste.write(datum + ";" +
                            str(outerDiameterInput.get()) + ";" +
                            str(innerDiameterInput.get()) + ";" +
                            str(speedInput.get()) + ";" +
                            str(rotationInput.get()) + ";" +
                            str(startupOverrunInput.get().replace(".", ",")) + ";" +
                            str(workpieceLengthInput.get()) + ";" +
                            str(numberOfCutsInput.get()) + ";" +
                            str(feedRateInput.get().replace(".", ",")) + ";" +
                            str(mainUsageTimeOutput.get()).replace(".", ",") + "\n")
def back():
turnMenue.destroy()
import UserInterface
def ende():
turnMenue.destroy()
def reset():
outerDiameterInput.delete(0, END)
outerDiameterInput.focus_set()
innerDiameterInput.delete(0, END)
innerDiameterInput.insert(10, "0")
speedInput.delete(0, END)
speedInput.insert(10, "0")
rotationInput.delete(0, END)
rotationInput.insert(10, "0")
startupOverrunInput.delete(0, END)
startupOverrunInput.insert(10, "0")
workpieceLengthInput.delete(0, END)
workpieceLengthInput.insert(10, "0")
numberOfCutsInput.delete(0, END)
numberOfCutsInput.insert(10, "0")
feedRateInput.delete(0, END)
feedRateInput.insert(10, "0")
mainUsageTimeOutput.delete(0, END)
mainUsageTimeOutput.insert(10, "0")
def calculate():
    try:
        innerdiameter = float(innerDiameterInput.get().replace(",", "."))
        outerdiameter = float(outerDiameterInput.get().replace(",", "."))
        speed = float(speedInput.get().replace(",", "."))
        rotation = float(rotationInput.get().replace(",", "."))
        choice = turntype.get()
        length = float(workpieceLengthInput.get().replace(",", "."))
        cuts = float(numberOfCutsInput.get().replace(",", "."))
        speedperrotation = float(feedRateInput.get().replace(",", "."))
        startup = overrun = float(startupOverrunInput.get().replace(",", "."))
    except ValueError:
        tkinter.messagebox.showinfo("Info", "Bitte geben Sie ausschließlich Zahlen ein!")
        reset()
        return
if choice == 1:
turnway = length + startup
alternativediameter = outerdiameter - cuts * (speedperrotation + 1)
elif choice == 2:
turnway = length + startup + overrun
alternativediameter = outerdiameter - cuts * (speedperrotation + 1)
elif choice == 3:
turnway = (outerdiameter - innerdiameter) / 2 + startup
alternativediameter = (outerdiameter + innerdiameter) / 2 + startup
else:
turnway = (outerdiameter - innerdiameter) / 2 + startup + overrun
alternativediameter = (outerdiameter + innerdiameter) / 2 + startup + overrun
    if speed == 0 and rotation == 0:
        tkinter.messagebox.showinfo("Info", "Bitte geben Sie einen Wert für die Schnittgeschwindigkeit / Drehzahl ein.")
        return
    elif speedperrotation == 0:
        tkinter.messagebox.showinfo("Info", "Bitte geben Sie einen Wert für den Vorschub ein!")
        feedRateInput.delete(0, END)
        feedRateInput.focus_set()
        return
    elif speed != 0 and rotation != 0:
        speedInput.delete(0, END)
        speedInput.insert(10, round(speed, 2))
        rotationInput.delete(0, END)
        rotationInput.insert(10, round(rotation, 2))
    elif speed == 0:
        speed = (rotation * alternativediameter * math.pi) / 1000
        speedInput.delete(0, END)
        speedInput.insert(10, round(speed, 2))
    else:
        rotation = (speed / (alternativediameter * math.pi)) * 1000
        rotationInput.delete(0, END)
        rotationInput.insert(10, round(rotation, 2))
    mainusagetime = (math.pi * alternativediameter * turnway * cuts) / ((speed * 1000) * speedperrotation)
    mainUsageTimeOutput.delete(0, END)
    mainUsageTimeOutput.insert(10, round(mainusagetime, 2))
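# The machining time computed above follows the standard turning formula
# t_h = (pi * d * L * i) / (v_c * 1000 * f), with mean diameter d [mm], tool
# path L [mm], number of cuts i, cutting speed v_c [m/min] and feed f [mm/rev].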
# Create the input window------------------------------------------------------------------------------------------------
turnMenue = Tk()
# Set the window title
turnMenue.title("Drehen")
# Create and position labels, entry fields and buttons-------------------------------------------------------------------
# Outer diameter in mm----------------------------------------------------------------------------------------------------
outerDiameterLabel = Label(turnMenue, text='Außendurchmesser [mm]').grid(row=0, column=0, padx=10, sticky='w')
outerDiameterInput = tkinter.Entry(turnMenue, width=10)
outerDiameterInput.grid(row=0, column=1)
# diameterInput.insert(10, "0")
outerDiameterInput.focus_set()
# Inner diameter in mm----------------------------------------------------------------------------------------------------
innerDiameterLabel = Label(turnMenue, text='Innendurchmesser [mm]').grid(row=1, column=0, padx=10, sticky='w')
innerDiameterInput = tkinter.Entry(turnMenue, width=10)
innerDiameterInput.grid(row=1, column=1)
innerDiameterInput.insert(10, "0")
# Cutting speed-----------------------------------------------------------------------------------------------------------
speedLabel = Label(turnMenue, text='Schnittgeschwindigkeit [m/min]').grid(row=2, column=0, padx=10, sticky='w')
speedInput = tkinter.Entry(turnMenue, width=10)
speedInput.grid(row=2, column=1, pady=5)
speedInput.insert(10, "0")
# Rotational speed--------------------------------------------------------------------------------------------------------
rotationLabel = Label(turnMenue, text='Drehzahl [1/min]', anchor='w').grid(row=3, column=0, padx=10, sticky='w')
rotationInput = tkinter.Entry(turnMenue, width=10)
rotationInput.grid(row=3, column=1, pady=5)
rotationInput.insert(10, "0")
# Approach / overrun------------------------------------------------------------------------------------------------------
startupOverrunLabel = Label(turnMenue, text='Anlauf / Überlauf [mm]').grid(row=4, column=0, padx=10, sticky='w')
startupOverrunInput = tkinter.Entry(turnMenue, width=10)
startupOverrunInput.grid(row=4, column=1, pady=5)
startupOverrunInput.insert(10, "0")
# Workpiece length--------------------------------------------------------------------------------------------------------
workpieceLengthLabel = Label(turnMenue, text='Werkstücklänge [mm]').grid(row=0, column=3, padx=20, sticky='w')
workpieceLengthInput = Entry(turnMenue, width=10)
workpieceLengthInput.grid(row=0, column=4, padx=10, pady=5)
workpieceLengthInput.insert(10, "0")
# Number of cuts----------------------------------------------------------------------------------------------------------
numberOfCutsLabel = Label(turnMenue, text='Anzahl der Schnitte').grid(row=1, column=3, padx=20, sticky='w')
numberOfCutsInput = Entry(turnMenue, width=10)
numberOfCutsInput.grid(row=1, column=4, pady=5)
numberOfCutsInput.insert(10, "0")
# Feed per revolution-----------------------------------------------------------------------------------------------------
feedRateLabel = Label(turnMenue, text='Vorschub [mm]').grid(row=2, column=3, padx=20, sticky='w')
feedRateInput = Entry(turnMenue, width=10)
feedRateInput.grid(row=2, column=4, pady=5)
feedRateInput.insert(10, "0")
# Main machining time-----------------------------------------------------------------------------------------------------
mainUsageTimeLabel = Label(turnMenue, text='Hauptnutzungszeit [min]').grid(row=3, column=3, padx=20, sticky='w')
mainUsageTimeOutput = Entry(turnMenue, width=10)
mainUsageTimeOutput.grid(row=3, column=4, pady=5)
mainUsageTimeOutput.insert(10, "0")
# Radio buttons-----------------------------------------------------------------------------------------------------------
turntype = IntVar()
turntype.set(1)
Radiobutton(turnMenue, text="Runddrehen mit Ansatz", variable=turntype, value=1).grid(row=5, column=0, sticky='w')
Radiobutton(turnMenue, text="Runddrehen ohne Ansatz", variable=turntype, value=2).grid(row=6, column=0, sticky='w')
Radiobutton(turnMenue, text="Plandrehen Vollzylinder", variable=turntype, value=3).grid(row=7, column=0, sticky='w')
Radiobutton(turnMenue, text="Plandrehen Hohlzylinder", variable=turntype, value=4).grid(row=8, column=0, sticky='w')
# Buttons-----------------------------------------------------------------------------------------------------------------
buttonFrame = Frame(turnMenue)
buttonFrame.grid(row=9, columnspan=5)
calculateButton = Button(buttonFrame, text='Berechnen', width=10, command=calculate).grid(row=9, column=0, padx=5, pady=20)
resetButton = Button(buttonFrame, text='Zurücksetzen', width=10, command=reset).grid(row=9, column=1, padx=5, pady=20)
saveButton = Button(buttonFrame, text='Speichern', width=10, command=save).grid(row=9, column=2, padx=5, pady=20)
backButton = Button(buttonFrame, text='Zurück', width=10, command=back).grid(row=9, column=3, padx=5, pady=20)
exitButton = Button(buttonFrame, text='Beenden', width=10, command=ende).grid(row=9, column=4, padx=5, pady=20)
turnMenue.mainloop()
|
985,517 | a6726fe7b447fb2e6aef170fd32f5b277a1981b6 | import pandas as pd
list_uninsured = ["Abilene, TX Metro Area <br \/> Percent Uninsured: 13.7% <br \/> Number Uninsured: 21,000",
"Akron, OH Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 35,000",
"Albany, GA Metro Area <br \/> Percent Uninsured: 15.2% <br \/> Number Uninsured: 23,000",
"Albany, OR Metro Area <br \/> Percent Uninsured: 7.1% <br \/> Number Uninsured: 9,000",
"Albany-Schenectady-Troy, NY Metro Area <br \/> Percent Uninsured: 3.1% <br \/> Number Uninsured: 27,000",
"Albuquerque, NM Metro Area <br \/> Percent Uninsured: 7.4% <br \/> Number Uninsured: 67,000",
"Alexandria, LA Metro Area <br \/> Percent Uninsured: 8.9% <br \/> Number Uninsured: 13,000",
"Allentown-Bethlehem-Easton, PA-NJ Metro Area <br \/> Percent Uninsured: 5.5% <br \/> Number Uninsured: 46,000",
"Altoona, PA Metro Area <br \/> Percent Uninsured: 4.7% <br \/> Number Uninsured: 6,000",
"Amarillo, TX Metro Area <br \/> Percent Uninsured: 15.3% <br \/> Number Uninsured: 39,000",
"Ames, IA Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 5,000",
"Anchorage, AK Metro Area <br \/> Percent Uninsured: 12.2% <br \/> Number Uninsured: 48,000",
"Ann Arbor, MI Metro Area <br \/> Percent Uninsured: 2.7% <br \/> Number Uninsured: 10,000",
"Anniston-Oxford-Jacksonville, AL Metro Area <br \/> Percent Uninsured: 10.8% <br \/> Number Uninsured: 12,000",
"Appleton, WI Metro Area <br \/> Percent Uninsured: 3.6% <br \/> Number Uninsured: 8,000",
"Asheville, NC Metro Area <br \/> Percent Uninsured: 11.2% <br \/> Number Uninsured: 51,000",
"Athens-Clarke County, GA Metro Area <br \/> Percent Uninsured: 11.3% <br \/> Number Uninsured: 23,000",
"Atlanta-Sandy Springs-Roswell, GA Metro Area <br \/> Percent Uninsured: 13.0% <br \/> Number Uninsured: 758,000",
"Atlantic City-Hammonton, NJ Metro Area <br \/> Percent Uninsured: 8.7% <br \/> Number Uninsured: 23,000",
"Auburn-Opelika, AL Metro Area <br \/> Percent Uninsured: 7.3% <br \/> Number Uninsured: 12,000",
"Augusta-Richmond County, GA-SC Metro Area <br \/> Percent Uninsured: 11.0% <br \/> Number Uninsured: 64,000",
"Austin-Round Rock, TX Metro Area <br \/> Percent Uninsured: 11.7% <br \/> Number Uninsured: 246,000",
"Bakersfield, CA Metro Area <br \/> Percent Uninsured: 7.8% <br \/> Number Uninsured: 67,000",
"Baltimore-Columbia-Towson, MD Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 136,000",
"Bangor, ME Metro Area <br \/> Percent Uninsured: 7.9% <br \/> Number Uninsured: 12,000",
"Barnstable Town, MA Metro Area <br \/> Percent Uninsured: 3.1% <br \/> Number Uninsured: 6,000",
"Baton Rouge, LA Metro Area <br \/> Percent Uninsured: 7.4% <br \/> Number Uninsured: 61,000",
"Battle Creek, MI Metro Area <br \/> Percent Uninsured: 3.8% <br \/> Number Uninsured: 5,000",
"Bay City, MI Metro Area <br \/> Percent Uninsured: 5.3% <br \/> Number Uninsured: 5,000",
"Beaumont-Port Arthur, TX Metro Area <br \/> Percent Uninsured: 17.4% <br \/> Number Uninsured: 69,000",
"Beckley, WV Metro Area <br \/> Percent Uninsured: 6.3% <br \/> Number Uninsured: 7,000",
"Bellingham, WA Metro Area <br \/> Percent Uninsured: 4.4% <br \/> Number Uninsured: 10,000",
"Bend-Redmond, OR Metro Area <br \/> Percent Uninsured: 7.0% <br \/> Number Uninsured: 13,000",
"Billings, MT Metro Area <br \/> Percent Uninsured: 7.0% <br \/> Number Uninsured: 12,000",
"Binghamton, NY Metro Area <br \/> Percent Uninsured: 4.3% <br \/> Number Uninsured: 10,000",
"Birmingham-Hoover, AL Metro Area <br \/> Percent Uninsured: 8.6% <br \/> Number Uninsured: 97,000",
"Bismarck, ND Metro Area <br \/> Percent Uninsured: 7.0% <br \/> Number Uninsured: 9,000",
"Blacksburg-Christiansburg-Radford, VA Metro Area <br \/> Percent Uninsured: 7.8% <br \/> Number Uninsured: 14,000",
"Bloomington, IL Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 7,000",
"Bloomington, IN Metro Area <br \/> Percent Uninsured: 6.6% <br \/> Number Uninsured: 11,000",
"Bloomsburg-Berwick, PA Metro Area <br \/> Percent Uninsured: 3.7% <br \/> Number Uninsured: 3,000",
"Boise City, ID Metro Area <br \/> Percent Uninsured: 10.5% <br \/> Number Uninsured: 73,000",
"Boston-Cambridge-Newton, MA-NH Metro Area <br \/> Percent Uninsured: 3.0% <br \/> Number Uninsured: 144,000",
"Boulder, CO Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 13,000",
"Bowling Green, KY Metro Area <br \/> Percent Uninsured: 5.5% <br \/> Number Uninsured: 10,000",
"Bremerton-Silverdale, WA Metro Area <br \/> Percent Uninsured: 3.8% <br \/> Number Uninsured: 9,000",
"Bridgeport-Stamford-Norwalk, CT Metro Area <br \/> Percent Uninsured: 9.2% <br \/> Number Uninsured: 86,000",
"Brownsville-Harlingen, TX Metro Area <br \/> Percent Uninsured: 27.6% <br \/> Number Uninsured: 116,000",
"Brunswick, GA Metro Area <br \/> Percent Uninsured: 13.8% <br \/> Number Uninsured: 16,000",
"Buffalo-Cheektowaga-Niagara Falls, NY Metro Area <br \/> Percent Uninsured: 3.5% <br \/> Number Uninsured: 39,000",
"Burlington, NC Metro Area <br \/> Percent Uninsured: 12.4% <br \/> Number Uninsured: 20,000",
"Burlington-South Burlington, VT Metro Area <br \/> Percent Uninsured: 3.5% <br \/> Number Uninsured: 7,000",
"California-Lexington Park, MD Metro Area <br \/> Percent Uninsured: 4.4% <br \/> Number Uninsured: 5,000",
"Canton-Massillon, OH Metro Area <br \/> Percent Uninsured: 6.7% <br \/> Number Uninsured: 26,000",
"Cape Coral-Fort Myers, FL Metro Area <br \/> Percent Uninsured: 13.3% <br \/> Number Uninsured: 97,000",
"Cape Girardeau, MO-IL Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 8,000",
"Carbondale-Marion, IL Metro Area <br \/> Percent Uninsured: 6.1% <br \/> Number Uninsured: 8,000",
"Carson City, NV Metro Area <br \/> Percent Uninsured: 8.6% <br \/> Number Uninsured: 5,000",
"Casper, WY Metro Area <br \/> Percent Uninsured: 14.3% <br \/> Number Uninsured: 11,000",
"Cedar Rapids, IA Metro Area <br \/> Percent Uninsured: 3.9% <br \/> Number Uninsured: 10,000",
"Chambersburg-Waynesboro, PA Metro Area <br \/> Percent Uninsured: 6.6% <br \/> Number Uninsured: 10,000",
"Champaign-Urbana, IL Metro Area <br \/> Percent Uninsured: 3.6% <br \/> Number Uninsured: 9,000",
"Charleston, WV Metro Area <br \/> Percent Uninsured: 4.8% <br \/> Number Uninsured: 10,000",
"Charleston-North Charleston, SC Metro Area <br \/> Percent Uninsured: 11.3% <br \/> Number Uninsured: 86,000",
"Charlotte-Concord-Gastonia, NC-SC Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 255,000",
"Charlottesville, VA Metro Area <br \/> Percent Uninsured: 7.5% <br \/> Number Uninsured: 17,000",
"Chattanooga, TN-GA Metro Area <br \/> Percent Uninsured: 9.7% <br \/> Number Uninsured: 53,000",
"Cheyenne, WY Metro Area <br \/> Percent Uninsured: 10.7% <br \/> Number Uninsured: 10,000",
"Chicago-Naperville-Elgin, IL-IN-WI Metro Area <br \/> Percent Uninsured: 7.6% <br \/> Number Uninsured: 717,000",
"Chico, CA Metro Area <br \/> Percent Uninsured: 5.5% <br \/> Number Uninsured: 12,000",
"Cincinnati, OH-KY-IN Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 107,000",
"Clarksville, TN-KY Metro Area <br \/> Percent Uninsured: 8.3% <br \/> Number Uninsured: 22,000",
"Cleveland, TN Metro Area <br \/> Percent Uninsured: 11.2% <br \/> Number Uninsured: 14,000",
"Cleveland-Elyria, OH Metro Area <br \/> Percent Uninsured: 5.1% <br \/> Number Uninsured: 103,000",
"Coeur d'Alene, ID Metro Area <br \/> Percent Uninsured: 7.5% <br \/> Number Uninsured: 12,000",
"College Station-Bryan, TX Metro Area <br \/> Percent Uninsured: 12.6% <br \/> Number Uninsured: 32,000",
"Colorado Springs, CO Metro Area <br \/> Percent Uninsured: 7.0% <br \/> Number Uninsured: 48,000",
"Columbia, MO Metro Area <br \/> Percent Uninsured: 8.3% <br \/> Number Uninsured: 15,000",
"Columbia, SC Metro Area <br \/> Percent Uninsured: 10.0% <br \/> Number Uninsured: 80,000",
"Columbus, GA-AL Metro Area <br \/> Percent Uninsured: 11.0% <br \/> Number Uninsured: 32,000",
"Columbus, IN Metro Area <br \/> Percent Uninsured: 8.8% <br \/> Number Uninsured: 7,000",
"Columbus, OH Metro Area <br \/> Percent Uninsured: 6.6% <br \/> Number Uninsured: 135,000",
"Corpus Christi, TX Metro Area <br \/> Percent Uninsured: 17.5% <br \/> Number Uninsured: 78,000",
"Corvallis, OR Metro Area <br \/> Percent Uninsured: 4.8% <br \/> Number Uninsured: 4,000",
"Crestview-Fort Walton Beach-Destin, FL Metro Area <br \/> Percent Uninsured: 13.2% <br \/> Number Uninsured: 34,000",
"Cumberland, MD-WV Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 5,000",
"Dallas-Fort Worth-Arlington, TX Metro Area <br \/> Percent Uninsured: 16.5% <br \/> Number Uninsured: 1,210,000",
"Dalton, GA Metro Area <br \/> Percent Uninsured: 18.5% <br \/> Number Uninsured: 26,000",
"Danville, IL Metro Area <br \/> Percent Uninsured: 6.2% <br \/> Number Uninsured: 5,000",
"Daphne-Fairhope-Foley, AL Metro Area <br \/> Percent Uninsured: 9.2% <br \/> Number Uninsured: 19,000",
"Davenport-Moline-Rock Island, IA-IL Metro Area <br \/> Percent Uninsured: 4.7% <br \/> Number Uninsured: 18,000",
"Dayton, OH Metro Area <br \/> Percent Uninsured: 5.6% <br \/> Number Uninsured: 44,000",
"Decatur, AL Metro Area <br \/> Percent Uninsured: 8.5% <br \/> Number Uninsured: 13,000",
"Decatur, IL Metro Area <br \/> Percent Uninsured: 3.9% <br \/> Number Uninsured: 4,000",
"Deltona-Daytona Beach-Ormond Beach, FL Metro Area <br \/> Percent Uninsured: 11.0% <br \/> Number Uninsured: 71,000",
"Denver-Aurora-Lakewood, CO Metro Area <br \/> Percent Uninsured: 7.2% <br \/> Number Uninsured: 207,000",
"Des Moines-West Des Moines, IA Metro Area <br \/> Percent Uninsured: 4.4% <br \/> Number Uninsured: 28,000",
"Detroit-Warren-Dearborn, MI Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 214,000",
"Dothan, AL Metro Area <br \/> Percent Uninsured: 9.2% <br \/> Number Uninsured: 13,000",
"Dover, DE Metro Area <br \/> Percent Uninsured: 7.6% <br \/> Number Uninsured: 13,000",
"Dubuque, IA Metro Area <br \/> Percent Uninsured: 3.4% <br \/> Number Uninsured: 3,000",
"Duluth, MN-WI Metro Area <br \/> Percent Uninsured: 4.5% <br \/> Number Uninsured: 12,000",
"Durham-Chapel Hill, NC Metro Area <br \/> Percent Uninsured: 10.8% <br \/> Number Uninsured: 61,000",
"East Stroudsburg, PA Metro Area <br \/> Percent Uninsured: 6.9% <br \/> Number Uninsured: 12,000",
"Eau Claire, WI Metro Area <br \/> Percent Uninsured: 6.0% <br \/> Number Uninsured: 10,000",
"El Centro, CA Metro Area <br \/> Percent Uninsured: 6.3% <br \/> Number Uninsured: 11,000",
"Elizabethtown-Fort Knox, KY Metro Area <br \/> Percent Uninsured: 4.8% <br \/> Number Uninsured: 7,000",
"Elkhart-Goshen, IN Metro Area <br \/> Percent Uninsured: 12.5% <br \/> Number Uninsured: 25,000",
"Elmira, NY Metro Area <br \/> Percent Uninsured: 3.9% <br \/> Number Uninsured: 3,000",
"El Paso, TX Metro Area <br \/> Percent Uninsured: 20.6% <br \/> Number Uninsured: 169,000",
"Enid, OK Metro Area <br \/> Percent Uninsured: 17.8% <br \/> Number Uninsured: 11,000",
"Erie, PA Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 13,000",
"Eugene, OR Metro Area <br \/> Percent Uninsured: 6.5% <br \/> Number Uninsured: 24,000",
"Evansville, IN-KY Metro Area <br \/> Percent Uninsured: 6.4% <br \/> Number Uninsured: 20,000",
"Fairbanks, AK Metro Area <br \/> Percent Uninsured: 11.2% <br \/> Number Uninsured: 10,000",
"Fargo, ND-MN Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 14,000",
"Farmington, NM Metro Area <br \/> Percent Uninsured: 14.0% <br \/> Number Uninsured: 18,000",
"Fayetteville, NC Metro Area <br \/> Percent Uninsured: 10.3% <br \/> Number Uninsured: 36,000",
"Fayetteville-Springdale-Rogers, AR-MO Metro Area <br \/> Percent Uninsured: 10.4% <br \/> Number Uninsured: 55,000",
"Flagstaff, AZ Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 14,000",
"Flint, MI Metro Area <br \/> Percent Uninsured: 6.3% <br \/> Number Uninsured: 26,000",
"Florence, SC Metro Area <br \/> Percent Uninsured: 9.4% <br \/> Number Uninsured: 19,000",
"Florence-Muscle Shoals, AL Metro Area <br \/> Percent Uninsured: 9.1% <br \/> Number Uninsured: 13,000",
"Fond du Lac, WI Metro Area <br \/> Percent Uninsured: 3.8% <br \/> Number Uninsured: 4,000",
"Fort Collins, CO Metro Area <br \/> Percent Uninsured: 6.3% <br \/> Number Uninsured: 22,000",
"Fort Smith, AR-OK Metro Area <br \/> Percent Uninsured: 12.3% <br \/> Number Uninsured: 34,000",
"Fort Wayne, IN Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 34,000",
"Fresno, CA Metro Area <br \/> Percent Uninsured: 7.7% <br \/> Number Uninsured: 75,000",
"Gadsden, AL Metro Area <br \/> Percent Uninsured: 11.5% <br \/> Number Uninsured: 12,000",
"Gainesville, FL Metro Area <br \/> Percent Uninsured: 9.5% <br \/> Number Uninsured: 27,000",
"Gainesville, GA Metro Area <br \/> Percent Uninsured: 17.5% <br \/> Number Uninsured: 35,000",
"Gettysburg, PA Metro Area <br \/> Percent Uninsured: 6.2% <br \/> Number Uninsured: 6,000",
"Glens Falls, NY Metro Area <br \/> Percent Uninsured: 5.2% <br \/> Number Uninsured: 6,000",
"Goldsboro, NC Metro Area <br \/> Percent Uninsured: 13.6% <br \/> Number Uninsured: 16,000",
"Grand Forks, ND-MN Metro Area <br \/> Percent Uninsured: 6.1% <br \/> Number Uninsured: 6,000",
"Grand Island, NE Metro Area <br \/> Percent Uninsured: 10.5% <br \/> Number Uninsured: 9,000",
"Grand Junction, CO Metro Area <br \/> Percent Uninsured: 7.5% <br \/> Number Uninsured: 11,000",
"Grand Rapids-Wyoming, MI Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 52,000",
"Grants Pass, OR Metro Area <br \/> Percent Uninsured: 6.7% <br \/> Number Uninsured: 6,000",
"Great Falls, MT Metro Area <br \/> Percent Uninsured: 8.8% <br \/> Number Uninsured: 7,000",
"Greeley, CO Metro Area <br \/> Percent Uninsured: 7.7% <br \/> Number Uninsured: 23,000",
"Green Bay, WI Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 15,000",
"Greensboro-High Point, NC Metro Area <br \/> Percent Uninsured: 10.0% <br \/> Number Uninsured: 76,000",
"Greenville, NC Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 14,000",
"Greenville-Anderson-Mauldin, SC Metro Area <br \/> Percent Uninsured: 10.7% <br \/> Number Uninsured: 95,000",
"Gulfport-Biloxi-Pascagoula, MS Metro Area <br \/> Percent Uninsured: 13.5% <br \/> Number Uninsured: 52,000",
"Hagerstown-Martinsburg, MD-WV Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 15,000",
"Hammond, LA Metro Area <br \/> Percent Uninsured: 10.3% <br \/> Number Uninsured: 14,000",
"Hanford-Corcoran, CA Metro Area <br \/> Percent Uninsured: 8.4% <br \/> Number Uninsured: 11,000",
"Harrisburg-Carlisle, PA Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 32,000",
"Harrisonburg, VA Metro Area <br \/> Percent Uninsured: 14.8% <br \/> Number Uninsured: 20,000",
"Hartford-West Hartford-East Hartford, CT Metro Area <br \/> Percent Uninsured: 4.1% <br \/> Number Uninsured: 49,000",
"Hattiesburg, MS Metro Area <br \/> Percent Uninsured: 13.0% <br \/> Number Uninsured: 19,000",
"Hickory-Lenoir-Morganton, NC Metro Area <br \/> Percent Uninsured: 11.7% <br \/> Number Uninsured: 42,000",
"Hilton Head Island-Bluffton-Beaufort, SC Metro Area <br \/> Percent Uninsured: 13.8% <br \/> Number Uninsured: 29,000",
"Hinesville, GA Metro Area <br \/> Percent Uninsured: 13.4% <br \/> Number Uninsured: 10,000",
"Homosassa Springs, FL Metro Area <br \/> Percent Uninsured: 10.0% <br \/> Number Uninsured: 14,000",
"Hot Springs, AR Metro Area <br \/> Percent Uninsured: 10.3% <br \/> Number Uninsured: 10,000",
"Houma-Thibodaux, LA Metro Area <br \/> Percent Uninsured: 7.4% <br \/> Number Uninsured: 15,000",
"Houston-The Woodlands-Sugar Land, TX Metro Area <br \/> Percent Uninsured: 18.2% <br \/> Number Uninsured: 1,243,000",
"Huntington-Ashland, WV-KY-OH Metro Area <br \/> Percent Uninsured: 6.9% <br \/> Number Uninsured: 24,000",
"Huntsville, AL Metro Area <br \/> Percent Uninsured: 8.8% <br \/> Number Uninsured: 39,000",
"Idaho Falls, ID Metro Area <br \/> Percent Uninsured: 6.6% <br \/> Number Uninsured: 10,000",
"Indianapolis-Carmel-Anderson, IN Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 161,000",
"Iowa City, IA Metro Area <br \/> Percent Uninsured: 4.5% <br \/> Number Uninsured: 8,000",
"Ithaca, NY Metro Area <br \/> Percent Uninsured: 4.1% <br \/> Number Uninsured: 4,000",
"Jackson, MI Metro Area <br \/> Percent Uninsured: 6.1% <br \/> Number Uninsured: 9,000",
"Jackson, MS Metro Area <br \/> Percent Uninsured: 10.1% <br \/> Number Uninsured: 57,000",
"Jackson, TN Metro Area <br \/> Percent Uninsured: 9.7% <br \/> Number Uninsured: 12,000",
"Jacksonville, FL Metro Area <br \/> Percent Uninsured: 10.9% <br \/> Number Uninsured: 162,000",
"Jacksonville, NC Metro Area <br \/> Percent Uninsured: 7.4% <br \/> Number Uninsured: 11,000",
"Janesville-Beloit, WI Metro Area <br \/> Percent Uninsured: 6.5% <br \/> Number Uninsured: 10,000",
"Jefferson City, MO Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 14,000",
"Johnson City, TN Metro Area <br \/> Percent Uninsured: 10.0% <br \/> Number Uninsured: 20,000",
"Johnstown, PA Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 5,000",
"Jonesboro, AR Metro Area <br \/> Percent Uninsured: 7.4% <br \/> Number Uninsured: 10,000",
"Joplin, MO Metro Area <br \/> Percent Uninsured: 12.6% <br \/> Number Uninsured: 22,000",
"Kahului-Wailuku-Lahaina, HI Metro Area <br \/> Percent Uninsured: 4.6% <br \/> Number Uninsured: 8,000",
"Kalamazoo-Portage, MI Metro Area <br \/> Percent Uninsured: 5.4% <br \/> Number Uninsured: 18,000",
"Kankakee, IL Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 6,000",
"Kansas City, MO-KS Metro Area <br \/> Percent Uninsured: 9.1% <br \/> Number Uninsured: 190,000",
"Kennewick-Richland, WA Metro Area <br \/> Percent Uninsured: 9.7% <br \/> Number Uninsured: 28,000",
"Killeen-Temple, TX Metro Area <br \/> Percent Uninsured: 11.5% <br \/> Number Uninsured: 47,000",
"Kingsport-Bristol-Bristol, TN-VA Metro Area <br \/> Percent Uninsured: 7.7% <br \/> Number Uninsured: 23,000",
"Kingston, NY Metro Area <br \/> Percent Uninsured: 6.2% <br \/> Number Uninsured: 11,000",
"Knoxville, TN Metro Area <br \/> Percent Uninsured: 8.4% <br \/> Number Uninsured: 73,000",
"Kokomo, IN Metro Area <br \/> Percent Uninsured: 5.9% <br \/> Number Uninsured: 5,000",
"La Crosse-Onalaska, WI-MN Metro Area <br \/> Percent Uninsured: 4.3% <br \/> Number Uninsured: 6,000",
"Lafayette, LA Metro Area <br \/> Percent Uninsured: 9.8% <br \/> Number Uninsured: 48,000",
"Lafayette-West Lafayette, IN Metro Area <br \/> Percent Uninsured: 7.1% <br \/> Number Uninsured: 15,000",
"Lake Charles, LA Metro Area <br \/> Percent Uninsured: 8.3% <br \/> Number Uninsured: 17,000",
"Lake Havasu City-Kingman, AZ Metro Area <br \/> Percent Uninsured: 10.4% <br \/> Number Uninsured: 21,000",
"Lakeland-Winter Haven, FL Metro Area <br \/> Percent Uninsured: 12.3% <br \/> Number Uninsured: 84,000",
"Lancaster, PA Metro Area <br \/> Percent Uninsured: 13.0% <br \/> Number Uninsured: 70,000",
"Lansing-East Lansing, MI Metro Area <br \/> Percent Uninsured: 4.5% <br \/> Number Uninsured: 21,000",
"Laredo, TX Metro Area <br \/> Percent Uninsured: 28.9% <br \/> Number Uninsured: 79,000",
"Las Cruces, NM Metro Area <br \/> Percent Uninsured: 9.5% <br \/> Number Uninsured: 20,000",
"Las Vegas-Henderson-Paradise, NV Metro Area <br \/> Percent Uninsured: 11.8% <br \/> Number Uninsured: 257,000",
"Lawrence, KS Metro Area <br \/> Percent Uninsured: 7.3% <br \/> Number Uninsured: 9,000",
"Lawton, OK Metro Area <br \/> Percent Uninsured: 13.6% <br \/> Number Uninsured: 16,000",
"Lebanon, PA Metro Area <br \/> Percent Uninsured: 10.0% <br \/> Number Uninsured: 14,000",
"Lewiston, ID-WA Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 3,000",
"Lewiston-Auburn, ME Metro Area <br \/> Percent Uninsured: 9.1% <br \/> Number Uninsured: 10,000",
"Lexington-Fayette, KY Metro Area <br \/> Percent Uninsured: 6.1% <br \/> Number Uninsured: 31,000",
"Lima, OH Metro Area <br \/> Percent Uninsured: 7.2% <br \/> Number Uninsured: 7,000",
"Lincoln, NE Metro Area <br \/> Percent Uninsured: 6.9% <br \/> Number Uninsured: 23,000",
"Little Rock-North Little Rock-Conway, AR Metro Area <br \/> Percent Uninsured: 7.0% <br \/> Number Uninsured: 51,000",
"Logan, UT-ID Metro Area <br \/> Percent Uninsured: 7.9% <br \/> Number Uninsured: 11,000",
"Longview, TX Metro Area <br \/> Percent Uninsured: 17.2% <br \/> Number Uninsured: 36,000",
"Longview, WA Metro Area <br \/> Percent Uninsured: 3.8% <br \/> Number Uninsured: 4,000",
"Los Angeles-Long Beach-Anaheim, CA Metro Area <br \/> Percent Uninsured: 8.6% <br \/> Number Uninsured: 1,142,000",
"Louisville\/Jefferson County, KY-IN Metro Area <br \/> Percent Uninsured: 5.3% <br \/> Number Uninsured: 68,000",
"Lubbock, TX Metro Area <br \/> Percent Uninsured: 12.5% <br \/> Number Uninsured: 39,000",
"Lynchburg, VA Metro Area <br \/> Percent Uninsured: 8.2% <br \/> Number Uninsured: 21,000",
"Macon-Bibb County, GA Metro Area <br \/> Percent Uninsured: 11.3% <br \/> Number Uninsured: 25,000",
"Madera, CA Metro Area <br \/> Percent Uninsured: 8.9% <br \/> Number Uninsured: 13,000",
"Madison, WI Metro Area <br \/> Percent Uninsured: 4.2% <br \/> Number Uninsured: 27,000",
"Manchester-Nashua, NH Metro Area <br \/> Percent Uninsured: 6.0% <br \/> Number Uninsured: 24,000",
"Manhattan, KS Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 5,000",
"Mankato-North Mankato, MN Metro Area <br \/> Percent Uninsured: 3.5% <br \/> Number Uninsured: 4,000",
"Mansfield, OH Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 12,000",
"McAllen-Edinburg-Mission, TX Metro Area <br \/> Percent Uninsured: 30.0% <br \/> Number Uninsured: 255,000",
"Medford, OR Metro Area <br \/> Percent Uninsured: 7.8% <br \/> Number Uninsured: 17,000",
"Memphis, TN-MS-AR Metro Area <br \/> Percent Uninsured: 9.8% <br \/> Number Uninsured: 130,000",
"Merced, CA Metro Area <br \/> Percent Uninsured: 7.5% <br \/> Number Uninsured: 20,000",
"Miami-Fort Lauderdale-West Palm Beach, FL Metro Area <br \/> Percent Uninsured: 15.5% <br \/> Number Uninsured: 950,000",
"Michigan City-La Porte, IN Metro Area <br \/> Percent Uninsured: 7.3% <br \/> Number Uninsured: 7,000",
"Midland, MI Metro Area <br \/> Percent Uninsured: 3.1% <br \/> Number Uninsured: 3,000",
"Midland, TX Metro Area <br \/> Percent Uninsured: 19.4% <br \/> Number Uninsured: 33,000",
"Milwaukee-Waukesha-West Allis, WI Metro Area <br \/> Percent Uninsured: 5.5% <br \/> Number Uninsured: 86,000",
"Minneapolis-St. Paul-Bloomington, MN-WI Metro Area <br \/> Percent Uninsured: 4.2% <br \/> Number Uninsured: 151,000",
"Missoula, MT Metro Area <br \/> Percent Uninsured: 5.4% <br \/> Number Uninsured: 6,000",
"Mobile, AL Metro Area <br \/> Percent Uninsured: 10.8% <br \/> Number Uninsured: 44,000",
"Modesto, CA Metro Area <br \/> Percent Uninsured: 5.1% <br \/> Number Uninsured: 28,000",
"Monroe, LA Metro Area <br \/> Percent Uninsured: 7.6% <br \/> Number Uninsured: 13,000",
"Monroe, MI Metro Area <br \/> Percent Uninsured: 4.6% <br \/> Number Uninsured: 7,000",
"Montgomery, AL Metro Area <br \/> Percent Uninsured: 8.4% <br \/> Number Uninsured: 30,000",
"Morgantown, WV Metro Area <br \/> Percent Uninsured: 5.3% <br \/> Number Uninsured: 7,000",
"Morristown, TN Metro Area <br \/> Percent Uninsured: 9.8% <br \/> Number Uninsured: 11,000",
"Mount Vernon-Anacortes, WA Metro Area <br \/> Percent Uninsured: 6.6% <br \/> Number Uninsured: 8,000",
"Muncie, IN Metro Area <br \/> Percent Uninsured: 7.8% <br \/> Number Uninsured: 9,000",
"Muskegon, MI Metro Area <br \/> Percent Uninsured: 3.8% <br \/> Number Uninsured: 6,000",
"Myrtle Beach-Conway-North Myrtle Beach, SC-NC Metro Area <br \/> Percent Uninsured: 13.2% <br \/> Number Uninsured: 61,000",
"Napa, CA Metro Area <br \/> Percent Uninsured: 7.2% <br \/> Number Uninsured: 10,000",
"Naples-Immokalee-Marco Island, FL Metro Area <br \/> Percent Uninsured: 16.0% <br \/> Number Uninsured: 59,000",
"Nashville-Davidson--Murfreesboro--Franklin, TN Metro Area <br \/> Percent Uninsured: 9.5% <br \/> Number Uninsured: 179,000",
"New Bern, NC Metro Area <br \/> Percent Uninsured: 10.8% <br \/> Number Uninsured: 13,000",
"New Haven-Milford, CT Metro Area <br \/> Percent Uninsured: 4.5% <br \/> Number Uninsured: 38,000",
"New Orleans-Metairie, LA Metro Area <br \/> Percent Uninsured: 9.0% <br \/> Number Uninsured: 113,000",
"New York-Newark-Jersey City, NY-NJ-PA Metro Area <br \/> Percent Uninsured: 7.0% <br \/> Number Uninsured: 1,405,000",
"Niles-Benton Harbor, MI Metro Area <br \/> Percent Uninsured: 6.0% <br \/> Number Uninsured: 9,000",
"North Port-Sarasota-Bradenton, FL Metro Area <br \/> Percent Uninsured: 11.2% <br \/> Number Uninsured: 89,000",
"Norwich-New London, CT Metro Area <br \/> Percent Uninsured: 3.7% <br \/> Number Uninsured: 10,000",
"Ocala, FL Metro Area <br \/> Percent Uninsured: 10.5% <br \/> Number Uninsured: 36,000",
"Ocean City, NJ Metro Area <br \/> Percent Uninsured: 4.7% <br \/> Number Uninsured: 4,000",
"Odessa, TX Metro Area <br \/> Percent Uninsured: 21.1% <br \/> Number Uninsured: 33,000",
"Ogden-Clearfield, UT Metro Area <br \/> Percent Uninsured: 6.9% <br \/> Number Uninsured: 46,000",
"Oklahoma City, OK Metro Area <br \/> Percent Uninsured: 12.5% <br \/> Number Uninsured: 170,000",
"Olympia-Tumwater, WA Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 14,000",
"Omaha-Council Bluffs, NE-IA Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 74,000",
"Orlando-Kissimmee-Sanford, FL Metro Area <br \/> Percent Uninsured: 12.5% <br \/> Number Uninsured: 312,000",
"Oshkosh-Neenah, WI Metro Area <br \/> Percent Uninsured: 4.7% <br \/> Number Uninsured: 8,000",
"Owensboro, KY Metro Area <br \/> Percent Uninsured: 3.5% <br \/> Number Uninsured: 4,000",
"Oxnard-Thousand Oaks-Ventura, CA Metro Area <br \/> Percent Uninsured: 8.4% <br \/> Number Uninsured: 72,000",
"Palm Bay-Melbourne-Titusville, FL Metro Area <br \/> Percent Uninsured: 9.9% <br \/> Number Uninsured: 58,000",
"Panama City, FL Metro Area <br \/> Percent Uninsured: 12.8% <br \/> Number Uninsured: 25,000",
"Parkersburg-Vienna, WV Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 5,000",
"Pensacola-Ferry Pass-Brent, FL Metro Area <br \/> Percent Uninsured: 10.1% <br \/> Number Uninsured: 47,000",
"Peoria, IL Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 18,000",
"Philadelphia-Camden-Wilmington, PA-NJ-DE-MD Metro Area <br \/> Percent Uninsured: 5.2% <br \/> Number Uninsured: 311,000",
"Phoenix-Mesa-Scottsdale, AZ Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 479,000",
"Pine Bluff, AR Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 5,000",
"Pittsburgh, PA Metro Area <br \/> Percent Uninsured: 3.5% <br \/> Number Uninsured: 82,000",
"Pittsfield, MA Metro Area <br \/> Percent Uninsured: 2.1% <br \/> Number Uninsured: 3,000",
"Pocatello, ID Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 7,000",
"Portland-South Portland, ME Metro Area <br \/> Percent Uninsured: 6.3% <br \/> Number Uninsured: 33,000",
"Portland-Vancouver-Hillsboro, OR-WA Metro Area <br \/> Percent Uninsured: 6.2% <br \/> Number Uninsured: 151,000",
"Port St. Lucie, FL Metro Area <br \/> Percent Uninsured: 11.6% <br \/> Number Uninsured: 54,000",
"Prescott, AZ Metro Area <br \/> Percent Uninsured: 9.5% <br \/> Number Uninsured: 22,000",
"Providence-Warwick, RI-MA Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 64,000",
"Provo-Orem, UT Metro Area <br \/> Percent Uninsured: 7.5% <br \/> Number Uninsured: 46,000",
"Pueblo, CO Metro Area <br \/> Percent Uninsured: 6.5% <br \/> Number Uninsured: 11,000",
"Punta Gorda, FL Metro Area <br \/> Percent Uninsured: 11.7% <br \/> Number Uninsured: 21,000",
"Racine, WI Metro Area <br \/> Percent Uninsured: 4.4% <br \/> Number Uninsured: 8,000",
"Raleigh, NC Metro Area <br \/> Percent Uninsured: 10.0% <br \/> Number Uninsured: 132,000",
"Rapid City, SD Metro Area <br \/> Percent Uninsured: 11.7% <br \/> Number Uninsured: 17,000",
"Reading, PA Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 24,000",
"Redding, CA Metro Area <br \/> Percent Uninsured: 6.5% <br \/> Number Uninsured: 12,000",
"Reno, NV Metro Area <br \/> Percent Uninsured: 9.9% <br \/> Number Uninsured: 46,000",
"Richmond, VA Metro Area <br \/> Percent Uninsured: 8.6% <br \/> Number Uninsured: 109,000",
"Riverside-San Bernardino-Ontario, CA Metro Area <br \/> Percent Uninsured: 7.8% <br \/> Number Uninsured: 351,000",
"Roanoke, VA Metro Area <br \/> Percent Uninsured: 8.3% <br \/> Number Uninsured: 26,000",
"Rochester, MN Metro Area <br \/> Percent Uninsured: 3.9% <br \/> Number Uninsured: 8,000",
"Rochester, NY Metro Area <br \/> Percent Uninsured: 3.6% <br \/> Number Uninsured: 39,000",
"Rockford, IL Metro Area <br \/> Percent Uninsured: 5.9% <br \/> Number Uninsured: 20,000",
"Rocky Mount, NC Metro Area <br \/> Percent Uninsured: 9.8% <br \/> Number Uninsured: 14,000",
"Rome, GA Metro Area <br \/> Percent Uninsured: 15.4% <br \/> Number Uninsured: 15,000",
"Sacramento--Roseville--Arden-Arcade, CA Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 115,000",
"Saginaw, MI Metro Area <br \/> Percent Uninsured: 5.2% <br \/> Number Uninsured: 10,000",
"St. Cloud, MN Metro Area <br \/> Percent Uninsured: 3.4% <br \/> Number Uninsured: 7,000",
"St. George, UT Metro Area <br \/> Percent Uninsured: 13.5% <br \/> Number Uninsured: 22,000",
"St. Joseph, MO-KS Metro Area <br \/> Percent Uninsured: 8.7% <br \/> Number Uninsured: 10,000",
"St. Louis, MO-IL Metro Area <br \/> Percent Uninsured: 6.3% <br \/> Number Uninsured: 176,000",
"Salem, OR Metro Area <br \/> Percent Uninsured: 8.5% <br \/> Number Uninsured: 36,000",
"Salinas, CA Metro Area <br \/> Percent Uninsured: 9.9% <br \/> Number Uninsured: 42,000",
"Salisbury, MD-DE Metro Area <br \/> Percent Uninsured: 6.2% <br \/> Number Uninsured: 25,000",
"Salt Lake City, UT Metro Area <br \/> Percent Uninsured: 9.7% <br \/> Number Uninsured: 116,000",
"San Angelo, TX Metro Area <br \/> Percent Uninsured: 15.5% <br \/> Number Uninsured: 18,000",
"San Antonio-New Braunfels, TX Metro Area <br \/> Percent Uninsured: 14.5% <br \/> Number Uninsured: 354,000",
"San Diego-Carlsbad, CA Metro Area <br \/> Percent Uninsured: 7.7% <br \/> Number Uninsured: 250,000",
"San Francisco-Oakland-Hayward, CA Metro Area <br \/> Percent Uninsured: 4.5% <br \/> Number Uninsured: 210,000",
"San Jose-Sunnyvale-Santa Clara, CA Metro Area <br \/> Percent Uninsured: 4.2% <br \/> Number Uninsured: 83,000",
"San Luis Obispo-Paso Robles-Arroyo Grande, CA Metro Area <br \/> Percent Uninsured: 6.0% <br \/> Number Uninsured: 17,000",
"Santa Cruz-Watsonville, CA Metro Area <br \/> Percent Uninsured: 5.5% <br \/> Number Uninsured: 15,000",
"Santa Fe, NM Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 15,000",
"Santa Maria-Santa Barbara, CA Metro Area <br \/> Percent Uninsured: 9.3% <br \/> Number Uninsured: 41,000",
"Santa Rosa, CA Metro Area <br \/> Percent Uninsured: 5.3% <br \/> Number Uninsured: 27,000",
"Savannah, GA Metro Area <br \/> Percent Uninsured: 14.0% <br \/> Number Uninsured: 53,000",
"Scranton--Wilkes-Barre--Hazleton, PA Metro Area <br \/> Percent Uninsured: 4.5% <br \/> Number Uninsured: 24,000",
"Seattle-Tacoma-Bellevue, WA Metro Area <br \/> Percent Uninsured: 5.6% <br \/> Number Uninsured: 215,000",
"Sebastian-Vero Beach, FL Metro Area <br \/> Percent Uninsured: 10.4% <br \/> Number Uninsured: 16,000",
"Sebring, FL Metro Area <br \/> Percent Uninsured: 9.9% <br \/> Number Uninsured: 10,000",
"Sheboygan, WI Metro Area <br \/> Percent Uninsured: 5.0% <br \/> Number Uninsured: 6,000",
"Sherman-Denison, TX Metro Area <br \/> Percent Uninsured: 15.5% <br \/> Number Uninsured: 20,000",
"Shreveport-Bossier City, LA Metro Area <br \/> Percent Uninsured: 7.3% <br \/> Number Uninsured: 31,000",
"Sierra Vista-Douglas, AZ Metro Area <br \/> Percent Uninsured: 7.8% <br \/> Number Uninsured: 9,000",
"Sioux City, IA-NE-SD Metro Area <br \/> Percent Uninsured: 7.7% <br \/> Number Uninsured: 13,000",
"Sioux Falls, SD Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 15,000",
"South Bend-Mishawaka, IN-MI Metro Area <br \/> Percent Uninsured: 8.0% <br \/> Number Uninsured: 26,000",
"Spartanburg, SC Metro Area <br \/> Percent Uninsured: 10.3% <br \/> Number Uninsured: 34,000",
"Spokane-Spokane Valley, WA Metro Area <br \/> Percent Uninsured: 5.5% <br \/> Number Uninsured: 30,000",
"Springfield, IL Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 8,000",
"Springfield, MA Metro Area <br \/> Percent Uninsured: 2.6% <br \/> Number Uninsured: 16,000",
"Springfield, MO Metro Area <br \/> Percent Uninsured: 10.2% <br \/> Number Uninsured: 46,000",
"Springfield, OH Metro Area <br \/> Percent Uninsured: 6.1% <br \/> Number Uninsured: 8,000",
"State College, PA Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 6,000",
"Staunton-Waynesboro, VA Metro Area <br \/> Percent Uninsured: 8.8% <br \/> Number Uninsured: 10,000",
"Stockton-Lodi, CA Metro Area <br \/> Percent Uninsured: 6.7% <br \/> Number Uninsured: 49,000",
"Sumter, SC Metro Area <br \/> Percent Uninsured: 12.7% <br \/> Number Uninsured: 13,000",
"Syracuse, NY Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 26,000",
"Tallahassee, FL Metro Area <br \/> Percent Uninsured: 8.8% <br \/> Number Uninsured: 33,000",
"Tampa-St. Petersburg-Clearwater, FL Metro Area <br \/> Percent Uninsured: 12.1% <br \/> Number Uninsured: 372,000",
"Terre Haute, IN Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 9,000",
"Texarkana, TX-AR Metro Area <br \/> Percent Uninsured: 10.6% <br \/> Number Uninsured: 15,000",
"The Villages, FL Metro Area <br \/> Percent Uninsured: 4.6% <br \/> Number Uninsured: 5,000",
"Toledo, OH Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 34,000",
"Topeka, KS Metro Area <br \/> Percent Uninsured: 7.1% <br \/> Number Uninsured: 16,000",
"Trenton, NJ Metro Area <br \/> Percent Uninsured: 8.3% <br \/> Number Uninsured: 31,000",
"Tucson, AZ Metro Area <br \/> Percent Uninsured: 8.3% <br \/> Number Uninsured: 83,000",
"Tulsa, OK Metro Area <br \/> Percent Uninsured: 13.2% <br \/> Number Uninsured: 130,000",
"Tuscaloosa, AL Metro Area <br \/> Percent Uninsured: 6.7% <br \/> Number Uninsured: 16,000",
"Tyler, TX Metro Area <br \/> Percent Uninsured: 17.2% <br \/> Number Uninsured: 39,000",
"Urban Honolulu, HI Metro Area <br \/> Percent Uninsured: 3.3% <br \/> Number Uninsured: 31,000",
"Utica-Rome, NY Metro Area <br \/> Percent Uninsured: 4.8% <br \/> Number Uninsured: 14,000",
"Valdosta, GA Metro Area <br \/> Percent Uninsured: 16.3% <br \/> Number Uninsured: 23,000",
"Vallejo-Fairfield, CA Metro Area <br \/> Percent Uninsured: 4.9% <br \/> Number Uninsured: 21,000",
"Victoria, TX Metro Area <br \/> Percent Uninsured: 17.2% <br \/> Number Uninsured: 17,000",
"Vineland-Bridgeton, NJ Metro Area <br \/> Percent Uninsured: 10.3% <br \/> Number Uninsured: 15,000",
"Virginia Beach-Norfolk-Newport News, VA-NC Metro Area <br \/> Percent Uninsured: 8.9% <br \/> Number Uninsured: 145,000",
"Visalia-Porterville, CA Metro Area <br \/> Percent Uninsured: 7.3% <br \/> Number Uninsured: 34,000",
"Waco, TX Metro Area <br \/> Percent Uninsured: 14.6% <br \/> Number Uninsured: 38,000",
"Walla Walla, WA Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 4,000",
"Warner Robins, GA Metro Area <br \/> Percent Uninsured: 13.7% <br \/> Number Uninsured: 25,000",
"Washington-Arlington-Alexandria, DC-VA-MD-WV Metro Area <br \/> Percent Uninsured: 7.6% <br \/> Number Uninsured: 466,000",
"Waterloo-Cedar Falls, IA Metro Area <br \/> Percent Uninsured: 4.4% <br \/> Number Uninsured: 7,000",
"Watertown-Fort Drum, NY Metro Area <br \/> Percent Uninsured: 4.0% <br \/> Number Uninsured: 4,000",
"Wausau, WI Metro Area <br \/> Percent Uninsured: 6.4% <br \/> Number Uninsured: 9,000",
"Weirton-Steubenville, WV-OH Metro Area <br \/> Percent Uninsured: 6.0% <br \/> Number Uninsured: 7,000",
"Wenatchee, WA Metro Area <br \/> Percent Uninsured: 8.7% <br \/> Number Uninsured: 10,000",
"Wheeling, WV-OH Metro Area <br \/> Percent Uninsured: 6.6% <br \/> Number Uninsured: 9,000",
"Wichita, KS Metro Area <br \/> Percent Uninsured: 9.5% <br \/> Number Uninsured: 61,000",
"Wichita Falls, TX Metro Area <br \/> Percent Uninsured: 15.3% <br \/> Number Uninsured: 21,000",
"Williamsport, PA Metro Area <br \/> Percent Uninsured: 6.0% <br \/> Number Uninsured: 7,000",
"Wilmington, NC Metro Area <br \/> Percent Uninsured: 10.8% <br \/> Number Uninsured: 31,000",
"Winchester, VA-WV Metro Area <br \/> Percent Uninsured: 8.9% <br \/> Number Uninsured: 12,000",
"Winston-Salem, NC Metro Area <br \/> Percent Uninsured: 10.5% <br \/> Number Uninsured: 69,000",
"Worcester, MA-CT Metro Area <br \/> Percent Uninsured: 2.9% <br \/> Number Uninsured: 27,000",
"Yakima, WA Metro Area <br \/> Percent Uninsured: 11.2% <br \/> Number Uninsured: 28,000",
"York-Hanover, PA Metro Area <br \/> Percent Uninsured: 5.7% <br \/> Number Uninsured: 25,000",
"Youngstown-Warren-Boardman, OH-PA Metro Area <br \/> Percent Uninsured: 5.8% <br \/> Number Uninsured: 31,000",
"Yuba City, CA Metro Area <br \/> Percent Uninsured: 6.8% <br \/> Number Uninsured: 12,000",
"Yuma, AZ Metro Area <br \/> Percent Uninsured: 11.1% <br \/> Number Uninsured: 22,000"]
cities = []
uninsured = []
# Pull the city name (text before the first HTML tag) and the uninsured
# percentage (the number just before the first '%') out of each entry.
for entry in list_uninsured:
    cities.append(entry.split('<')[0].strip())
    uninsured.append(float(entry.split('%')[0].strip()[-5:].strip()))
df_uninsured = pd.DataFrame({"cities": cities, "uninsured_percent": uninsured})
df_uninsured.to_csv("data/Job_Security/uninsured_percent.csv") |
985,518 | 5804e6dd9cf1dfd4c49520575639362b4eba97bc | import pygame
pygame.init()
class Display():
displayWidth = 800
displayHeight = 600
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
gameDisplay = pygame.display.set_mode((displayWidth, displayHeight)) #Init display
def text_objects(self, text, font):
textSurface = font.render(text, True, self.white) #AA - True
return textSurface, textSurface.get_rect()
def message_display(self, text, size, x, y):
textDesc = pygame.font.Font('retro.ttf', size)
textSurf, textRect = self.text_objects(text, textDesc) #textSurf contains text, color and AA
textRect.center = (x, y)
self.gameDisplay.blit(textSurf, textRect)
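# A minimal usage sketch (hypothetical -- assumes a 'retro.ttf' font file sits
# next to the script, as message_display expects):
#   d = Display()
#   d.message_display("Hello", 32, Display.displayWidth // 2, Display.displayHeight // 2)
#   pygame.display.update()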
|
985,519 | 2d27c65ac49c63f61621a0a227e0237df76d46ca | ''' Read input from STDIN. Print your output to STDOUT '''
#Use input() to read input from STDIN and use print to write your output to STDOUT
def main():
t = int(input())
for _ in range(t):
n = int(input())
integerList = list(map(int,input().split()))
        maxsum = 0
        first = last = 0  # avoid a NameError when no valid pair exists
        # track the best sum of two elements that sit two positions apart
        for i in range(n):
            ele = i + 2
            if ele >= n:
                break
            new_sum = integerList[i] + integerList[ele]
            if new_sum > maxsum:
                maxsum = new_sum
                first = integerList[i]
                last = integerList[ele]
        listMax = max(integerList)
        if first + last > listMax:
            print(last, first, sep='')
        else:
            print(listMax)
main()
|
985,520 | 191cfb38c8a7490729741e10da938f54f4cc6aae | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import librosa
import sklearn
import warnings
import data_reading
import noisereduce as nr
import preprocessing
# Suppress warnings when loading files with librosa
warnings.simplefilter("ignore")
# A function for calculating autocorrelation of a signal
def autocorr(x, t=1):
return np.corrcoef(np.array([x[:-t], x[t:]]))
# Add the path of each file to the train.csv
base_dir = data_reading.read_config()
df_train = pd.read_csv(base_dir + "train.csv")
####### !!!!!!!!!!!!!!! ##########
####### Run these two lines below once if you've never run this file before. It adds a filepath to each file in train.csv #########
# df_train['full_path'] = base_dir + "train_audio/" + df_train['ebird_code'] + '/' + df_train['filename']
# df_train.to_csv(base_dir + "train.csv")
""" Split a soundwave up in frames """
def get_frames(samples, window_width, stepsize):
nr_of_frames = (len(samples) - window_width + stepsize) // stepsize
frames = np.array([samples[stepsize*n:stepsize*n+window_width] for n in range(nr_of_frames)])
return nr_of_frames, frames
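# Worked example: a 1-second clip at a 22050 Hz sampling rate, with the default
# window_width=2048 and stepsize=512 used further below, yields
# (22050 - 2048 + 512) // 512 = 40 overlapping frames.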
""" Multiply a series of frames with a window function (hammig window)"""
def window_function_transform(frames):
from scipy.signal.windows import hamming
# Construct a Hamming window with the same number of datapoints as 1 frame.
window = hamming(frames.shape[1])
# Multiply all frames with our window function
new_frames = np.array([frame*window for frame in frames])
return new_frames
""" Compute the statistical features of each frame that we need for our noise/non-noise heuristic """
def get_statistical_features(frames, heuristic="energy"):
# Initiate the necessary lists where we want to store our features
energies = np.array([np.sum(frame**2) for frame in frames])
return energies, np.mean(energies)
""" Helps us with the automatic calculation of the energy coeff. for each file. The higher the S/N is, the higher the energy coeff. is. """
def compute_energy_coefficient(samples, base_coefficient=1):
# Compute an approximation of the Signal-to-Noise-Ratio
SNR = np.abs( np.log10( np.abs( ( np.mean(samples) ) / ( np.std(samples) ) ) ) )
# Compute the energy coefficient
base_coefficient = base_coefficient
energy_coefficient = base_coefficient * ( ( 1 / (SNR) ) ** 2 )
return SNR, energy_coefficient
""" The function that bring it all together. """
def get_noise_frames(samples, sampling_rate, window_width=2048, stepsize=512, verbose=False):
""" Preparation for separating pure noise from non-pure noise. """
# Separate the samples in frames according to the window_width and stepsize
nr_of_frames, frames = get_frames(samples, window_width=window_width, stepsize=stepsize)
# Use a window function (hamming works best) on all our frames
frames = window_function_transform(frames)
# Get the statistical features that we need. For now only 'energy' works.
    energies, mean_energy = get_statistical_features( frames )
# Get the energy coefficient that we need for separating pure noise from non-pure noise.
SNR, energy_coefficient = compute_energy_coefficient(samples, base_coefficient=2)
if verbose:
print("Energy coefficient: " + str(round(energy_coefficient, 3) ) )
print("Signal-to-Noise: " + str(round(SNR, 3)))
""" Separating pure noise from non-pure noise. """
# Initiate lists to store the separated frames in.
noisy_frames = []
non_noisy_frames = []
noisy_energy = []
non_noisy_energy = []
# Go through all of the frame-energies. The ones below a certain threshold have a very high chance of being pure background noise.
for index, energy in enumerate(energies):
if energy < energy_coefficient * mean_energy:
# Add the pure noisy parts to the appropriate list
noisy_frames.extend(frames[index][int((window_width-stepsize)/2):int((window_width+stepsize)/2)])
noisy_energy.append(energy)
else:
# Add the non-noise frames to the appropriate list
non_noisy_frames.extend(frames[index][int((window_width-stepsize)/2):int((window_width+stepsize)/2)])
non_noisy_energy.append(energy)
if verbose:
# A measure for how well the noise is predictable (higher is better). The better predictable it is, the better a spectral noise gate will work
print("Noise predictability: " + str(round(autocorr(noisy_frames)[0,1] / autocorr(non_noisy_frames)[0,1], 3) ) )
""" Plotting """
# Initiate time domain axes for some different graphs
t_soundwave = np.linspace(0, len(samples)/sampling_rate, len(samples))
t_soundwave_noisy = np.linspace(0, len(noisy_frames)/sampling_rate, len(noisy_frames))
t_soundwave_non_noisy = np.linspace(0, len(non_noisy_frames)/sampling_rate, len(non_noisy_frames))
t_windowed_features = np.linspace(0, len(samples)/sampling_rate, nr_of_frames)
t_windowed_features_noisy = np.linspace(0, len(noisy_frames)/sampling_rate, len(noisy_energy))
t_windowed_features_non_noisy = np.linspace(0, len(non_noisy_frames)/sampling_rate, len(non_noisy_energy))
# Plot the signal versus the signal energy
plt.figure(figsize=(20,12))
plt.title("Energy whole signal")
plt.plot(t_soundwave, preprocessing.normalize(samples), alpha=0.5)
plt.plot(t_windowed_features, preprocessing.normalize(energies))
plt.show()
# Plot the signal versus the signal energy
plt.figure(figsize=(20,12))
plt.title("Energy pure noise signal")
plt.plot(t_soundwave_noisy, preprocessing.normalize(noisy_frames), alpha=0.5)
plt.plot(t_windowed_features_noisy, preprocessing.normalize(noisy_energy) )
plt.show()
# Plot the signal versus the signal energy
plt.figure(figsize=(20,12))
plt.title("Energy non pure noise signal")
plt.plot(t_soundwave_non_noisy, preprocessing.normalize(non_noisy_frames), alpha=0.5)
plt.plot(t_windowed_features_non_noisy, preprocessing.normalize(non_noisy_energy))
plt.show()
return np.array(noisy_frames)
def filter_sound(samples, sampling_rate, window_width=2048, stepsize=512, verbose=False):
noise = get_noise_frames(samples=samples, sampling_rate=sampling_rate, window_width=window_width, stepsize=stepsize, verbose=verbose)
if len(noise) > 0:
reduced_noise = nr.reduce_noise(audio_clip=samples, noise_clip=noise, verbose=verbose)
return preprocessing.normalize(reduced_noise)
else:
return samples
if __name__ == "__main__":
samples, sampling_rate = librosa.load(df_train["full_path"][3])
filter_sound(samples, sampling_rate, verbose=True)
|
985,521 | d6a1593b170d710ef9726610f5c824a46e9a3be0 | from random import randint
print("{:=^40}" .format(" JOGO DE DADOS "))
dado = randint(1, 6)  # randint is inclusive on both ends: 1-6 like a real die
opcao = 1
print('''VOCÊ GOSTARIA DE JOGAR O DADO?
[ 1 ] SIM
[ 2 ] NÃO''')
opcao = int(input("Escolha sua opção: "))
while opcao != 1 and opcao != 2:
print("OPÇÃO INVÁLIDA! Escolha uma nova opção!")
opcao = int(input("Escolha sua opção: "))
if opcao == 1:
    while opcao == 1:
        dado = randint(1, 6)  # re-roll on every play
        print("Você jogou o dado e seu número é {}" .format(dado))
        opcao = int(input("Escolha uma nova opção: "))
elif opcao == 2:
print("Você escolheu sair, obrigado e volte sempre!") |
985,522 | 53b8b16cd8940c03f2ee686c5d6978f0a7a7758a | #!/usr/bin/env python
import roslib
import rospy
import tf
import time
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
import math
#class Kinect:
def turn(y_pos, lr_lim):
if y_pos < -lr_lim or y_pos > lr_lim:
result = math.copysign(angle,y_pos)
else:
result = 0
# if lr < 0:
# result = -turn
# else:
# result = turn
return result
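# sensor_data is read below but never assigned anywhere in this file; a minimal
# sketch of the missing piece (the 'scan' topic name is an assumption) -- the
# subscription itself, rospy.Subscriber('scan', LaserScan, scan_callback),
# belongs after rospy.init_node() in __main__:
sensor_data = None
def scan_callback(msg):
    global sensor_data
    sensor_data = msg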
def obstacle_turn(sensor_data):
    # the math module has no mean(); average the range readings manually
    right_average = sum(sensor_data[:200]) / float(len(sensor_data[:200]))
    left_average = sum(sensor_data[300:]) / float(len(sensor_data[300:]))
result = left_average - right_average
if result < 0:
result = -(angle)
else:
result = angle
return result
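# Example: if the right arc (ranges[:200]) averages longer distances -- more free
# space -- than the left arc (ranges[300:]), then left_average - right_average < 0
# and obstacle_turn returns -angle; under the usual ROS convention a negative
# angular.z is a clockwise (rightward) turn, toward the clearer side.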
def follow_person(userid):
global check_fb
global check_lr
global count
global user
try:
frame_data = []
        listener.waitForTransform(BASE_FRAME, "/%s_%d" % (FRAME, userid), rospy.Time(), rospy.Duration(10))
        trans, rot = listener.lookupTransform(BASE_FRAME, "/%s_%d" % (FRAME, userid), LAST)
frame_data.append(trans)
        fb, lr, _ = frame_data[0]
        # sensor_data is assumed to hold the latest LaserScan message; clean its ranges
        data_sns = []
        for num, data in enumerate(sensor_data.ranges):
            if math.isnan(data):
                data_sns.append(0)
            elif math.isinf(data):
                data_sns.append(max_range)
            else:
                data_sns.append(data)
for i in range(len(data_sns[detection_arc_start:detection_arc_end]) - object_size):
object_count = 0
for j in range(object_size):
if(data_sns[detection_arc_start+i+j]) < detection_distance:
object_count += 1
if object_count == object_size:
base_data.linear.x = 0.1
turn_angle = obstacle_turn(data_sns)
else:
turn_angle = turn(lr,lr_follow_lim)
if fb == check_fb and lr == check_lr:
base_data.linear.x = 0
turn_angle = 0.7*turn(lr,lr_facerec_lim)
count += 1
if count != timeout:
for new_user in range(0,6):
if listener.frameExists('torso_'+str(new_user)):
user = new_user
if lr > 0:
print 'Person Lost to the Left!'
else:
print 'Person Lost to the Right!'
                print 'Sounding alarm in ' + str((timeout - count)*loop_sleep) + 's'
else:
print 'Person Lost... Sounding Alarm...'
return False
#statePub.publish(False)
#return False
else:
count = 0
if fb > fb_lim:
base_data.linear.x = speed*fb
print "Following user " + str(userid) + "..."
else:
base_data.linear.x = 0
if fb < too_close:
print "Please take a step back."
else:
print 'Stay Still for Verification'
if check_fb < fb_lim:
if lr < lr_facerec_lim and lr > -lr_facerec_lim:
print "Beginning Face Recognition for person " + str(userid) + "..."
return False
#statePub.publish(True)
else:
turn_angle = 0.5*turn(lr,lr_facerec_lim)
print 'Turning towards person ' + str(userid)
else:
time.sleep(2)
base_data.angular.z = turn_angle/fb
print str(check_fb) + '<------------fb------------->' + str(fb)
print str(check_lr) + '<------------lr------------->' + str(lr)
check_fb = fb
check_lr = lr
print 'fb = ' + str(fb)
print 'lr = ' + str(lr)
print 'speed = ' + str(base_data.linear.x)
print 'turn = ' + str(base_data.angular.z)
pub.publish(base_data)
return frame_data
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
raise IndexError
if __name__ == "__main__":
    rospy.init_node('reactive_mover_node')  # rospy allows only one init_node per process
    BASE_FRAME = '/openni_depth_frame'
    FRAME = 'torso'
    LAST = rospy.Duration()
listener = tf.TransformListener()
pub = rospy.Publisher('cmd_vel', Twist, queue_size=100)
keep_going = 1
loop_sleep = 0.5
user = 1
count = 0
detection_arc_start = 125
detection_arc_end = 375
detection_distance = 0.5
object_size = 5
fb_lim = 1.5
lr_follow_lim = 0.3
lr_facerec_lim = 0.1
angle = 0.7
speed = 0.2
too_close = 0.5
base_data = Twist()
fb = 0
lr = 0
check_fb = 100
check_lr = 100
timeout = 30
finished = False
max_range = 5.6
while keep_going != False:
keep_going = follow_person(user)
time.sleep(loop_sleep)
#statePub = rospy.Publisher('???', ???, queue_size=100)
|
985,523 | a5a954792967370f1d03543bb272372468d85f36 | """
{{cookiecutter.project_label}} Setup
"""
import os
import re
from setuptools import setup, find_packages
VERSION = os.getenv('VERSION', '0.0.0')
# Read in the package names from requirements.txt,
# all of which should be required for the package to function
package_deps = None
with open('requirements.txt') as requirements_file:
package_deps = frozenset([
        pkg for pkg in [re.split(r'[<>=]+', re.split(r'\s+', line)[0])[0] for line in
                        requirements_file.read().splitlines()]
if pkg and pkg[0].isalpha()
])
setup(
name='{{cookiecutter.application_name}}',
version=VERSION,
description='{{cookiecutter.project_label}} for SocialCode',
author='SocialCode Engineering Team',
author_email='devteam@socialcodeinc.com',
url='http://www.socialcode.com',
packages=find_packages(),
classifiers=[
'Framework :: Django',
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires=package_deps,
include_package_data=True,
package_data={
'{{cookiecutter.application_name}}': [
'batch_job_definitions/*.tmpl',
'fixtures/*.json',
'migrations/*.py',
'static/*'
]
},
zip_safe=False
)
|
985,524 | 09537a9daf20b1d1447890012209ededc69c614b | ###imports
import sklearn
from SVM import SVMprocess
from KNN import KNNprocess
from NaiveBayes import NBprocess
from RandomForest import RFprocess
SVMtime,SVM_kfold_acc, SVM_loo_acc = SVMprocess()
KNNtime, KNN_kfold_acc, KNN_loo_acc = KNNprocess()
NBtime, NB_kfold_acc, NB_loo_acc = NBprocess()
RFtime, RF_kfold_acc, RF_loo_acc = RFprocess()  # was overwriting NB_loo_acc
|
985,525 | 05341b5df1ba4f1473ce8181bc29625d0a7c3beb | # Tested
# Verified
def bidirectional(adjList, source, target):
fronts = [[source], [target]]
visited = [set([source]), set([target])]
cnt = [0, 0]
prev = [{source: None}, {target: None}]
border = []
if source == target:
border.append(source)
while all(fronts) and not border:
smaller = 0 if len(fronts[0]) < len(fronts[1]) else 1
children = []
cnt[smaller] += 1
for node in fronts[smaller]:
for child in adjList[node]:
if child in visited[not smaller]:
border.append(child)
if child not in visited[smaller]:
visited[smaller].add(child)
children.append(child)
prev[smaller][child] = node
fronts[smaller] = children
for node, parent in prev[1].items():
prev[0][parent] = node
return sum(cnt), prev[0]
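# Minimal demo on a hypothetical path graph 0-1-2-3: the merged predecessor map
# can be walked back from the target to recover the path.
if __name__ == "__main__":
    adj = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}
    count, prev = bidirectional(adj, 0, 3)
    path, node = [], 3
    while node is not None:
        path.append(node)
        node = prev.get(node)
    print(count, path[::-1])  # prints: 3 [0, 1, 2, 3]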
|
985,526 | 3f1cfdbd832c0cb0bff9b3cce7c297b1514d8bb4 | #_*_coding:utf-8_*_
#统计模板
import stat_base
import sconf
import mysqlwrap
from httpwrap import HttpWrap
from sphinxwrap import sphinx
import rediswrap
import utils,json
#-------参数配置文件-----#
def get_cnf_val(k,dist):
    """Recursively fetch a value from the JSON config dict
    """
if '.' not in k :
return dist[k] if k in dist else None
kesy = k.split('.')
if kesy[0] in dist:
kk = k[k.index('.')+1:]
tmp = dist[kesy[0]]
return get_cnf_val(kk,tmp)
else:
return None
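# Example: get_cnf_val('a.b', {'a': {'b': 1}}) returns 1; a key missing at any
# level returns None.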
def get_host_by_data(k):
    """Fetch the data-server info
    """
host_key = get_cnf_val(k,sconf.DATA_SOURC)
if host_key:
return get_cnf_val(host_key,sconf.HOST)
return None
#----- data operations ----#
def reg_items_mysql(name,info):
    """Items come from a MySQL table; register the items automatically
    """
k = info['source']
dbinfo = get_host_by_data(k)
if not dbinfo:
return [-1,"%s not find." % k]
dbinfo['dbname'] = k.split('.')[-1]
db = mysqlwrap.dbclass(dbinfo)
res,desc = db.connect()
if res ==-1:
return res,desc
idfield = info.get("id","id")
key_prefix = info.get("key_prefix","")
sql_item={}
sql_item['table']=info['table']
sql_item['fields']="%s,%s,%s" %(idfield,info['key'],info['name'])
where = info['where'] if 'where' in info and info['where'] else ""
sql_item['where'] = where
sql_item['limit'] = 1000
id = 0
item_total=0
while True:
sql_item['where'] = "%s and %s>%s" %(where,idfield,id) if where else "%s>%s" %(idfield,id)
res,desc = db.query(sql_item)
if res==-1 or res==0 and not desc:
break
itm=[]
for row in desc:
itm.append([row[info['name']],row[info['key']]])
print(itm[-1])
id = row[idfield]
rs,ds = stat_base.reg_items(name,itm,key_prefix)
#print(rs,ds)
if rs==0:
item_total+=len(itm)
stat_base.reg_items2redis(name)
return [0,item_total]
def init_group(name,info):
    """
    Initialize the statistics group and its items from the config file
    """
res,desc = stat_base.reg_group(name,info)
if res==0 and desc:
for row in info['item_from']:
rs ,ds = reg_items_mysql(name,row)
return res, desc
def get_stat_data(name,info):
    """Fetch the statistics data according to the config file
    """
    # URL submission mode
http = HttpWrap()
http.set_header('Content-type','application/json')
url = "http://192.168.10.126:1985/api/set"
for i in range(0,len(info['history_from'])):
itm = info['history_from'][i]
source = itm['source'].split('.')
if source[1] == 'sphinx':
host_info = get_host_by_data(itm['source'])
if not host_info :
return [-1,"key erro %s not in sysconfig." % row['source']]
sp = sphinx(host_info['host'],host_info['port'])
expression = itm['expression']
expression['index'] = source[2]
total_found = 0
while True:
if total_found >0:
if expression['pageSize'] * expression['page'] >=total_found:
break
expression['page'] +=1
sp.initQuery(itm['expression'])
rs = sp.RunQueries()
if rs and rs[0]['status']==0:
total_found = rs[0]['total_found']
_items = {}
for row in rs[0]['matches']:
_items["%s%s" % (itm['key_prefix'],row['attrs'][itm['key']])]=[row['attrs'][itm['value']],utils.timestamp(0,'d')]
if _items:
data = json.dumps({'gkey':name,'data':_items})
_rs = http.request(url,"POST",data)
rs = http.read(_rs)
print(rs)
else:
print(sp._error)
break
if __name__=="__main__":
import json
sconf.SYS = json.loads("".join(open('../conf/sys.json').read().split()))
sconf.HOST = json.loads("".join(open('../conf/host.json').read().split()))
sconf.DATA_SOURC = json.loads("".join(open('../conf/databases.json').read().split()))
#biz_info = json.loads("".join(open('../conf/biz.json').read().split()))
biz_info = json.loads(open('../conf/biz.json').read().replace('\n','').replace('\t',''))
    # load the databases
mysqlwrap.setup_db('default',sconf.SYS['mysql'])
mysqlwrap.pool_monitor()
rediswrap.setup_redis('default',sconf.SYS['redis']['host'],sconf.SYS['redis']['port'])
#print ( init_group('pst_corp',biz_info['pst_corp']) )
print(get_stat_data('pst_corp',biz_info['pst_corp'])) |
985,527 | 8e5df3a0943c012c7029b31cbb17caf05e426e4f | # 这是第一个注释
print("hello hello")
"""
这是一个多行注释
。。。。
。。。。
。。。。
注释结束了
"""
# 这是第二个注释 为了保证代码的可读性,注释和代码之间 至少要有 两个空格
print("hello world") # 输出欢迎信息
|
985,528 | 3473f3434e9abbc63f7acd4b0c1b91432e042a62 | /Users/Storm/Documents/Python/anaconda2/lib/python2.7/stat.py |
985,529 | dd2b3773a1793938a26e60af34afeedf72aa6bb1 | #coding:utf-8
import wx
app = wx.App()
win = wx.Frame(None,title="simple Editor",size=(410,335))
bkg = wx.Panel(win)
LoadButton = wx.Button(bkg,label='Open')
SaveButton = wx.Button(bkg,label='Save')
filename = wx.TextCtrl(bkg)
contents = wx.TextCtrl(bkg, style=wx.TE_MULTILINE|wx.HSCROLL)
hbox = wx.BoxSizer()
hbox.Add(filename, proportion=1, flag=wx.EXPAND)
hbox.Add(LoadButton, proportion=1, flag=wx.LEFT, border=5)
hbox.Add(SaveButton, proportion=1, flag=wx.LEFT, border=5)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(hbox, proportion=0, flag=wx.EXPAND|wx.ALL, border=5)
vbox.Add(contents, proportion=1, flag=wx.EXPAND|wx.BOTTOM|wx.RIGHT, border=5)
bkg.SetSizer(vbox)
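# The original snippet leaves both buttons unbound; a minimal sketch of load/save
# handlers (an assumption, not part of the original):
def load(event):
    with open(filename.GetValue()) as f:
        contents.SetValue(f.read())
def save(event):
    with open(filename.GetValue(), 'w') as f:
        f.write(contents.GetValue())
LoadButton.Bind(wx.EVT_BUTTON, load)
SaveButton.Bind(wx.EVT_BUTTON, save)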
win.Show()
app.MainLoop()
|
985,530 | 0c465968d76ba78a96830c730424dfbd17cb70bc | import os
import sys
import time
from gooey import Gooey
from gooey import GooeyParser
import pafy
from pytube import Playlist
running = True
pListUrl = []
pathToSave = None
@Gooey(optional_cols=2,
program_name="Youtube Downloader",
dump_build_config=True,
show_success_modal=False)
def main():
settings_msg = 'YouTube URL Parsing Tool. Downloads single videos, songs, as well as playlists '
parser = GooeyParser(description=settings_msg)
parser.add_argument('--verbose', help='be verbose', dest='verbose',action='store_true', default=False)
subs = parser.add_subparsers(help='singlevid', dest='command')
singleVideo = subs.add_parser('single-video', help='Downloads a single video from a YouTube url')
singleVideo.add_argument("--YouTube_Video_Url", help="Enter your YouTube Link to Download")
singleVideo.add_argument('--Save_Location', help="Select where to download the video to", widget="DirChooser")
playlistVideo = subs.add_parser('video-playlist', help='Downloads a playlist of videos from a YouTube url')
playlistVideo.add_argument("--YouTube_Playlist_Url", help="Enter your YouTube Playlist URL to Download")
playlistVideo.add_argument('--Playlist_Save_Location', help="Select where to download the playlist to", widget="DirChooser")
singleAudio = subs.add_parser('single-audio', help='Downloads a single song from a YouTube url')
singleAudio.add_argument("--YouTube_Audio_Url", help="Enter your YouTube Link to Download")
singleAudio.add_argument('--Save_Location_Audio', help="Select where to download the video to", widget="DirChooser")
playlistAudio = subs.add_parser('audio-playlist', help='Downloads a playlist of songs from a YouTube url')
playlistAudio.add_argument("--YouTube_Playlist_Url", help="Enter your YouTube Playlist URL to Download")
playlistAudio.add_argument('--Playlist_Save_Location', help="Select where to download the playlist to", widget="DirChooser")
args = parser.parse_args()
command = args.command
print(args)
if "single-video" in command:
tUrl =args.YouTube_Video_Url
pathToSave = args.Save_Location
getOneVid(tUrl,pathToSave)
elif "video-playlist" in command:
setPlayList(args)
getAllVids(args)
elif "single-audio" in command:
getOneSong(args)
elif "audio-playlist" in command:
setPlayList(args)
getAllSongs(args)
def getOneVid(urlToDl,path):
video = pafy.new(urlToDl)
print("Getting best video")
vDL = video.getbest(preftype="mp4")
print("Got best video")
print("Now downloading: "+vDL.title)
createDir(vDL.title)
vDL.download(filepath=path,quiet=False)
print("download complete")
print("now sleeping")
time.sleep(2)
def getOneSong(urlToDl,path):
video = pafy.new(urlToDl)
print("Getting best video")
sDL = video.getbestaudio()
print("Got best video")
print("Now downloading: "+sDL.title)
createDir(sDL.title)
sDL.download(filepath=path,quiet=False)
print("download complete")
print("now sleeping")
time.sleep(2)
def setPlayList(url):
try:
pList = Playlist(url)
if not pList:
print("Unable to parse playlist url")
else:
            pList.populate_video_urls()
            # iterate through the populated urls and append them to the list
            for v in pList.video_urls:
                pListUrl.append(v)
            print("Playlist parsed successfully. Ready for downloading")
    except:
        print("Error parsing URL object")
        sys.exit(1)
def getAllVids(videoUrls, path):
    for v in videoUrls:
        getOneVid(v, path)
def getAllSongs(songUrls, path):
    for v in songUrls:
        getOneSong(v, path)
def createDir(dir):
    if os.path.exists(dir):  # os._exists is private; use the public API
print("Destination directory already exists")
else:
print("Creating output folder")
try:
os.mkdir(dir)
except:
print("Unable to make output folder. will save file in current directory")
dir = os.getcwd()
if __name__ == '__main__':
main() |
985,531 | b14edc051eabf7c7cf7e69dd174009de56cfd3d1 | import unittest, time
from HTMLTestRunner import HTMLTestRunner
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.multipart import MIMEMultipart
import os
# discover the test cases (test_*.py) under the current folder
test_dir='./'
discover=unittest.defaultTestLoader.discover(test_dir,pattern='test_*.py')
#======== define the email sending ====
def send_mail(file_new):
f=open(file_new, 'rb')
mail_body=f.read()
    f.close()  # close() was missing its parentheses
file_name=os.path.basename(file_new)
user="zhqg23@163.com"
password="zhuqiuge23"
reciver="qiuge.zhu@autodesk.com"
subject="自动化测试报告"
msg=MIMEMultipart()
msg.attach(MIMEText(mail_body,'html','utf-8'))
att=MIMEText(open(file_new, 'rb').read(),'html','utf-8')
att["Content-Type"]='application/octet-stream'
att["Content-Disposition"]='attachment;filename=%s'%file_name
#msg=MIMEMultipart('related')
msg['Subject']=subject
msg['From']=user
msg["To"]=reciver
msg.attach(att)
smtp=smtplib.SMTP_SSL('smtp.163.com',465)
smtp.login(user, password)
smtp.sendmail(user, reciver, msg.as_string())
smtp.quit()
print("email has been sent out")
#=== search the report directory for the newest test report file ====
def new_report(testreport_dir):
lists=os.listdir(testreport_dir)
lists.sort()
file_new=os.path.join(testreport_dir, lists[-1])
print(file_new)
return file_new
if __name__ == '__main__':
    now=time.strftime("%Y-%m-%d %H_%M%S")
    TestResult_dir='./TestReport/'
    if not os.path.exists(TestResult_dir):
        os.makedirs(TestResult_dir)
    filename=TestResult_dir+now+'_result.html'
    fp=open(filename,'wb')
runner=HTMLTestRunner(stream=fp, title='测试报告',description='用例执行情况:')
runner.run(discover)
fp.close()
new_report=new_report(TestResult_dir)
send_mail(new_report)
|
985,532 | daf3346ee9f34eb5f9abe6b0ab4ffed9368cd9a8 | '''
1) Modify the program below to display what is asked:
x=1
while x<=3:
print(x)
x=x+1
'''
# a) Display the numbers from 1 to 100.
x = 1
print('Numeros de 1 a 100:\n')
while x <= 100:
print(x, end=' ')
x += 1
print('\n')
# b) Display the numbers from 50 to 100.
x = 50
print('Numeros de 50 a 100:\n')
while x <= 100:
print(x, end=' ')
x += 1
print('\n')
|
985,533 | 1a3052b51a4ec24437b72262c218210e5d6a4236 | from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
from kivy.config import Config
from kivy.clock import Clock
from kivy.utils import get_random_color
from kivy.properties import ObjectProperty
from random import *
'''
get every position in a grid,
then use a random choice among the possibilities
'''
class DashboardPositions():
def __init__(self, dimensiones=None, pox=0, poy=0):
self.posiciones = []
self.screenW = dimensiones[0]
self.screenH = dimensiones[1]
def populate(self):
lines = self.screenH / 20
cols = self.screenW / 20
for y in range(lines):
for x in range(cols):
self.posiciones.append((y,x))
def transformPosition(self):
cols = self.screenW / 20
lines = self.screenH / 20
totales = cols * lines
print "totales posiciones: " + str(cols * lines)
def getPositions(self):
return self.posiciones
class DashboardHistory():
def __init__(self):
self.history = []
def __repr__(self):
return str(self.history)
def savePosition(self,t):
self.history.append(t)
class BoardUI(Widget):
def __init__(self):
Widget.__init__(self)
self.h = DashboardHistory()
self.p = DashboardPositions(Window.size)
self.p.populate()
self.l = self.p.getPositions()
def update(self, dt):
self.drawSquare()
def drawSquare(self):
if len(self.l) == 0:
Clock.unschedule(self.update)
return False
x = choice(self.l)
self.l.remove(x)
t = (x[1]*20,x[0]*20)
self.h.savePosition(t)
with self.canvas:
color = Color(random(), random(), random(), mode='rgb')
Rectangle(pos=t, size=(20, 20))
class JuegoApp(App):
def build(self):
Config.set('graphics', 'width', '550')
Config.set('graphics', 'height', '400')
Config.write()
board = BoardUI()
Clock.schedule_interval(board.update, .1)
return board
if __name__ == '__main__':
JuegoApp().run()
|
985,534 | 9472c9ba456ff7a9f4714c8d316c1fc6e23bea7e | from sys import exit,argv
from os import getcwd,system
from PyQt5.QtWidgets import QApplication,QMainWindow,QLabel,QLineEdit,QPushButton,QMessageBox
from urllib.request import urlretrieve
class Window(QMainWindow):
def __init__(self):
super().__init__()
        self.setWindowTitle('Download Image')
self.setFixedSize(500,200)
self.design()
def design(self):
###
self.lbl_url=QLabel(self)
self.lbl_url.setText('Url: ')
self.lbl_url.move(20,5)
###
self.lbl_name=QLabel(self)
self.lbl_name.setText('file name')
self.lbl_name.move(20,30)
###
self.txt_url=QLineEdit(self)
self.txt_url.move(50,10)
self.txt_url.resize(400,20)
###
self.txt_name=QLineEdit(self)
self.txt_name.move(80,35)
self.txt_name.resize(80,20)
### download button
self.btn_download=QPushButton(self)
self.btn_download.move(395,150)
self.btn_download.setText('download')
#### open files
self.btn_open=QPushButton(self)
self.btn_open.move(5,150)
self.btn_open.setText('Open file')
self.btn_open.setEnabled(False)
###message box
self.messsage=QMessageBox(self)
###
self.btn_download.clicked.connect(self.download)
self.btn_open.clicked.connect(self.openfile)
def download(self):
if self.txt_name.text()=='' or self.txt_url.text()=='':
self.messsage.setText('please do not leave blank ☻ ')
self.messsage.show()
else:
self.path=getcwd()+'/'+self.txt_name.text()+'.png'
try:
urlretrieve(self.txt_url.text(),self.path)
self.btn_open.setEnabled(True)
except :
self.messsage.setText('error')
self.messsage.show()
self.txt_name.setText('')
self.txt_url.setText('')
def openfile(self):
system(f'start {self.path}')
self.btn_open.setEnabled(False)
if __name__=='__main__':
app=QApplication(argv)
win=Window()
win.show()
exit(app.exec_())
|
985,535 | 1093805058f3bf8ab3284a60fc65ce1c3d341af7 |
# Iterative approach: swap symmetric pairs while moving both indices inward
def reverseList(arr, start, end):
    while start < end:
        arr[start], arr[end] = arr[end], arr[start]
        start += 1
        end -= 1
# Recursive approach: swap the outer pair, then recurse on the inner slice
def recursiveReverseList(arr, start, end):
    if start >= end:
        return
    arr[start], arr[end] = arr[end], arr[start]
    recursiveReverseList(arr, start+1, end-1)
if __name__ == "__main__":
A = [1, 2, 3, 4, 5, 6]
print(A)
# reverseList(A, 0, len(A)-1)
recursiveReverseList(A, 0, len(A)-1)
print("Reversed list is")
print(A)
|
985,536 | 95c12f6c727e54246d40ca3a0f1b207d8dbae4fe | from django.db import models
import datetime as dt
from django.contrib.auth.models import User
from tinymce.models import HTMLField
from cloudinary.models import CloudinaryField
class Tag(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=60)
editor = models.ForeignKey(User,on_delete=models.CASCADE)
post = HTMLField()
tags = models.ManyToManyField(Tag)
pub_date = models.DateTimeField(auto_now_add=True)
article_image = CloudinaryField('article_image')
@classmethod
def today_news(cls):
today = dt.date.today()
news = cls.objects.filter(pub_date__date = today)
return news
@classmethod
def days_news(cls,date):
news = cls.objects.filter(pub_date__date = date)
return news
@classmethod
def search_by_title(cls,search_term):
news = cls.objects.filter(title__icontains=search_term)
return news
class NewsRecipients(models.Model):
name = models.CharField(max_length=30)
email = models.EmailField()
class MoringaMerch(models.Model):
name = models.CharField(max_length=40)
description = models.TextField()
price = models.DecimalField(decimal_places=2, max_digits=20)
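# Example queries (e.g. from a Django shell):
#   Article.today_news()                 # articles published today
#   Article.days_news(some_date)         # articles published on a given date (hypothetical date object)
#   Article.search_by_title("python")    # case-insensitive title search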
|
985,537 | 67727abea626f8164024e5be005b50f68b7504d2 | # Generated by Django 3.2.9 on 2021-12-07 21:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("courses", "0032_auto_20211004_1733"),
]
operations = [
migrations.AddField(
model_name="courserun",
name="catalog_visibility",
field=models.CharField(
choices=[
(
"course_and_search",
"course_and_search - show on the course page and include in search results",
),
(
"course_only",
"course_only - show on the course page and hide from search results",
),
(
"hidden",
"hidden - hide on the course page and from search results",
),
],
default="course_and_search",
max_length=20,
verbose_name="catalog visibility",
),
),
]
|
985,538 | 59d46db18edbaac50618dfdbf5a1d9ec05760086 | def conta_bigramas(s):
    i = 0
    a = dict()
    while(i < len(s)-1):
        bi = s[i] + s[i+1]
        if bi in a:
            a[bi] += 1
        else:
            a[bi] = 1
        i += 1
    return a
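# Example: conta_bigramas("abab") returns {'ab': 2, 'ba': 1} -- every adjacent
# pair of characters is counted, including overlapping ones.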
|
985,539 | 1c5a0ea7c0a9cbf16ac7c0c5d5660c6bfda2563b | import mathpack.MymathFunctions
addres=mathpack.MymathFunctions.add(10,20)
print(addres)
#from mathpack.MymathFunctions import *
#data=add(10,20)
#print(data)
#data1=sub(20,10)
#print(data1) |
985,540 | 41a3dd1a12ed1099ed054a10e6647a5dbd1718db | # -*- coding: utf-8 -*-
import math
f= float(input('digite f:'))
l= float(input('digite l:'))
q= float(input('digite q:'))
delta= float(input('digite delta:'))
v= float(input('digite v:'))
# pipe diameter: d = (8*f*l*q^2 / (pi^2 * g * delta))^(1/5); the original
# precedence divided by (1/5) instead of taking the fifth root
d = (8*f*l*q**2/(math.pi**2*9.81*delta))**(1/5)
# Reynolds number: Re = 4*q / (pi * d * v)
rey = 4*q/(math.pi*d*v)
# Swamee-Jain friction factor (absolute roughness 0.000002 m)
k = 0.25/(math.log10(0.000002/(3.7*d)+5.74/rey**0.9))**2
print(d)
print(rey)
print(k)
985,541 | 610ecf2b8e695e21164c8697898f1737b2353303 | from rest_framework import generics, status, permissions
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.throttling import ScopedRateThrottle
from .models import Usuario, Conta, Cartao, Fatura, Lancamento
from .serializers import UsuarioSerializer, ContaSerializer, CartaoSerializer, FaturaSerializer, LancamentoSerializer
class UsuarioList(generics.ListAPIView):
queryset = Usuario.objects.all()
serializer_class = UsuarioSerializer
name = 'usuario-list'
def post(self, request):
try:
user = Usuario.objects.create(nome=request.data['nome'], genero=request.data['genero'], email=request.data['email'])
user.set_password(request.data['password'])
user.save()
return Response({'Message' : 'Usuário cadastrado!'}, status=status.HTTP_201_CREATED)
except Exception:
return Response({'Message' : 'Erro ao cadastrar usuário!'}, status=status.HTTP_400_BAD_REQUEST)
class UsuarioDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Usuario.objects.all()
serializer_class = UsuarioSerializer
name = 'usuario-detail'
permission_classes = (permissions.IsAdminUser,)
class ContaList(generics.ListCreateAPIView):
queryset = Conta.objects.all()
serializer_class = ContaSerializer
name = 'conta-list'
class ContaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Conta.objects.all()
serializer_class = ContaSerializer
name = 'conta-detail'
class CartaoList(generics.ListCreateAPIView):
queryset = Cartao.objects.all()
serializer_class = CartaoSerializer
name = 'cartao-list'
class CartaoDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Cartao.objects.all()
serializer_class = CartaoSerializer
name = 'cartao-detail'
class FaturaList(generics.ListCreateAPIView):
queryset = Fatura.objects.all()
serializer_class = FaturaSerializer
name = 'fatura-list'
class FaturaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Fatura.objects.all()
serializer_class = FaturaSerializer
name = 'fatura-detail'
class LancamentoList(generics.ListCreateAPIView):
queryset = Lancamento.objects.all()
serializer_class = LancamentoSerializer
name = 'lancamento-list'
class LancamentoDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Lancamento.objects.all()
serializer_class = LancamentoSerializer
name = 'lancamento-detail'
|
985,542 | 53e1889f94c354e5dcee49dd247f60386a07a2b5 | """
Add the Prime Numbers that are Anagram in the Range of 0 1000 in a Queue using
the Linked List and Print the Anagrams from the Queue. Note no Collection Library
can be used
"""
from Data_Structure_Programs.queue import Queue, Node
from Data_Structure_Programs.Prime_Number import Prime
obj = Queue()
# creating object of prime class
prime_obj = Prime()
# prime number list
prime_anagram = []
# creating prime number list in given range
prime_list = prime_obj.prime(0, 1000)
# iterate over a copy: removing items from prime_list while looping over it
# would silently skip elements
for num in list(prime_list):
    # skip single digits and primes whose pair was already handled (removed)
    if num <= 10 or num not in prime_list:
        continue
    number = prime_obj.anagram(num)
    if prime_obj.prime_check(number) and 0 <= number <= 1000:
        prime_anagram.append(number)
        prime_anagram.append(num)
        prime_list.remove(number)
# length of prime anagram list
length = len(prime_anagram)
# Adding the prime anagram into queue
for number in range(length):
num = Node(prime_anagram[number])
obj.enqueue(num)
# printing the prime anagram form Queue obj
obj.traverse()
|
985,543 | 18e8d588bc49792ec2d6fd9c89c547f35608c015 | import subprocess
import os
import shutil
import unittest
import shlex
# testdirectory = '/Users/jibrankalia/tmp/ls-test'
# testdirectory = '/tmp/ls-test'
testdirectory = os.path.expanduser('~/tmp/ls-test')  # '~' is not expanded automatically by os.makedirs or subprocess
def buildEnv(directory=testdirectory):
if not os.path.exists(directory):
os.makedirs(directory)
def cleanEnv(directory=testdirectory):
shutil.rmtree(directory)
def setupEnv(command, directory=testdirectory):
try:
subprocess.run(shlex.split(command), cwd=directory)
except PermissionError:
pass
except FileNotFoundError:
buildEnv()
def mainLS(args):
allArgs = shlex.split('/bin/ls ' + args)
lsreturn = subprocess.run(allArgs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=testdirectory)
return (lsreturn.stdout.decode())
def testLS(args, directory=testdirectory):
allArgs = shlex.split(os.getcwd() + '/ft_ls ' + args)
lsreturn = subprocess.run(allArgs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=testdirectory)
return (lsreturn.stdout.decode())
class TestLSCompare(unittest.TestCase):
maxDiff=None
def setUp(self):
buildEnv()
def tearDown(self):
cleanEnv()
def testSimple(self):
setupEnv('touch test')
args = '-1'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def testSimple3(self):
setupEnv('mkdir - dir')
args = '-lr'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_06_test_opt_rR(self):
setupEnv('mkdir -p .a .b .c && mkdir -p a b c')
args = '-1rR'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def testSimple2(self):
setupEnv('touch test')
args = '-lr'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_07_test_opt_t_0(self):
setupEnv("touch -t 201312101830.55 a")
setupEnv("touch -t 201212101830.55 b")
setupEnv("touch -t 201412101830.55 c")
setupEnv("touch -t 201411221830.55 d")
setupEnv("touch -t 201405212033.55 e")
setupEnv("touch -t 201409221830.55 f")
setupEnv("touch -t 202007221830.55 g")
setupEnv("touch -t 300012101830.55 h")
args = '-1t'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_07_test_opt_t_6(self):
setupEnv("touch C")
setupEnv("touch -t 201212101830.55 c")
setupEnv("mkdir -p sbox sbox1")
setupEnv("touch -t 201312101830.55 B")
setupEnv("touch -t 201312101830.55 a")
args = "-1t a C B sbox sbox1"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_07_test_opt_t_7(self):
setupEnv("touch C")
setupEnv("touch -t 201212101830.55 c")
setupEnv("mkdir -p sbox sbox1")
setupEnv("touch -t 201312101830.55 B")
setupEnv("touch -t 201312101830.55 a")
args = "-1t"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_08_test_opt_l_4(self):
setupEnv("mkdir -p dir/.hdir")
setupEnv("touch dir/.hdir/file")
args = "-la dir"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_11_test_single_file_1(self):
setupEnv("touch aaa")
args = "-l aaa"
expected = mainLS(args)
mine = testLS(args)
self.assertEqual(mine, expected)
def test_13_test_hyphen_hard_1(self):
setupEnv("touch - file")
args = "-1"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_13_test_hyphen_hard_2(self):
setupEnv("touch - file")
args = "-1 -"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_13_test_hyphen_hard_3(self):
setupEnv("touch - file")
args = "-1 --"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_21_test_symlink_1(self):
setupEnv("mkdir a")
setupEnv("ln -s a b")
setupEnv("rm -rf a")
args = "-1 b"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_21_test_symlink_2(self):
setupEnv("mkdir mydir")
setupEnv("ln -s mydir symdir")
setupEnv("touch mydir/file1 mydir/file1 mydir/file2 mydir/file3 mydir/file4 mydir/file5 ")
args = "-1 symdir"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_22_test_no_username(self):
args = "-l /usr/local/bin/node"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_24_test_multiple_files(self):
setupEnv("touch a b C D")
args = "-1 ./ ."
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_25_perm_special_bits(self):
setupEnv("touch file2 && chmod 1777 file2")
args = "-l"
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def test_sys_00_test_user_bin(self):
args = '-lR /usr/bin'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
@unittest.skip("Dev Null Output is annoying")
def test_sys_01_test_dev(self):
args = '-1l /dev | grep -v io8 | grep -v autofs_nowait | sed -E \"s/ +/ /g\"'
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
@unittest.skip("Dev Null Output is annoying")
def test_08_test_opt_l_5(self):
args = "-l"
setupEnv("touch .a")
setupEnv("dd bs=2 count=14450 if=/dev/random of=.a >/dev/null 2>&1")
setupEnv("ln -s .a b")
expected = mainLS(args)
self.assertEqual(testLS(args), expected)
def uniqueEnv():
currentdir = "/Users/jibrankalia/project_ls/test"
buildEnv(currentdir)
setupEnv("touch - file", currentdir)
if __name__ == '__main__':
uniqueEnv()
unittest.main()
|
985,544 | abebd29206cc59c14d8d51917a5f8c2ff95bfa7b | from PIL import Image
import os
import numpy as np
import torch
import torch.utils.data as Data
from torchvision import transforms
import json
import aConfigration
from tqdm import tqdm
dataRootPath = '../datas'
trainPath = '/AgriculturalDisease_trainingset'
validPath = '/AgriculturalDisease_validationset'
testPath = '/AgriculturalDisease_testA'
commonImgPath = '/images/'
trainLabel = '/AgriculturalDisease_train_annotations.json'
validLabel = '/AgriculturalDisease_validation_annotations.json'
dataSavedPath = "/data_np_saved"
dataSavedImgTNp = "/imgTNp.npy"
dataSavedImgVNp = "/imgVNp.npy"
dataSavedLabTNp = "/labTNp.npy"
dataSavedLabVNp = "/labVNp.npy"
dataSavedTestNp = "/testNp.npy"
dataSavedTestName = "/testImgName.npy"
def main():
readTrainAndValPic()
readTestPic()
# Not used: transforms must live inside the Dataset so they run on every epoch,
# but the Dataset in this code operates on numpy arrays, which these transforms
# cannot augment. The images were converted to numpy and saved locally in
# advance so they would not have to be re-read from disk every time.
trans = transforms.Compose([
transforms.RandomResizedCrop(aConfigration.IMAGE_SIZE),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(10),
transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)
])
transCopyV = transforms.Compose([
transforms.Resize((aConfigration.IMAGE_SIZE_COPY, aConfigration.IMAGE_SIZE_COPY)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
transCopyT = transforms.Compose([
transforms.Resize((aConfigration.IMAGE_SIZE_COPY, aConfigration.IMAGE_SIZE_COPY)),
transforms.RandomRotation(30),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomAffine(45),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def check_contain_chinese(check_str):
for ch in check_str:
# for ch in check_str.encode('utf-8'):
if '\u4e00' <= ch <= '\u9fff':
return True
return False
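# Example: check_contain_chinese("副本.jpg") -> True, check_contain_chinese("a.jpg") -> False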
def readTrainAndValLabel():
trainJFile = open(dataRootPath + trainPath + trainLabel)
valJFile = open(dataRootPath + validPath + validLabel)
tLabDict = json.load(trainJFile)
vLabDict = json.load(valJFile)
# print("see "+ str(tLabDict[:20]))
# print("see see "+ str(type(tLabDict)))
return tLabDict, vLabDict
def readTrainAndValPic():
    # Method 1 for getting filenames: read the folder. (abandoned, because the order read from the folder does not match the order of image names in the label list)
# listPicTrain = os.listdir(dataRootPath + trainPath + commonImgPath)
# listPicVal = os.listdir(dataRootPath + validPath + commonImgPath)
    # Method 2 for getting filenames: read the list inside the label json file,
    # which looks like [{'disease_class': 1, 'image_id': '62fd8bf4d53a1b94fbac16738406f10b.jpg'}, {'disease_class': 1, 'image_id': '0bdec5cccbcade6b6e94087cb5509d98.jpg'},.....]
listPicTrain = []
listPicVal = []
tLabDict, vLabDict = readTrainAndValLabel()
    # small-sample preview mode
if(aConfigration.PREVIEW):
tLabDict = tLabDict[:aConfigration.PREVIEW_TRAIN_NUM]
vLabDict = vLabDict[:aConfigration.PREVIEW_TRAIN_NUM]
for TlabItem in tLabDict:
listPicTrain.append(TlabItem['image_id'])
for VlabItem in vLabDict:
listPicVal.append(VlabItem['image_id'])
    # Remove images whose filename contains "副本" ("copy")
    # (later found that files with Chinese names are not necessarily duplicates, so keep them)
# fubenNumT= 0
# fubenNumV= 0
#
# for pic1 in listPicTrain:
# if check_contain_chinese(str(pic1)):
# listPicTrain.remove(pic1)
# print("看下中文:"+str(pic1))
# fubenNumT += 1
# for pic2 in listPicVal:
# if check_contain_chinese(str(pic2)):
# listPicVal.remove(pic2)
# fubenNumV += 1
# print('the train pic list\'s length is ' + str(len(listPicTrain)) +
# ' \nand deleted pic is '+str(fubenNumT),
# ' \nthe val pic list\'s length is ' + str(len(listPicVal)) +
# ' \nand deleted pic is ' + str(fubenNumV)
# )
print('the train pic list\'s length is ' + str(len(listPicTrain)) +
' \nthe val pic list\'s length is ' + str(len(listPicVal))
)
'''Build the label sets'''
i = 0
j = 0
# labTNp = np.zeros(len(tLabDict))
# To enlarge the data, the validation set is added to the training set
labTNp = np.zeros(len(tLabDict)+len(vLabDict))
for labTItem in tLabDict:
lab = labTItem['disease_class']
labTNp[i] = lab
i += 1
labVNp = np.zeros(len(vLabDict))
for labVItem in vLabDict:
lab2 = labVItem['disease_class']
labVNp[j] = lab2
j += 1
# To enlarge the data, add the validation labels to the training labels
labTNp[i] = lab2
i += 1
'''Build the image sets'''
# Images are kept as PIL objects (lists rather than numpy arrays) so the
# DataSet can re-apply random transforms on every epoch.
# To enlarge the data, the validation images are also appended to the training list.
imgTList = []
imgVList = []
# Process the training images one by one
for pic in tqdm(listPicTrain):
imageT = Image.open(dataRootPath + trainPath + commonImgPath + pic)
# Convert non-RGB images
if imageT.mode != 'RGB':
imageT = imageT.convert('RGB')
imgTList.append(imageT)
# Process the validation images one by one
for pic in tqdm(listPicVal):
imageV = Image.open(dataRootPath + validPath + commonImgPath + pic)
# Convert non-RGB images
if imageV.mode != 'RGB':
imageV = imageV.convert('RGB')
imgVList.append(imageV)
# To enlarge the data, append the validation images to the training list as well
imgTList.append(imageV)
# To keep per-epoch augmentation possible, the former "convert to numpy once and
# save to disk" path was dropped:
# np.save(dataRootPath + dataSavedPath + dataSavedImgTNp, imgTNp)
# np.save(dataRootPath + dataSavedPath + dataSavedImgVNp, imgVNp)
# np.save(dataRootPath + dataSavedPath + dataSavedLabTNp, labTNp)
# np.save(dataRootPath + dataSavedPath + dataSavedLabVNp, labVNp)
return imgTList, imgVList, labTNp, labVNp
def readTestPic():
testFiles = os.listdir(dataRootPath + testPath + commonImgPath)
# testFiles = os.listdir(dataRootPath + validPath + commonImgPath) #temper for output eval predion json
if aConfigration.PREVIEW_TEST:
testFiles = testFiles[:aConfigration.PREVIEW_TEST_NUM]
testImgNp = np.zeros([len(testFiles), aConfigration.IMAGE_SIZE, aConfigration.IMAGE_SIZE, 3])
k = 0
for testFile in tqdm(testFiles):
testImg = Image.open(dataRootPath + testPath + commonImgPath + testFile)
# testImg = Image.open(dataRootPath + validPath + commonImgPath + testFile) #temper for output eval predion json
testImg = testImg.resize((aConfigration.IMAGE_SIZE, aConfigration.IMAGE_SIZE))
if testImg.mode != 'RGB':
testImg = testImg.convert('RGB')
testnp = np.asarray(testImg)
testImgNp[k, :, :, :] = testnp
k += 1
if not os.path.exists(dataRootPath + dataSavedPath):
os.mkdir(dataRootPath + dataSavedPath)
np.save(dataRootPath + dataSavedPath + dataSavedTestNp, testImgNp)
np.save(dataRootPath + dataSavedPath + dataSavedTestName, testFiles)
return testImgNp, testFiles
class myDataSet(Data.Dataset):
def __init__(self, type):
# Images are loaded as PIL objects and transformed in __getitem__,
# so the random augmentation runs again on every epoch.
imgTList, imgVList, labTNp, labVNp = readTrainAndValPic()
self.type = type
if type == aConfigration.TRAIN:
self.x = imgTList
self.y = labTNp
elif type == aConfigration.EVAL:
self.x = imgVList
self.y = labVNp
def __getitem__(self, item):
image = self.x[item]
if self.type == aConfigration.TRAIN:
image = transCopyT(image)  # random augmentation for training
elif self.type == aConfigration.EVAL:
image = transCopyV(image)  # deterministic resize + normalize for eval
return image, self.y[item]
def __len__(self):
return len(self.x)
class myTestSet(Data.Dataset):
def __init__(self):
if aConfigration.NEED_RESTART_READ_TEST_DATA:
testNp, testImgName = readTestPic()
else:
testNp = np.load(dataRootPath + dataSavedPath + dataSavedTestNp)
testImgName = np.load(dataRootPath + dataSavedPath + dataSavedTestName)
testImgName = list(testImgName)
self.x = testNp
self.y = testImgName
def __getitem__(self, item):
return torch.from_numpy(self.x[item]), self.y[item]
def __len__(self):
return len(self.x)
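# Usage sketch (the batch size is illustrative; TRAIN comes from aConfigration):
# train_loader = Data.DataLoader(myDataSet(aConfigration.TRAIN), batch_size=32, shuffle=True)
# for images, labels in train_loader:
#     ...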
if __name__ == '__main__':
main()
|
985,545 | 6542678c25c7b3cc1096e689ad476ee141aaa40a | import time
import inspect
from functools import partial
import torch
import pandas as pd
from loguru import logger
from sklearn.metrics import precision_recall_fscore_support
def to_device(x, device):
if not isinstance(x, dict):
return x
new_x = {}
for k, v in x.items():
if isinstance(v, torch.Tensor):
new_v = v.to(device)
elif isinstance(v, (tuple, list)) and len(v) > 0 and isinstance(v[0], torch.Tensor):
new_v = [i.to(device) for i in v]
else:
new_v = v
new_x[k] = new_v
return new_x
def aggregate_dict(x):
"""Aggregate a list of dict to form a new dict"""
agg_x = {}
for ele in x:
assert isinstance(ele, dict)
for k, v in ele.items():
if k not in agg_x:
agg_x[k] = []
if isinstance(v, (tuple, list)):
agg_x[k].extend(list(v))
else:
agg_x[k].append(v)
# Stack if possible
new_agg_x = {}
for k, v in agg_x.items():
try:
v = torch.cat(v, dim=0)
except Exception:
pass
new_agg_x[k] = v
return new_agg_x
def raise_or_warn(action, msg):
if action == "raise":
raise ValueError(msg)
else:
logger.warning(msg)
class ConfigComparer:
"""Compare two config dictionaries. Useful for checking when resuming from
previous session."""
_to_raise_error = [
"model->model_name_or_path"
]
_to_warn = [
"model->config_name", "model->tokenizer_name", "model->cache_dir", "model->freeze_base_model", "model->fusion",
"model->lambdas"
]
def __init__(self, cfg_1, cfg_2):
self.cfg_1 = cfg_1
self.cfg_2 = cfg_2
def compare(self):
for components, action in \
[(self._to_raise_error, "raise"), (self._to_warn, "warn")]:
for component in components:
curr_scfg_1, curr_scfg_2 = self.cfg_1, self.cfg_2 # subconfigs
for key in component.split("->"):
if key not in curr_scfg_1 or key not in curr_scfg_2:
raise ValueError(
f"Component {component} not found in config file.")
curr_scfg_1 = curr_scfg_1[key]
curr_scfg_2 = curr_scfg_2[key]
if curr_scfg_1 != curr_scfg_2:
msg = (f"Component {component} is different between "
f"two config files\nConfig 1: {curr_scfg_1}\n"
f"Config 2: {curr_scfg_2}.")
raise_or_warn(action, msg)
return True
def collect(config, args, collected):
"""Recursively collect each argument in `args` from `config` and write to
`collected`."""
if not isinstance(config, dict):
return
keys = list(config.keys())
for arg in args:
if arg in keys:
if arg in collected: # already collected
raise RuntimeError(f"Found repeated argument: {arg}")
collected[arg] = config[arg]
for key, sub_config in config.items():
collect(sub_config, args, collected)
def from_config(main_args=None, requires_all=False):
"""Wrapper for all classes, which wraps `__init__` function to take in only
a `config` dict, and automatically collect all arguments from it. An error
is raised when duplication is found. Note that keyword arguments are still
allowed, in which case they won't be collected from `config`.
Parameters
----------
main_args : str
If specified (with "a->b" format), arguments will first be collected
from this subconfig. If there are any arguments left, recursively find
them in the entire config. Multiple main args are to be separated by
",".
requires_all : bool
Whether all function arguments must be found in the config.
"""
global_main_args = main_args
if global_main_args is not None:
global_main_args = global_main_args.split(",")
global_main_args = [args.split("->") for args in global_main_args]
def decorator(init):
init_args = inspect.getfullargspec(init)[0][1:] # excluding self
def wrapper(self, config=None, main_args=None, **kwargs):
# Add config to self
if config is not None:
self.config = config
# Get config from self
elif getattr(self, "config", None) is not None:
config = self.config
if main_args is None:
main_args = global_main_args
else:
# Overwrite global_main_args
main_args = main_args.split(",")
main_args = [args.split("->") for args in main_args]
collected = kwargs # contains keyword arguments
not_collected = [arg for arg in init_args if arg not in collected]
# Collect from main args
if config is not None and main_args is not None \
and len(not_collected) > 0:
for main_arg in main_args:
sub_config = config
for arg in main_arg:
if arg not in sub_config:
break # break when `main_args` is invalid
sub_config = sub_config[arg]
else:
collect(sub_config, not_collected, collected)
not_collected = [arg for arg in init_args
if arg not in collected]
if len(not_collected) == 0:
break
# Collect from the rest
not_collected = [arg for arg in init_args if arg not in collected]
if config is not None and len(not_collected) > 0:
collect(config, not_collected, collected)
# Validate
if requires_all and (len(collected) < len(init_args)):
not_collected = [arg for arg in init_args
if arg not in collected]
raise RuntimeError(
f"Found missing argument(s) when initializing "
f"{self.__class__.__name__} class: {not_collected}.")
# Call function
return init(self, **collected)
return wrapper
return decorator
class Timer:
def __init__(self):
self.global_start_time = time.time()
self.start_time = None
self.last_interval = None
self.accumulated_interval = None
def start(self):
assert self.start_time is None
self.start_time = time.time()
def end(self):
assert self.start_time is not None
self.last_interval = time.time() - self.start_time
self.start_time = None
# Update accumulated interval
if self.accumulated_interval is None:
self.accumulated_interval = self.last_interval
else:
self.accumulated_interval = (
0.9 * self.accumulated_interval + 0.1 * self.last_interval)
def get_last_interval(self):
return self.last_interval
def get_accumulated_interval(self):
return self.accumulated_interval
def get_total_time(self):
return time.time() - self.global_start_time
def compute_metrics_from_inputs_and_outputs(inputs, outputs, tokenizer, save_csv_path=None, show_progress=False):
if isinstance(inputs, dict):
inputs = [inputs]
if isinstance(outputs, dict):
outputs = [outputs]
input_ids_all = []
has_gt = "l1_cls_gt" in inputs[0]
l1_cls_preds_all, l2_cls_preds_all, l3_cls_preds_all = [], [], []
l1_probs_preds_all, l2_probs_preds_all, l3_probs_preds_all = [], [], []
if has_gt:
l1_cls_gt_all, l2_cls_gt_all, l3_cls_gt_all = [], [], []
if show_progress:
from tqdm import tqdm
else:
tqdm = lambda x, **kwargs: x
for inputs_i, outputs_i in tqdm(zip(inputs, outputs), desc="Processing predictions"): # by batch
input_ids = inputs_i["input_ids"]
input_ids_all.append(input_ids)
# Groundtruths
if has_gt:
l1_cls_gt, l2_cls_gt, l3_cls_gt = inputs_i["l1_cls_gt"], inputs_i["l2_cls_gt"], inputs_i["l3_cls_gt"]
l1_cls_gt_all.append(l1_cls_gt)
l2_cls_gt_all.append(l2_cls_gt)
l3_cls_gt_all.append(l3_cls_gt)
# Predictions
l1_cls_preds = outputs_i["l1_cls_preds"]
l1_probs_preds, l1_cls_preds = l1_cls_preds.max(dim=1) # (B,)
l1_cls_preds_all.append(l1_cls_preds)
l1_probs_preds_all.append(l1_probs_preds)
l2_cls_preds = outputs_i["l2_cls_preds"]
l2_probs_preds, l2_cls_preds = l2_cls_preds.max(dim=1) # (B,)
l2_cls_preds_all.append(l2_cls_preds)
l2_probs_preds_all.append(l2_probs_preds)
l3_cls_preds = outputs_i["l3_cls_preds"]
l3_probs_preds, l3_cls_preds = l3_cls_preds.max(dim=1) # (B,)
l3_cls_preds_all.append(l3_cls_preds)
l3_probs_preds_all.append(l3_probs_preds)
# Combine results
l1_cls_preds_all = torch.cat(l1_cls_preds_all, dim=0) # (N,), where N is length of the dataset
l1_probs_preds_all = torch.cat(l1_probs_preds_all, dim=0) # (N,)
l2_cls_preds_all = torch.cat(l2_cls_preds_all, dim=0) # (N,)
l2_probs_preds_all = torch.cat(l2_probs_preds_all, dim=0) # (N,)
l3_cls_preds_all = torch.cat(l3_cls_preds_all, dim=0) # (N,)
l3_probs_preds_all = torch.cat(l3_probs_preds_all, dim=0) # (N,)
if has_gt:
l1_cls_gt_all = torch.cat(l1_cls_gt_all, dim=0) # (N,)
l2_cls_gt_all = torch.cat(l2_cls_gt_all, dim=0) # (N,)
l3_cls_gt_all = torch.cat(l3_cls_gt_all, dim=0) # (N,)
# Calculate metrics
if has_gt:
metrics = {}
preds_data = [
("l1", l1_cls_preds_all, l1_cls_gt_all),
("l2", l2_cls_preds_all, l2_cls_gt_all),
("l3", l3_cls_preds_all, l3_cls_gt_all),
]
for level, level_preds, level_gt in preds_data:
for t in ["micro", "macro"]:
precision, recall, f1, support = precision_recall_fscore_support(
level_gt.cpu().numpy(), level_preds.cpu().numpy(), average=t, zero_division=1)
metrics[f"{level}_precision_{t}"] = precision
metrics[f"{level}_recall_{t}"] = recall
metrics[f"{level}_f1_{t}"] = f1
# Generate prediction csv if needed
if save_csv_path is not None:
decode = partial(tokenizer.decode, skip_special_tokens=True,
clean_up_tokenization_spaces=True)
input_i, input_j = 0, -1
records = []
for i, (l1_cls_pred, l1_prob_pred, l2_cls_pred, l2_prob_pred, l3_cls_pred, l3_prob_pred) \
in enumerate(zip(l1_cls_preds_all, l1_probs_preds_all,
l2_cls_preds_all, l2_probs_preds_all,
l3_cls_preds_all, l3_probs_preds_all)):
# If has groundtruths
if has_gt:
l1_cls_gt = l1_cls_gt_all[i]
l2_cls_gt = l2_cls_gt_all[i]
l3_cls_gt = l3_cls_gt_all[i]
# Get index of the `input_ids_all`
input_j += 1
if input_j >= len(input_ids_all[input_i]):
input_i += 1
input_j = 0
input_ids = input_ids_all[input_i][input_j].tolist()
record = {
"text": decode(input_ids),
}
if has_gt:
to_iterate = [
(l1_cls_pred, l1_prob_pred, l1_cls_gt, "l1"),
(l2_cls_pred, l2_prob_pred, l2_cls_gt, "l2"),
(l3_cls_pred, l3_prob_pred, l3_cls_gt, "l3"),
]
for cls_pred, prob_pred, cls_gt, col_name in to_iterate:
record.update({
f"{col_name}_gt": cls_gt, f"{col_name}_pred": cls_pred,
f"{col_name}_pred_prob": prob_pred,
})
else:
to_iterate = [
(l1_cls_pred, l1_prob_pred, "l1"),
(l2_cls_pred, l2_prob_pred, "l2"),
(l3_cls_pred, l3_prob_pred, "l3"),
]
for cls_pred, prob_pred, col_name in to_iterate:
record.update({
f"{col_name}_pred": cls_pred,
f"{col_name}_pred_prob": prob_pred,
})
records.append(record)
df = pd.DataFrame.from_records(records)
df.to_csv(save_csv_path, index=False)
if has_gt:
return metrics
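# Minimal usage sketches (the names below are hypothetical, illustrative only):
#
# class Trainer:
#     @from_config(main_args="training", requires_all=True)
#     def __init__(self, lr, batch_size):
#         self.lr, self.batch_size = lr, batch_size
#
# cfg = {"training": {"lr": 1e-3, "batch_size": 32}}
# trainer = Trainer(config=cfg)  # lr and batch_size are collected from cfg["training"]
#
# aggregate_dict merges per-batch dicts, concatenating tensors where possible:
# aggregate_dict([{"loss": torch.tensor([0.5])}, {"loss": torch.tensor([0.3])}])
# # -> {'loss': tensor([0.5000, 0.3000])}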
|
985,546 | 3ede7c9d2081e88199c5008edd784e2d8676f96b | class Config(object):
DEBUG = True
secret_key = 'secret'
|
985,547 | 0a03806dae46dc915bffe3d712b19c4705f693d7 | #
# This file is part of VIRL 2
# Copyright (c) 2019-2023, Cisco Systems, Inc.
# All rights reserved.
#
# Python bindings for the Cisco VIRL 2 Network Simulation Platform
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import logging
import time
import warnings
from functools import total_ordering
from typing import TYPE_CHECKING, Optional
from ..utils import check_stale, locked
from ..utils import property_s as property
if TYPE_CHECKING:
import httpx
from .interface import Interface
from .lab import Lab
from .node import Node
_LOGGER = logging.getLogger(__name__)
@total_ordering
class Link:
def __init__(
self,
lab: Lab,
lid: str,
iface_a: Interface,
iface_b: Interface,
label: Optional[str] = None,
) -> None:
"""
A VIRL2 network link between two nodes, connecting
to two interfaces on these nodes.
:param lab: the lab object
:param lid: the link ID
:param iface_a: the first interface of the link
:param iface_b: the second interface of the link
:param label: the link label
"""
self._id = lid
self._interface_a = iface_a
self._interface_b = iface_b
self._label = label
self.lab = lab
self._session: httpx.Client = lab.session
self._state: Optional[str] = None
self._stale = False
self.statistics = {
"readbytes": 0,
"readpackets": 0,
"writebytes": 0,
"writepackets": 0,
}
def __str__(self):
return f"Link: {self._label}{' (STALE)' if self._stale else ''}"
def __repr__(self):
return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format(
self.__class__.__name__,
str(self.lab),
self._id,
self._interface_a,
self._interface_b,
self._label,
)
def __eq__(self, other: object):
if not isinstance(other, Link):
return False
return self._id == other._id
def __lt__(self, other: object):
if not isinstance(other, Link):
return False
return self._id < other._id
def __hash__(self):
return hash(self._id)
@property
def id(self):
return self._id
@property
def interface_a(self):
return self._interface_a
@property
def interface_b(self):
return self._interface_b
@property
@locked
def state(self) -> Optional[str]:
self.lab.sync_states_if_outdated()
if self._state is None:
url = self.base_url
self._state = self._session.get(url).json()["state"]
return self._state
@property
def readbytes(self) -> int:
self.lab.sync_statistics_if_outdated()
return self.statistics["readbytes"]
@property
def readpackets(self) -> int:
self.lab.sync_statistics_if_outdated()
return self.statistics["readpackets"]
@property
def writebytes(self) -> int:
self.lab.sync_statistics_if_outdated()
return self.statistics["writebytes"]
@property
def writepackets(self) -> int:
self.lab.sync_statistics_if_outdated()
return self.statistics["writepackets"]
@property
def node_a(self) -> Node:
self.lab.sync_topology_if_outdated()
return self.interface_a.node
@property
def node_b(self) -> Node:
self.lab.sync_topology_if_outdated()
return self.interface_b.node
@property
@locked
def nodes(self) -> tuple[Node, Node]:
"""Return nodes this link connects."""
self.lab.sync_topology_if_outdated()
return self.node_a, self.node_b
@property
@locked
def interfaces(self) -> tuple[Interface, Interface]:
self.lab.sync_topology_if_outdated()
return self.interface_a, self.interface_b
@property
def label(self) -> Optional[str]:
self.lab.sync_topology_if_outdated()
return self._label
@locked
def as_dict(self) -> dict[str, str]:
return {
"id": self.id,
"interface_a": self.interface_a.id,
"interface_b": self.interface_b.id,
}
@property
def lab_base_url(self) -> str:
return self.lab.lab_base_url
@property
def base_url(self) -> str:
return self.lab_base_url + "/links/{}".format(self.id)
def remove(self):
self.lab.remove_link(self)
@check_stale
def _remove_on_server(self) -> None:
_LOGGER.info("Removing link %s", self)
url = self.base_url
self._session.delete(url)
def remove_on_server(self) -> None:
warnings.warn(
"'Link.remove_on_server()' is deprecated, use 'Link.remove()' instead.",
DeprecationWarning,
)
self._remove_on_server()
def wait_until_converged(
self, max_iterations: Optional[int] = None, wait_time: Optional[int] = None
) -> None:
_LOGGER.info("Waiting for link %s to converge", self.id)
max_iter = (
self.lab.wait_max_iterations if max_iterations is None else max_iterations
)
wait_time = self.lab.wait_time if wait_time is None else wait_time
for index in range(max_iter):
converged = self.has_converged()
if converged:
_LOGGER.info("Link %s has converged", self.id)
return
if index % 10 == 0:
_LOGGER.info(
"Link has not converged, attempt %s/%s, waiting...",
index,
max_iter,
)
time.sleep(wait_time)
msg = "Link %s has not converged, maximum tries %s exceeded" % (
self.id,
max_iter,
)
_LOGGER.error(msg)
# after maximum retries are exceeded and link has not converged
# error must be raised - it makes no sense to just log info
# and let client fail with something else if wait is explicitly
# specified
raise RuntimeError(msg)
@check_stale
def has_converged(self) -> bool:
url = self.base_url + "/check_if_converged"
converged = self._session.get(url).json()
return converged
@check_stale
def start(self, wait: Optional[bool] = None) -> None:
url = self.base_url + "/state/start"
self._session.put(url)
if self.lab.need_to_wait(wait):
self.wait_until_converged()
@check_stale
def stop(self, wait: Optional[bool] = None) -> None:
url = self.base_url + "/state/stop"
self._session.put(url)
if self.lab.need_to_wait(wait):
self.wait_until_converged()
@check_stale
def set_condition(
self, bandwidth: int, latency: int, jitter: int, loss: float
) -> None:
"""
Applies conditioning to this link.
:param bandwidth: desired bandwidth, 0-10000000 kbps
:param latency: desired latency, 0-10000 ms
:param jitter: desired jitter, 0-10000 ms
:param loss: desired loss, 0-100%
"""
url = self.base_url + "/condition"
data = {
"bandwidth": bandwidth,
"latency": latency,
"jitter": jitter,
"loss": loss,
}
self._session.patch(url, json=data)
@check_stale
def get_condition(self) -> dict:
"""
Retrieves the current condition on this link.
If there is no link condition applied, an empty dictionary is returned.
(Note: this used to (erroneously) say None would be returned
when no condition is applied, but that was never the case.)
:return: the applied link condition
"""
url = self.base_url + "/condition"
condition = self._session.get(url).json()
keys = ["bandwidth", "latency", "jitter", "loss"]
result = {k: v for (k, v) in condition.items() if k in keys}
return result
@check_stale
def remove_condition(self) -> None:
"""
Removes link conditioning.
If there's no condition applied then this is a no-op for the controller.
"""
url = self.base_url + "/condition"
self._session.delete(url)
def set_condition_by_name(self, name: str) -> None:
"""
A convenience function to provide
some commonly used link condition settings for various link types.
Inspired by: https://github.com/tylertreat/comcast
========= ============ ========= ========
Name Latency (ms) Bandwidth Loss (%)
========= ============ ========= ========
gprs 500 50 kbps 2.0
edge 300 250 kbps 1.5
3g 250 750 kbps 1.5
dialup 185 40 kbps 2.0
dsl1 70 2 mbps 2.0
dsl2 40 8 mbps 0.5
wifi 10 30 mbps 0.1
wan1 80 256 kbps 0.2
wan2 80 100 mbps 0.2
satellite 1500 1 mbps 0.2
========= ============ ========= ========
:param name: the predefined condition name as outlined in the table above
:raises ValueError: if the given name isn't known
"""
options = {
"gprs": (500, 50, 2.0),
"edge": (300, 250, 1.5),
"3g": (250, 750, 1.5),
"dialup": (185, 40, 2.0),
"dsl1": (70, 2000, 2.0),
"dsl2": (40, 8000, 0.5),
"wifi": (40, 30000, 0.2),
"wan1": (80, 256, 0.2),
"wan2": (80, 100000, 0.2),
"satellite": (1500, 1000, 0.2),
}
if name not in options:
msg = "unknown condition name '{}', known values: '{}'".format(
name,
", ".join(sorted(options)),
)
_LOGGER.error(msg)
raise ValueError(msg)
latency, bandwidth, loss = options[name]
self.set_condition(bandwidth, latency, 0, loss)
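# Usage sketch, assuming an existing Link object `link` (all methods shown are
# defined above):
#     link.set_condition_by_name("dsl2")  # 40 ms latency, 8 mbps, 0.5% loss
#     link.get_condition()                # e.g. {'bandwidth': 8000, 'latency': 40, 'jitter': 0, 'loss': 0.5}
#     link.remove_condition()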
|
985,548 | c235d9533272429b5f21a13b29f23b54c137e5de | from random import randrange
from tkinter import *
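# Toggle puzzle: flipping a checkbox also flips its four neighbours; the game
# is won when every box ends up on or every box ends up off.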
class MainWindow():
def __init__(self, root, W=10, H=16):
self.W, self.H = W, H
self.root = root
self.root.title = "Game"
self.canvas = Canvas(root, width=self.W*30, height=self.H*30, bg="white")
self.canvas.grid(column = 0, row = 0)
self.boxes = {}
self.box_vars = {(x, y):IntVar(value=0) for x in range(self.W) for y in range(self.H)}
self.trace_vars = {}
for y in range(self.H):
for x in range(self.W):
self.boxes[(x, y)] = Checkbutton(self.canvas, variable=self.box_vars[(x, y)])
self.boxes[(x, y)].grid(column=x, row=y)
self.reset_button = Button(self.root, text="Reset", command=self.randomize_board)
self.reset_button.grid()
self.randomize_board()
def randomize_board(self):
for box in self.trace_vars:
self.box_vars[box].trace_vdelete("w", self.trace_vars[box])
for _ in range(self.W * self.H * 2):
x, y = randrange(self.W), randrange(self.H)
if (x, y) not in self.box_vars:
print(x, y)
for coord in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y -1)):
if coord in self.box_vars:
self.box_vars[coord].set(1 - self.box_vars[coord].get())
for box in self.box_vars:
self.trace_vars[box] = self.box_vars[box].trace("w",
lambda a,b,c, box=box: self.update_squares(box))
def draw(self): self.root.mainloop()
def update_squares(self, coords):
x, y = coords
l_coords = ((x, y), (x + 1, y), (x - 1, y), (x, y + 1), (x, y -1))
for coord in l_coords:
if coord in self.box_vars:
self.box_vars[coord].trace_vdelete("w", self.trace_vars[coord])
self.box_vars[coord].set(1 - self.box_vars[coord].get())
self.trace_vars[coord] = self.box_vars[coord].trace("w",
lambda a,b,c, coord=coord: self.update_squares(coord))
all_on, all_off = True, True
for coord in self.box_vars:
if self.box_vars[coord].get() == 0: all_on= False
elif self.box_vars[coord].get() == 1: all_off = False
if all_on or all_off:
self.child = WinWindow(Tk())
self.child.draw()
class WinWindow():
def __init__(self, root):
self.root = root
self.root.title = "You Win!"
self.label = Label(self.root, text = "You Win!")
self.label.pack()
self.button = Button(self.root, text="Close", command=self.root.destroy)
self.button.pack()
def draw(self): self.root.mainloop()
a = MainWindow(Tk())
a.draw() |
985,549 | 3f144c999c53b9796954ce04f699114dcd5ceff2 | # import libraries
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from sklearn import preprocessing
import re
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.externals import joblib
import sys
def load_data(database_filepath):
"""Load dataframe from sql db
Parameters:
database_filepath (string): Path to database file to export
Returns:
X (Dataframe): Feature Vector
Y (Dataframe): Target Vector
category_names (List): List of strings for column names for categories
"""
# load data from database
engine = create_engine('sqlite:///' + database_filepath)
sql = "SELECT * FROM message"
df = pd.read_sql_query(sql, engine)
X = df.message
Y = df[df.columns.tolist()[4:]]
'''
ndf = df.groupby('genre').count()
key = [x for x in ndf.index]
'''
return(X, Y, df.columns.tolist()[4:])
def tokenize(text):
"""Function returns after performing preprocessing steps on text including
tolower, tokenization, stopwords removal and lemmatize
Parameters:
text (string): Refers to individual words passed in
Returns:
stemmed(string): Returns text with operations performed.
"""
text = text.lower()
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
words = word_tokenize(text)
words = [w for w in words if w not in stopwords.words("english")]
stemmed = [WordNetLemmatizer().lemmatize(w) for w in words]
return(stemmed)
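# For instance, tokenize("Water needed in Dhaka!") returns
# ['water', 'needed', 'dhaka']: lowercased, punctuation stripped,
# the stopword "in" removed, and each token lemmatized.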
def build_model():
"""Build Machine Learning Model
Returns (model): Pipeline and gridsearch model
"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(DecisionTreeClassifier()))
])
parameters = {'clf__estimator__min_samples_split':[2, 4, 6],
'clf__estimator__max_depth': [2, 4]}
#parameters = {'clf__estimator__min_samples_split':[2]}
cv = GridSearchCV(pipeline, parameters)
return(cv)
def evaluate_model(model, X_test, Y_test, category_names):
"""
Function returns the performance of test set for each category_names
model (model): Model passed in for prediction
X_test (dataframe): Test set input features to predict on
Y_test (dataframe): Ground truth target values
category_names (List): String list of target category names
"""
print("Testing Performance")
print(classification_report(Y_test, model.predict(X_test), target_names=category_names))
#Todo cat names
def save_model(model, model_filepath):
"""Saves model passed in to specified filepath
model (model): Model to save
model_filepath (string): Location to save model
"""
joblib.dump(model, model_filepath)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
best_model = model.best_estimator_
print('Evaluating model...')
evaluate_model(best_model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(best_model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
985,550 | fe9226da9357391b50f552e66b15a88f54f29df4 | from django.contrib import admin
from .models import Chapter, Recipe
admin.site.register([Chapter, Recipe])
|
985,551 | 60022294439eb413e2a828ce6c4e6953133d6912 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 08 17:16:36 2017
@author: nitin.kotcherlakota
"""
import pygame
pygame.init()
#To set a resolution.
surface = pygame.display.set_mode((800,400))
pygame.display.set_caption('Helicopter')
# Every computer game runs at some number of frames per second
clock = pygame.time.Clock()
game_over = False
# display.flip updates the entire surface, while display.update can update only
# certain parts or surfaces. Called with no arguments, display.update behaves
# like flip.
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
pygame.display.update()
clock.tick(60)
pygame.quit()
quit()
|
985,552 | a4b0f9ac34dc2c544385e08e6d3b6c435c224cfd | def alphabet_position(text):
import string
# Keep letters only; digits would make the index lookup below fail
letters = "".join(filter(str.isalpha, text)).lower()
positions = [string.ascii_lowercase.index(ch) + 1 for ch in letters]
return " ".join(str(p) for p in positions)
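# Expected output of the call below:
# "20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11"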
print(alphabet_position("The sunset sets at twelve o clock")) |
985,553 | 66842081a9070fa2a0ec18537c0780ba6af98f61 | import pandas as pd
csv = pd.read_csv("seeds_dataset.csv")
data = csv.values.tolist()
import numpy as np
def normalize_data(data_list):
data = np.asarray(data_list)
col_maxes = data.max(axis=0)
return (data / col_maxes[np.newaxis, :]).tolist()
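# e.g. normalize_data([[2, 10], [4, 5]]) -> [[0.5, 1.0], [1.0, 0.5]] (column-wise max scaling)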
splitted = []
for value in data:
tmp = value[0].split("\t")
tmp2 = []
for token in tmp:
if token != '':
tmp2.append(float(token))
splitted.append(tmp2)
splitted = normalize_data(splitted)
proper_format = []
for value in splitted:
tmp = [0] * 2
tmp_target = [0, 0, 0]
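# The class column was normalized above to {1/3, 2/3, 1}; multiplying by 3.2 and
# truncating with int() recovers the class {1, 2, 3}, shifted down to index {0, 1, 2}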
tmp_target[(int(value[7]*3.2)- 1)] = 1
tmp[0] = value[0:7]
tmp[1] = tmp_target
proper_format.append(tmp)
import random
random.shuffle(proper_format)
training_sets = proper_format[0:140].copy()
test_sets = proper_format[140:len(proper_format)].copy()
from NeuralNetwork import NeuralNetwork
import matplotlib.pyplot as plt
def test_case(iterations, size, learning_rate, momentum):
nn = NeuralNetwork(size, learning_rate, momentum)
history_errors = []
i = 0
while i < iterations :
training_inputs, training_outputs = random.choice(training_sets)
nn.train(training_inputs, training_outputs)
if i % (iterations / 100) == 0:
error = nn.calculate_total_error(training_sets)
history_errors.append(error)
i = i + 1
accurate_outputs = 0.0
for i in range(len(test_sets)):
output = nn.feed_forward(test_sets[i][0])
max_index_output = output.index(max(output))
max_index_target = test_sets[i][1].index(max(test_sets[i][1]))
if max_index_output == max_index_target:
accurate_outputs = accurate_outputs + 1.0
test_rate = accurate_outputs / float(len(test_sets))
print("iterations = ", iterations, "size = ", size, "Learning rate: ", learning_rate, " Momentum: ", momentum,"Test rate ", test_rate)
plt.plot(history_errors)
plt.xlabel("History points.")
plt.ylabel("Total error.")
plt.title("History of total errors.")
plt.show()
print("Exp. 4 - various iterations.")
test_case(500, [7,7,3], 0.5, 0.3)
test_case(500, [7,7,3], 0.5, 0.3)
test_case(500, [7,7,3], 0.5, 0.3)
print()
test_case(1500, [7,7,3], 0.5, 0.3)
test_case(1500, [7,7,3], 0.5, 0.3)
test_case(1500, [7,7,3], 0.5, 0.3)
print()
test_case(5000, [7,7,3], 0.5, 0.3)
test_case(5000, [7,7,3], 0.5, 0.3)
test_case(5000, [7,7,3], 0.5, 0.3)
print("Exp. 5 - various learning rate.")
test_case(2000, [7,7,3], 0.2, 0.0)
test_case(2000, [7,7,3], 0.2, 0.0)
test_case(2000, [7,7,3], 0.2, 0.0)
print()
test_case(2000, [7,7,3], 0.5, 0.0)
test_case(2000, [7,7,3], 0.5, 0.0)
test_case(2000, [7,7,3], 0.5, 0.0)
print()
test_case(2000, [7,7,3], 0.8, 0.0)
test_case(2000, [7,7,3], 0.8, 0.0)
test_case(2000, [7,7,3], 0.8, 0.0)
print()
print("Exp. 6 - various momentum.")
test_case(2000, [7,7,3], 0.3, 0.1)
test_case(2000, [7,7,3], 0.3, 0.1)
test_case(2000, [7,7,3], 0.3, 0.1)
print()
test_case(2000, [7,7,3], 0.3, 0.5)
test_case(2000, [7,7,3], 0.3, 0.5)
test_case(2000, [7,7,3], 0.3, 0.5)
print()
test_case(2000, [7,7,3], 0.3, 0.9)
test_case(2000, [7,7,3], 0.3, 0.9)
test_case(2000, [7,7,3], 0.3, 0.9)
print()
print('b') |
985,554 | 405806dbde04b8c55748cd8be55342344e887d72 | # -*- coding: utf-8 -*-
# Rodrigo - Exam 2, Text Correction, 3088
while True: # Repeat until every test case has been handled.
try: # Guards against the EOF error.
Frase=str(input()) # Read the sentence to be corrected.
i=0 # Used below.
while True:
if Frase[i]==' ' and (Frase[i+1]==',' or Frase[i+1]=='.'):
Frase=list(Frase) # Turn the string into a list to make editing easier.
del Frase[i] # Remove the space before the punctuation mark.
Frase=''.join(Frase) # Turn `Frase` back into a string.
if (len(Frase)-1)==i: # Avoids an "index out of range" error by ending the loop before i exceeds the length of `Frase`.
break
i+=1 # Next index to be tested by the first "if" of this loop.
print(Frase) # Print the corrected sentence.
except EOFError: # Stop execution once EOF is reached, as the problem requires.
break |
985,555 | 696a8390c8056eb6260750d4e8f51d59e2847267 | import urllib
import urllib2
# python debug file, tested on python 2.7
class Log:
serverURL = "http://17ky.xyt:8080"
token = 'xytschool'
forbidden = False
def setServer(self ,url):
self.serverURL = url
def setToken(self, token):
self.token= token
def send(self, type, content, group):
frame = {'token': self.token,
'type': type,
'group': group,
'data': content,
'contentType': 'text'}
return self._send(self.serverURL, frame)
def _send(self , requrl ,frame ):
_frame = urllib.urlencode(frame)
req = urllib2.Request(url = requrl,data =_frame)
res_data = urllib2.urlopen(req)
res = res_data.read()
return res
def info(self ,content ,group='all'):
return self.send('info',content,group)
def warning(self, content, group='all'):
return self.send('warning', content, group)
def error(self ,content ,group='all'):
return self.send('error',content,group)
# test
log = Log()
res = log.warning('run at line 43')
res = log.info('run at line 44')
res = log.error('run at line 45')
res = log.warning('run at line 43', 'group1')
res = log.info('run at line 44', 'group2')
res = log.error('run at line 45', 'group1')
print(res)
|
985,556 | d6c3fd99692d9eeb4f68b01113811be0ee0798de | # -*- coding: utf-8 -*-
"""RunMain.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kCQxSaqlZvUz19ZfPYekVR6uZCFHbtF6
"""
from google.colab import drive
drive.mount('/content/gdrive')
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import scipy
import os
os.chdir('/content/gdrive/My Drive/MP2/')
def cvt2LAB(img, show):
lab= cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
plt.imshow(lab)
if(show):
plt.show()
l, a, b = cv2.split(lab)
plt.imshow(l, cmap ='gray')
if(show):
plt.show()
plt.imshow(a, cmap= 'gray')
if(show):
plt.show()
plt.imshow(b, cmap = 'gray')
if(show):
plt.show()
labm = cv2.medianBlur(lab, 9)
plt.imshow(labm)
if(show):
plt.show()
return lab, labm, l, a, b
def extractGabor(img, ksizeRange, sigmaRange, thetaRange, gammaRange, lamdaRange, show):
features = []
fmasks = []
dim = len(img.shape)
for ksize in np.arange(ksizeRange[0], ksizeRange[1], ksizeRange[2]):
for sigma in np.arange(sigmaRange[0], sigmaRange[1], sigmaRange[2]):
for lamda in np.arange(lamdaRange[0], lamdaRange[1], lamdaRange[2]):
for gamma in np.arange(gammaRange[0], gammaRange[1], gammaRange[2]):
for theta in np.arange(thetaRange[0], thetaRange[1], thetaRange[2]):
k = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, 0, ktype=cv2.CV_32F)
fimg = cv2.filter2D(img, cv2.CV_8UC1, k)
fimg = cv2.medianBlur(fimg, 5)
#fimg = cv2.GaussianBlur(fimg, (51,51), 5, 5)
if(show):
plt.figure(figsize = (8,8))
plt.imshow(fimg, cmap = 'gray')
plt.show()
print(ksize, sigma, gamma, lamda, theta)
if(dim == 2):
#ret,fmask = cv2.threshold(fimg,254,1,cv2.THRESH_BINARY_INV)
#fmask = fmask.reshape((fimg.shape[0]*fimg.shape[1],))
fimg = fimg.reshape((fimg.shape[0]*fimg.shape[1],))
features.append(fimg)
#features.append(fmask)
elif(dim == 3):
fimg = fimg.reshape((fimg.shape[0]*fimg.shape[1],3))
features.append(fimg[:,0])
features.append(fimg[:,1])
features.append(fimg[:,2])
else:
print("Channels error")
return 0
features = np.array(features)
features = features.T
return features
def addLABfeatures(features, labm):
features = np.hstack((features,labm[:,:,0].reshape((labm.shape[0]*labm.shape[1]),1)))
features = np.hstack((features,labm[:,:,1].reshape((labm.shape[0]*labm.shape[1]),1)))
features = np.hstack((features,labm[:,:,2].reshape((labm.shape[0]*labm.shape[1]),1)))
return features
def kmeansClustering(features, n_clusters):
kmeans = KMeans(n_clusters=n_clusters, init = 'k-means++')
kmeans.fit(features)
y = kmeans.predict(features)
return kmeans, y
#Find Background Class as most populated class and setting it to zero
def bgToZero(img_seg):
counts = np.bincount(img_seg.flatten())
background = np.argmax(counts)
if(background):
print("Changing BG")
img_seg[img_seg == background] = 255
img_seg[img_seg == 0] = background
img_seg[img_seg == 255] = 0
return img_seg
#Finding Cell and Cytoplasm Clusters
def findCorrectLabel(img_seg,lab):
clusters = np.unique(img_seg)
num_clusters = len(clusters)
clusters = clusters[1:] #As label zero is background
#Masks for each label
mask1 = np.zeros(img_seg.shape)
mask2 = np.zeros(img_seg.shape)
mask1[img_seg == clusters[0]] = 255
mask2[img_seg == clusters[1]] = 255
#Finding histogram for a space
hist1 = cv2.calcHist([lab],[1],np.uint8(mask1),[255],[0,256])
peak1 = np.argsort(-hist1.flatten())[0]
hist2 = cv2.calcHist([lab],[1],np.uint8(mask2),[255],[0,256])
peak2 = np.argsort(-hist2.flatten())[0]
if(num_clusters==3):
if(peak1 > peak2):
cell = clusters[0]
cyto = clusters[1]
else:
cell = clusters[1]
cyto = clusters[0]
m_cell=0
b_cell=0
return cell, cyto,b_cell,m_cell
if(num_clusters==4):
mask3 = np.zeros(img_seg.shape)
mask3[img_seg == clusters[2]] = 255
hist3 = cv2.calcHist([lab],[1],np.uint8(mask3),[255],[0,256])
peak3 = np.argsort(-hist3.flatten())[0]
cell=0
p = np.array([peak1, peak2, peak3])
ind = np.argsort(p)
cyto = clusters[ind[0]]
b_cell = clusters[ind[1]]
m_cell = clusters[ind[2]]
return cell,cyto,b_cell,m_cell
def findTumors(lab, img_seg, cell, cyto, b_cell, m_cell, malignant, benign):
#Find Isolated Cells
imgEnh = enhance(lab)
if(malignant):
isoMask = getIsolatedCells(imgEnh, 70)
else:
isoMask = getIsolatedCells(imgEnh)
#Finding contours and respective areas
contours, hierarchy = cv2.findContours(np.uint8(img_seg), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
n = len(contours)
areas = []
for cnt in contours:
area = cv2.contourArea(cnt)
areas.append(area)
#Sorting areas in descending order
areas = np.array(areas)
ind = np.argsort(-1*areas)
#Getting indices of tumours
numTumors = malignant + benign
tumorInd = ind[0:numTumors]
z = np.zeros(img_seg.shape)
if(not malignant or not benign):
#Creating a mask with tumors of largest area
for i in tumorInd:
cv2.drawContours( z, contours[i], -1, (255,255,255), 3)
masked_image = scipy.ndimage.morphology.binary_fill_holes(z)
z[masked_image] = img_seg[masked_image]
z = np.int64(z)
plt.imshow(z)
plt.show()
counts = np.bincount(z.flatten())
if(len(counts)-1 < cell or len(counts)-1 <cyto):
counts = np.append(counts, [0])
if(counts[cell] < counts[cyto]):
img_seg[z == cyto] = 0
img_seg = findTumors(lab, img_seg, cell, cyto, b_cell, m_cell, malignant, benign)
else:
img_seg[img_seg == cyto] = 0
img_seg[isoMask] = cell
img_seg[masked_image] = cyto
else:
tumor = np.zeros(img_seg.shape)
for i in np.arange(0,numTumors,1):
z = np.zeros(img_seg.shape)
cv2.drawContours(z,contours[ind[i]], -1,(255,255,255), 3 )
cv2.drawContours(tumor,contours[ind[i]], -1,(255,255,255), 3 )
masked_image = scipy.ndimage.morphology.binary_fill_holes(z)
z[masked_image] = img_seg[masked_image]
counts = np.bincount(img_seg[masked_image].flatten())
if(len(counts)-1 < m_cell or len(counts)-1 <cyto or len(counts)-1 <b_cell):
counts = np.append(counts, [0])
print(counts[m_cell], counts[b_cell], counts[cyto])
if(counts[m_cell] >= counts[cyto] and counts[m_cell]> counts[b_cell]):
img_seg[masked_image] = m_cell
elif (counts[b_cell]> counts[m_cell] ):
img_seg[masked_image] = b_cell
else:
img_seg[z == cyto] = 0
img_seg = findTumors(lab, img_seg, cell, cyto, b_cell, m_cell, malignant, benign)
img_seg_with_iso = np.copy(img_seg)
tumorMask = scipy.ndimage.morphology.binary_fill_holes(tumor)
img_seg[img_seg == cyto] = 0
img_seg[img_seg == b_cell] = cyto
img_seg[isoMask] = cyto
img_seg[tumorMask] = img_seg_with_iso[tumorMask]
return img_seg
def enhance(lab):
l,a,b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=7.0, tileGridSize=(8,8))
cla = clahe.apply(a)
#plt.imshow(cla)
#plt.show()
#-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
limg = cv2.merge((l,cla,b))
#plt.imshow(limg)
#plt.show()
return cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
def getIsolatedCells(imgEnh, *args):
if(len(args)):
thresh = args[0]
else:
thresh = 50 #After Histogram Analysis
#imgEnh[:,:,1] = cv2.medianBlur(imgEnh[:,:,1], 9)
isoMask = imgEnh[:,:,1] < thresh
return isoMask
def diceScore(num_clusters, img_seg, img_gt,no_malig,no_benign):
"""Dice Score"""
isolatedMask = img_seg
img_gt_gray = cv2.cvtColor(img_gt,cv2.COLOR_BGR2GRAY)
if(len(isolatedMask[(isolatedMask<1) & (isolatedMask>0)])!=0):
isolatedMask[(isolatedMask<1) & (isolatedMask>0)]=2
gray_values=[]
img_shape = img_gt_gray.shape
img_gt_gray.reshape(img_shape[0]*img_shape[1])
isolatedMask.reshape(img_shape[0]*img_shape[1])
for i in range(num_clusters-1):
gray_values.append(int(np.mean(img_gt_gray[(img_gt_gray>=255*i/(num_clusters)) & (img_gt_gray<255*(i+1)/(num_clusters))])))
gray_values.append(255)
print(gray_values)
for i in range(num_clusters):
img_gt_gray[img_gt_gray==gray_values[i]]=num_clusters-1-i
gt_pixel_ratios=[]
seg_pixel_ratios=[]
for i in range(num_clusters):
seg_pixel_ratios.append(np.sum(isolatedMask[isolatedMask==i]==i))
gt_pixel_ratios.append(np.sum(img_gt_gray[img_gt_gray==i]==i))
seg_order = np.argsort(seg_pixel_ratios)
gt_order = np.argsort(gt_pixel_ratios)
for i in range(num_clusters):
isolatedMask[isolatedMask==seg_order[i]]= num_clusters+gt_order[i]
for i in range(num_clusters):
isolatedMask[isolatedMask==(num_clusters+gt_order[i])]= gt_order[i]
isolatedMask.reshape((img_shape[0],img_shape[1]))
dice = []
confusion_matrix=[]
for k in range(num_clusters):
dice.append(np.sum(isolatedMask[img_gt_gray==k]==k)*2.0 / (np.sum(isolatedMask[isolatedMask==k]==k) + np.sum(img_gt_gray[img_gt_gray==k]==k)))
row=[]
for i in range(num_clusters):
row.append(np.sum(img_gt_gray[isolatedMask==i]==k))
confusion_matrix.append(row)
print(dice)
print(confusion_matrix)
def main(new, img_fname, *args):
#Reading images
img = cv2.imread(img_fname)
if(not new): #If not a new image
img_gt_fname = args[0]
img_gt = cv2.imread(img_gt_fname)
img_gt = np.array(img_gt)
plt.imshow(img)
plt.show()
print("Malignant:")
mal = input()
print("Benign: ")
ben = input()
mal = int(mal)
ben = int(ben)
if(not mal or not ben):
num_clusters = 3
else:
num_clusters = 4
#Convert to Lab Space
# Outputs : lab space, median filtered lab images, l, a, b channels separately
# Inputs : rgb image, show = 1, to show the lab images. Show = 0 to not display
lab,labm, l, a, b = cvt2LAB(img,0)
#Extract Gabor Features
# FN: extractGabor(img, ksizeRange, sigmaRange, thetaRange, gammaRange, lamdaRange, show)
features = extractGabor(lab, [9,10,2], [1,2,1], [0,1,1], [0.5,1.25,0.25], [3.25,4.1,0.25], 0)
#Feature Selection
features = np.hstack([features[:,0:12], features[:,18:36]])
#Adding LAB Colour channels to feature space
#features = addLABfeatures(features, labm)
#Kmeans - inputs : feature vectors, no. of clusters
#Kmeans - outputs : kmeans object, label vector
[kmeans, y] = kmeansClustering(features, num_clusters)
img_seg = y.reshape((img.shape[0], img.shape[1]))
img_seg = cv2.medianBlur(np.uint8(img_seg), 9)
#Display Image
plt.figure()
plt.imshow(img)
plt.title('Original image', fontsize = 20)
plt.show()
plt.figure()
plt.imshow(img_seg , cmap = 'gray')
plt.title('Segmented image after kmeans', fontsize = 20)
plt.show()
#Re-labelling the image
img_seg = bgToZero(img_seg)
cell, cyto,b_cell, m_cell = findCorrectLabel(img_seg, lab) #Finds the correct labels for cell and cytoplasm clusters
#Finding the tumors
img_seg_copy = np.copy(img_seg)
img_seg_withTumor = findTumors(lab, img_seg_copy, cell, cyto, b_cell, m_cell, mal, ben)
#Dice Score
if(not new):
fig2, (a3,a4) = plt.subplots(1,2, figsize=(15,15))
a3.imshow(img_seg_withTumor, cmap = 'gray')
a3.set_title('Final Segmented Image Output', fontsize = 20)
a4.imshow(img_gt)
a4.set_title('Ground Truth', fontsize = 20)
fig2.show()
dice = diceScore(num_clusters, img_seg_withTumor, img_gt, mal, ben)
else:
plt.figure(figsize = (10,10))
plt.imshow(img_seg_withTumor, cmap = 'gray')
plt.title('Final Segmented Image Output', fontsize = 15)
plt.show()
return img_seg_withTumor
img_seg = main(0, 'malignant_4x/_11019.tif', 'malignant_4x/_11019_gt.png')
|
985,557 | f25bfb8d98bd61524dfb849e2ce2ce0973208f12 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from contextlib import contextmanager
from typing import Any, Callable, Iterable, List, Optional, TYPE_CHECKING, Iterator
from numpy.random import RandomState
from nni.mutable import (
LabeledMutable, MutableList, MutableDict, Categorical, Mutable, SampleValidationError,
Sample, SampleMissingError, label_scope, auto_label, frozen_context
)
from .space import ModelStatus
if TYPE_CHECKING:
from .graph import GraphModelSpace
__all__ = ['MutationSampler', 'Mutator', 'StationaryMutator', 'InvalidMutation', 'MutatorSequence', 'Mutation']
Choice = Any
class MutationSampler:
"""
Handles :meth:`Mutator.choice` calls.
Choice is the only supported type for mutator.
"""
def choice(self, candidates: List[Choice], mutator: 'Mutator', model: GraphModelSpace, index: int) -> Choice:
raise NotImplementedError()
def mutation_start(self, mutator: 'Mutator', model: GraphModelSpace) -> None:
pass
def mutation_end(self, mutator: 'Mutator', model: GraphModelSpace) -> None:
pass
class Mutator(LabeledMutable):
"""
Mutates graphs in model to generate new model.
By default, mutator simplifies to a single-value dict with its own label as key, and itself as value.
At freeze, the strategy should provide a :class:`MutationSampler` in the dict.
This is because the freezing of mutator is dynamic
(i.e., requires a variational number of random numbers, dynamic ranges for each random number),
and the :class:`MutationSampler` here can be considered as some random number generator
to produce a random sequence based on the asks in :meth:`Mutator.mutate`.
On the other hand, a subclass mutator should implement :meth:`Mutator.mutate`, which calls :meth:`Mutator.choice` inside,
and :meth:`Mutator.choice` invokes the bound sampler to sample a choice.
The label of the mutator in most cases is the label of the nodes on which the mutator is applied to.
I imagine that mutating any model space (other than graph) might be useful,
but we would postpone the support to when we actually need it.
"""
def __init__(self, *, sampler: Optional[MutationSampler] = None, label: Optional[str] = None):
self.sampler: Optional[MutationSampler] = sampler
self.label: str = auto_label(label)
self.model: Optional[GraphModelSpace] = None
self._cur_model: Optional[GraphModelSpace] = None
self._cur_choice_idx: Optional[int] = None
def extra_repr(self) -> str:
return f'label={self.label!r}'
def leaf_mutables(self, is_leaf: Callable[[Mutable], bool]) -> Iterable[LabeledMutable]:
"""By default, treat self as a whole labeled mutable in the format dict.
Sub-class can override this to dry run the mutation upon the model and return the mutated model
for the followed-up dry run.
See Also
--------
nni.mutable.Mutable.leaf_mutables
"""
# Same as `leaf_mutables` in LabeledMutable.
return super().leaf_mutables(is_leaf)
def check_contains(self, sample: Sample) -> SampleValidationError | None:
"""Check if the sample is valid for this mutator.
See Also
--------
nni.mutable.Mutable.check_contains
"""
if self.label not in sample:
return SampleMissingError(f"Mutator {self.label} not found in sample.")
if not isinstance(sample[self.label], MutationSampler):
return SampleValidationError(f"Mutator {self.label} is not a MutationSampler.")
return None
def freeze(self, sample: dict[str, Any]) -> GraphModelSpace:
"""When freezing a mutator, we need a model to mutate on, as well as a sampler to generate choices.
As how many times the mutator is applied on the model is often variational,
a sample with fixed length will not work.
The dict values in ``sample`` should be samplers inheriting :class:`MutationSampler`.
But there are also cases where ``simplify()`` converts the mutation process into some fixed operations
(e.g., in :class:`StationaryMutator`).
In this case, sub-class should handle the freeze logic on their own.
:meth:`Mutator.freeze` needs to be called in a ``bind_model`` context.
"""
self.validate(sample)
assert self.model is not None, 'Mutator must be bound to a model before freezing.'
return self.bind_sampler(sample[self.label]).apply(self.model)
def bind_sampler(self, sampler: MutationSampler) -> Mutator:
"""Set the sampler which will handle :meth:`Mutator.choice` calls."""
self.sampler = sampler
return self
@contextmanager
def bind_model(self, model: GraphModelSpace) -> Iterator[Mutator]:
"""Mutators need a model, based on which they generate new models.
This context manager binds a model to the mutator, and unbinds it after the context.
Examples
--------
>>> with mutator.bind_model(model):
... mutator.simplify()
"""
try:
self.model = model
yield self
finally:
self.model = None
def apply(self, model: GraphModelSpace) -> GraphModelSpace:
"""
Apply this mutator on a model.
The model will be copied before mutation and the original model will not be modified.
Returns
-------
The mutated model.
"""
assert self.sampler is not None
copy = model.fork()
copy.status = ModelStatus.Mutating
self._cur_model = copy
self._cur_choice_idx = 0
self._cur_samples = []
# Some mutate() requires a full mutation history of the model.
# Therefore, parent needs to be set before the mutation.
copy.parent = Mutation(self, self._cur_samples, model, copy)
self.sampler.mutation_start(self, copy)
self.mutate(copy)
self.sampler.mutation_end(self, copy)
self._cur_model = None
self._cur_choice_idx = None
return copy
def mutate(self, model: GraphModelSpace) -> None:
"""
Abstract method to be implemented by subclass.
Mutate a model in place.
"""
raise NotImplementedError()
def choice(self, candidates: Iterable[Choice]) -> Choice:
"""Ask sampler to make a choice."""
assert self.sampler is not None and self._cur_model is not None and self._cur_choice_idx is not None
ret = self.sampler.choice(list(candidates), self, self._cur_model, self._cur_choice_idx)
self._cur_samples.append(ret)
self._cur_choice_idx += 1
return ret
def random(self, memo: Sample | None = None, random_state: RandomState | None = None) -> GraphModelSpace | None:
"""Use a :class:`_RandomSampler` that generates a random sample when mutates.
See Also
--------
nni.mutable.Mutable.random
"""
sample: Sample = {} if memo is None else memo
if random_state is None:
random_state = RandomState()
if self.label not in sample:
sample[self.label] = _RandomSampler(random_state)
if self.model is not None:
# Model is bound; perform the freeze.
return self.freeze(sample)
else:
# This will only affect the memo.
# Parent random will take care of the freeze afterwards.
return None
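# A minimal, hypothetical subclass sketch (the node lookup and update calls are
# illustrative, not the exact GraphModelSpace API): a concrete mutator overrides
# `mutate()` and asks `self.choice()` for every decision it needs.
#
# class OpMutator(Mutator):
#     def mutate(self, model: GraphModelSpace) -> None:
#         for node in model.get_nodes_by_label(self.label):  # illustrative
#             node.update_operation(self.choice(['conv3x3', 'conv5x5']))  # illustrative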
class StationaryMutator(Mutator):
"""A mutator that can be dry run.
:class:`StationaryMutator` invoke :class:`StationaryMutator.dry_run` to predict choice candidates,
such that the mutator simplifies to some static choices within `simplify()`.
This could be convenient to certain algorithms which do not want to handle dynamic samplers.
"""
def __init__(self, *, sampler: Optional[MutationSampler] = None, label: Optional[str] = None):
super().__init__(sampler=sampler, label=label)
self._dry_run_choices: Optional[MutableDict] = None
def leaf_mutables(self, is_leaf: Callable[[Mutable], bool]) -> Iterable[LabeledMutable]:
"""Simplify this mutator to a number of static choices. Invokes :meth:`StationaryMutator.dry_run`.
Must be wrapped in a ``bind_model`` context.
"""
assert self.model is not None, 'Mutator must be bound to a model before calling `simplify()`.'
choices, model = self.dry_run(self.model)
self._dry_run_choices = MutableDict(choices)
yield from self._dry_run_choices.leaf_mutables(is_leaf)
self.model = model
def check_contains(self, sample: dict[str, Any]):
if self._dry_run_choices is None:
raise RuntimeError(
'Dry run choices not found. '
'Graph model space with stationary mutators must first invoke `simplify()` before freezing.'
)
return self._dry_run_choices.check_contains(sample)
def freeze(self, sample: dict[str, Any]) -> GraphModelSpace:
self.validate(sample)
assert self._dry_run_choices is not None
assert self.model is not None
# The orders should be preserved here
samples = [sample[label] for label in self._dry_run_choices]
# We fake a _FixedSampler in this freeze to consume the already-generated samples.
sampler = _FixedSampler(samples)
return self.bind_sampler(sampler).apply(self.model)
def dry_run(self, model: GraphModelSpace) -> tuple[dict[str, Categorical], GraphModelSpace]:
"""Dry run mutator on a model to collect choice candidates.
If you invoke this method multiple times on same or different models,
it may or may not return identical results, depending on how the subclass implements `Mutator.mutate()`.
Recommended to be used in :meth:`simplify` if the mutator is static.
"""
sampler_backup = self.sampler
recorder = _RecorderSampler()
self.sampler = recorder
new_model = self.apply(model)
self.sampler = sampler_backup
# Local import to avoid name conflict.
from nni.mutable.utils import label
# NOTE: This is hacky. It fakes a label object by splitting the label string.
_label = label(self.label.split('/'))
if len(recorder.recorded_candidates) != 1:
# If the mutator is applied multiple times on the model (e.g., applied to multiple nodes)
# choices can created with a suffix to distinguish them.
with label_scope(_label):
choices = [Categorical(candidates, label=str(i)) for i, candidates in enumerate(recorder.recorded_candidates)]
else:
# Only one choice.
choices = [Categorical(recorder.recorded_candidates[0], label=_label)]
return {c.label: c for c in choices}, new_model
def random(self, memo: Sample | None = None, random_state: RandomState | None = None) -> GraphModelSpace | None:
"""Use :meth:`nni.mutable.Mutable.random` to generate a random sample."""
return Mutable.random(self, memo, random_state)
class MutatorSequence(MutableList):
"""Apply a series of mutators on our model, sequentially.
This could be generalized to a DAG indicating the dependencies between mutators,
but we don't have a use case for that yet.
"""
mutables: list[Mutator]
def __init__(self, mutators: list[Mutator]):
assert all(isinstance(mutator, Mutator) for mutator in mutators), 'mutators must be a list of Mutator'
super().__init__(mutators)
self.model: Optional[GraphModelSpace] = None
@contextmanager
def bind_model(self, model: GraphModelSpace) -> Iterator[MutatorSequence]:
"""Bind the model to a list of mutators.
The model (as well as its successors) will be bound to the mutators one by one.
The model will be unbound after the context.
Examples
--------
>>> with mutator_list.bind_model(model):
... mutator_list.freeze(samplers)
"""
try:
self.model = model
yield self
finally:
self.model = None
def leaf_mutables(self, is_leaf: Callable[[Mutable], bool]) -> Iterable[LabeledMutable]:
assert self.model is not None, 'Mutator must be bound to a model before calling `simplify()`.'
model = self.model
with frozen_context(): # ensure_frozen() might be called inside
for mutator in self.mutables:
with mutator.bind_model(model):
yield from mutator.leaf_mutables(is_leaf)
model = mutator.model
assert model is not None
def freeze(self, sample: dict[str, Any]) -> GraphModelSpace:
assert self.model is not None, 'Mutator must be bound to a model before freezing.'
model = self.model
for mutator in self.mutables:
with mutator.bind_model(model):
model = mutator.freeze(sample)
return model
class _RecorderSampler(MutationSampler):
def __init__(self):
self.recorded_candidates: List[List[Choice]] = []
def choice(self, candidates: List[Choice], *args) -> Choice:
self.recorded_candidates.append(candidates)
return candidates[0]
class _FixedSampler(MutationSampler):
def __init__(self, samples):
self.samples = samples
def choice(self, candidates, mutator, model, index):
if not 0 <= index < len(self.samples):
raise RuntimeError(f'Invalid index {index} for samples {self.samples}')
if self.samples[index] not in candidates:
raise RuntimeError(f'Invalid sample {self.samples[index]} for candidates {candidates}')
return self.samples[index]
class _RandomSampler(MutationSampler):
def __init__(self, random_state: RandomState):
self.random_state = random_state
def choice(self, candidates, mutator, model, index):
return self.random_state.choice(candidates)
class InvalidMutation(SampleValidationError):
pass
class Mutation:
"""
An execution of mutation, which consists of four parts: a mutator, a list of decisions (choices),
the model that it comes from, and the model that it becomes.
In general cases, the mutation logs are not reliable and should not be replayed, as the mutators can
be arbitrarily complex. However, for inline mutations, the labels correspond to mutator labels here,
which can be useful for metadata visualization and python execution mode.
Attributes
----------
mutator
Mutator.
samples
Decisions/choices.
from_
Model that it comes from.
to
Model that it becomes.
"""
def __init__(self, mutator: 'Mutator', samples: List[Any], from_: GraphModelSpace, to: GraphModelSpace): # noqa: F821
self.mutator: 'Mutator' = mutator # noqa: F821
self.samples: List[Any] = samples
self.from_: GraphModelSpace = from_
self.to: GraphModelSpace = to
def __repr__(self):
return f'Mutation(mutator={self.mutator}, samples={self.samples}, from={self.from_}, to={self.to})'
|
985,558 | c3e6b8ce467969e0755005ca97bb68efec756db6 | # 28. Factorization of a natural number, e.g. 360 -> 2^3*3^2*5
from collections import Counter
n = int(input("Integer: "))
factors = []
d = 2
while d * d <= n:
if n % d == 0:
factors.append(d)
n //= d
else:
d += 1
if n > 1:
factors.append(n)
f_factors = Counter(factors)
# Build the "p^e" terms directly instead of string-mangling Counter's repr.
result = "*".join(f"{p}^{e}" if e > 1 else str(p) for p, e in sorted(f_factors.items()))
print(result)
|
985,559 | 7e056a431af3d8f2bc67c4d161c7a6b92ae8700e | # Connect the database to the crawler
from sqlalchemy import create_engine
engine = create_engine('mysql+pymysql://tim:87654321@localhost:3306/douban')
conn = engine.connect()
a = conn.execute('select 1').scalar()
print(a) |
985,560 | 36ed0f31d1281f2648a0e3bd70e706ecf37d53bb | # select.py
# select instances that are not consistent with the condition
import sys
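# Usage sketch (assumption: each instance is a string whose position 0 holds the
# class label, 'p' for positive, and position 4 holds the condition flag 't'/'f'):
#   python select.py 3 < instances.txt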
data = []
n = int(sys.argv[1]) # get the number of instances
for i in range(n):
d = input()
data.append(d)
pos = 0  # number of positive instances
nn = 0   # number of selected (inconsistent) instances
np = 0   # number of selected positive instances
j = 0
while j < n:
a = data[j]
cond = (a[4] == 't')
if a[0] == 'p':
pos = pos + 1
if not cond:
print(data[j])
nn = nn + 1
if a[0] == 'p':
np = np +1
j = j + 1
# output
print("*** select result ***\n")
print("basic stat: num. = %d, pos.num. = %d\n" % (n, pos))
print("selected examples: num. = %d, pos.num. = %d\n" % (nn, np))
|
985,561 | 2fab4304672cf5ae7920f00978059808e495d949 | #!/usr/bin/env python3
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Contact: hephaestos@riseup.net - 8764 EF6F D5C1 7838 8D10 E061 CF84 9CE5 42D0 B12B
import re
import subprocess
import platform
import os, sys, signal
import configparser
from time import time, sleep
from datetime import datetime
# We compile this function beforehand for efficiency.
DEVICE_RE = re.compile(r".+ID\s(?P<id>\w+:\w+)")
# Set the global settings path
SETTINGS_FILE = '/etc/usbkill/settings.ini'
# Get the current platform
CURRENT_PLATFORM = platform.system().upper()
help_message = """
Usbkill is a simple program with one goal:
Quickly shut down the computer when a device is inserted or removed.
You can configure a whitelist of ids that are acceptable to insert and then remove,
or a script to check whether the screensaver is unlocked.
Usbkill can run without touching system directories (logging to the current
directory), or installed into the system. See the example settings.ini.
In order to shut down the computer using the built-in method, this
program needs to run as root. Using an external script command you can use
`sudo command' and drop the root requirement.
"""
def log(msg, lsdev=False):
line = str(datetime.now()) + ' ' + msg
print(line)
if not log.path:
return
with open(log.path, 'a') as f:
# Empty line to separate log entries
f.write('\n')
# Log the message that needed to be logged:
f.write(line + '\n')
# Log current usb state:
if lsdev:
f.write('Current state:\n')
if lsdev:
os.system("(lsusb; echo; lspci) >> " + log.path)
log.path = None
def kill_computer(cfg):
"Kill computer using buildin or external method"
if cfg['simulate']:
log("WARNING: Ignoring KILL procedure because of simulation mode")
return
# Log what is happening:
if cfg['kill_cmd']:
os.system(cfg['kill_cmd'])
log("Kill script executed...")
return
# Built-in method of killing the computer
# Sync the filesystem so that the recent log entry does not get
# lost.
# TODO: The external script might do the trick, but sync
# might hang for a longer time sometimes. Suggestion: Execute sync
# in parallel thread and wait at most 1 second for it.
os.system("sync")
# Poweroff computer immediately
if CURRENT_PLATFORM.startswith("DARWIN"):
# OS X (Darwin) - Will reboot
# Use Kernel Panic instead of shutdown command (30% faster and encryption keys are released)
os.system("dtrace -w -n \"BEGIN{ panic();}\"")
elif CURRENT_PLATFORM.endswith("BSD"):
# BSD-based systems - Will shutdown
os.system("shutdown -h now")
else:
# Linux-based systems - Will shutdown
# TODO: I'm not certain if poweroff will clear the keys from RAM.
# I'd use cryptsetup luksSuspend in external script.
os.system("poweroff -f")
log("Buildin kill executed")
def is_unlocked(cfg):
"Check if screen/computer is unlocked"
if not cfg['unlock_cmd']:
return False
ret = os.system(cfg['unlock_cmd'])
if ret == 0:
return True
else:
return False
def lsdev():
"Return a list of connected devices on tracked BUSes"
import glob
devices = []
if CURRENT_PLATFORM == "LINUX":
# USB
path = '/sys/bus/usb*/devices/*/idVendor'
vendors = glob.glob(path)
for entry in vendors:
base = os.path.dirname(entry)
vendor = open(os.path.join(base, 'idVendor')).read().strip()
product = open(os.path.join(base, 'idProduct')).read().strip()
device = vendor + ":" + product
devices.append(device)
# PCI / firewire / other
# TODO: Can device names collide and should we prefix them somehow?
path_lst = [
'/sys/bus/pci/devices',
'/sys/bus/pci_express/devices',
'/sys/bus/firewire/devices',
'/sys/bus/pcmcia/devices',
]
for path in path_lst:
devices += os.listdir(path)
else:
# USB
df = subprocess.check_output("lsusb", shell=True).decode('utf-8')
for line in df.split('\n'):
if line:
info = DEVICE_RE.match(line)
if info:
dinfo = info.groupdict()
devices.append(dinfo['id'])
# PCI
df = subprocess.check_output("lspci", shell=True).decode('utf-8')
for line in df.split('\n'):
if line:
info = line.split(' ')[0]
devices.append(info)
return devices
def load_settings(filename):
"Load settings from config file"
# Load settings from local directory or global - if exists
config = configparser.ConfigParser()
config.read(['./settings.ini', SETTINGS_FILE])
section = config['config']
cfg = {
'sleep_time': float(section['sleep']),
'whitelist': [d.strip() for d in section['whitelist'].split(' ')],
'kill_cmd': section['kill_cmd'],
'unlock_cmd': section['unlock_cmd'],
'kill_on_missing': int(section['kill_on_missing']),
'log_file': section['log_file'],
}
return cfg
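# An illustrative settings.ini matching the keys read above (values are examples):
#   [config]
#   sleep = 0.5
#   whitelist = 0781:5575 046d:c52b
#   kill_cmd =
#   unlock_cmd =
#   kill_on_missing = 1
#   log_file = ./usbkill.log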
def loop(cfg):
"Main loop"
# Main loop that checks every 'sleep_time' seconds if computer should be killed.
# Allows only whitelisted usb devices to connect!
# Does not allow usb device that was present during program start to disconnect!
known_devices = set(lsdev())
# Write to logs that loop is starting:
log("Started patrolling system interfaces every {0} seconds...".format(cfg['sleep_time']),
lsdev=True)
# Main loop
while True:
# List the current usb devices
current_devices = set(lsdev())
new_devices = current_devices - known_devices
removed_devices = known_devices - current_devices
# Check that all current devices are in the set of acceptable devices
for device in new_devices:
if device in cfg['whitelist']:
log("INFO: New whitelisted device connected {0}".format(device), lsdev=True)
known_devices.add(device)
continue
# New unknown device was connected
if is_unlocked(cfg):
log("INFO: New unknown device {0} connected while unlocked".format(device), lsdev=True)
known_devices.add(device)
continue
# New unknown device connected while not unlocked.
log("WARNING: New not-whitelisted device {0} detected - killing the computer...".format(device),
lsdev=True)
kill_computer(cfg)
known_devices.add(device)
# Check that all start devices are still present in current devices
if removed_devices:
desc = ", ".join(removed_devices)
if is_unlocked(cfg):
log("INFO: Device/s {0} disconnected while unlocked".format(desc))
known_devices -= removed_devices
continue
# We are locked. And something got disconnected
if cfg['kill_on_missing'] != 1:
log("INFO: Device/s {0} disconnected but kill_on_missing disabled".format(desc))
known_devices -= removed_devices
continue
log("WARNING: Device/s {0} disconnected while locked - killing the computer...".format(desc))
kill_computer(cfg)
known_devices -= removed_devices
sleep(cfg['sleep_time'])
def exit_handler(signum, frame):
log("Exiting because exit signal was received")
sys.exit(0)
def test(cfg):
"Test kill procedure"
if is_unlocked(cfg):
log("Device is currently unlocked (visible devices may change)")
else:
log("Device is locked (changes in visible devices cause a kill)")
print()
log("WARNING: Executing a test of a kill procedure in 10 seconds")
sleep(5)
log("5 seconds left... (Ctrl-C to cancel)")
sleep(5)
log("Executing a kill procedure")
os.system('sync')
kill_computer(cfg)
def main():
"Check arguments and run program"
import argparse
p = argparse.ArgumentParser(description="usbkill", epilog=help_message)
#p.add_argument("-h", "--help", dest="help",
# action="store_true",
# help="show help")
p.add_argument("--test", dest="test",
action="store_true",
help="test kill and unlock procedure")
p.add_argument("--simulate", dest="simulate",
action="store_true",
help="do everything, but don't kill device")
args = p.parse_args()
# Register handlers for clean exit of loop
for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:
signal.signal(sig, exit_handler)
# Load settings
cfg = load_settings(SETTINGS_FILE)
cfg['simulate'] = args.simulate
log.path = cfg['log_file']
log("Starting with whitelist: " + ",".join(cfg['whitelist']) )
if args.simulate:
log("WARNING: Simulation mode enabled")
# Check if program is run as root, else exit.
# Root is needed to power off the computer.
if args.simulate is False and not cfg['kill_cmd'] and os.geteuid() != 0:
print("\nThis program needs to run as root to use the buildin kill method.\n")
sys.exit(1)
# Start the main loop
if args.test:
test(cfg)
else:
loop(cfg)
if __name__=="__main__":
main()
|
985,562 | 9d7f60bc0b0ee686f818dfa74cac0aad955e5d07 | import arrow
import os
from datetime import datetime, timedelta
from django.db import models
from django.utils.timesince import timesince
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from django.dispatch import receiver
from django.db.models import Sum
from django.template.defaultfilters import slugify
from common.models import User
from common.utils import convert_to_custom_timezone
class Tag(models.Model):
name = models.CharField(max_length=500)
color = models.CharField(max_length=20,
default="#999999", verbose_name=_("color"))
created_by = models.ForeignKey(User,
related_name="marketing_tags",
null=True, on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
@property
def created_by_user(self):
return self.created_by if self.created_by else None
class EmailTemplate(models.Model):
created_by = models.ForeignKey(
User, related_name="marketing_emailtemplates",
null=True, on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=5000)
subject = models.CharField(max_length=5000)
html = models.TextField()
class Meta:
ordering = ['id', ]
@property
def created_by_user(self):
return self.created_by if self.created_by else None
@property
def created_on_arrow(self):
return arrow.get(self.created_on).humanize()
class ContactList(models.Model):
created_by = models.ForeignKey(
User, related_name="marketing_contactlist",
null=True, on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=500)
tags = models.ManyToManyField(Tag)
# is_public = models.BooleanField(default=False)
visible_to = models.ManyToManyField(
User, related_name="contact_lists_visible_to")
class Meta:
ordering = ('-created_on',)
@property
def created_by_user(self):
return self.created_by if self.created_by else None
@property
def created_on_format(self):
return self.created_on.strftime('%b %d, %Y %I:%M %p')
@property
def created_on_since(self):
now = datetime.now()
difference = now.replace(tzinfo=None) - \
self.created_on.replace(tzinfo=None)
if difference <= timedelta(minutes=1):
return 'just now'
return '%(time)s ago' % {
'time': timesince(self.created_on).split(', ')[0]}
@property
def tags_data(self):
return self.tags.all()
@property
def no_of_contacts(self):
return self.contacts.all().count()
@property
def no_of_campaigns(self):
return self.campaigns.all().count()
@property
def unsubscribe_contacts(self):
return self.contacts.filter(is_unsubscribed=True).count()
@property
def bounced_contacts(self):
return self.contacts.filter(is_bounced=True).count()
@property
def no_of_clicks(self):
clicks = CampaignLog.objects.filter(
contact__contact_list__in=[self]).aggregate(Sum(
'no_of_clicks'))['no_of_clicks__sum']
return clicks
@property
def created_on_arrow(self):
return arrow.get(self.created_on).humanize()
@property
def updated_on_arrow(self):
return arrow.get(self.updated_on).humanize()
class Contact(models.Model):
phone_regex = RegexValidator(
regex=r'^\+?1?\d{9,15}$',
message="Phone number must be entered in the format: '+999999999'. \
Up to 20 digits allowed."
)
created_by = models.ForeignKey(
User, related_name="marketing_contacts_created_by",
null=True, on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
contact_list = models.ManyToManyField(ContactList, related_name="contacts")
name = models.CharField(max_length=500)
email = models.EmailField()
contact_number = models.CharField(
validators=[phone_regex], max_length=20, blank=True, null=True)
is_unsubscribed = models.BooleanField(default=False)
is_bounced = models.BooleanField(default=False)
company_name = models.CharField(max_length=500, null=True, blank=True)
last_name = models.CharField(max_length=500, null=True, blank=True)
city = models.CharField(max_length=500, null=True, blank=True)
state = models.CharField(max_length=500, null=True, blank=True)
contry = models.CharField(max_length=500, null=True, blank=True)
def __str__(self):
return self.email
@property
def created_on_arrow(self):
return arrow.get(self.created_on).humanize()
class Meta:
ordering = ['id', ]
class FailedContact(models.Model):
phone_regex = RegexValidator(
regex=r'^\+?1?\d{9,15}$',
message="Phone number must be entered in the format: '+999999999'.\
Up to 20 digits allowed."
)
created_by = models.ForeignKey(
User, related_name="marketing_failed_contacts_created_by", null=True,
on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
contact_list = models.ManyToManyField(
ContactList, related_name="failed_contacts")
name = models.CharField(max_length=500, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
contact_number = models.CharField(
validators=[phone_regex], max_length=20, blank=True, null=True)
company_name = models.CharField(max_length=500, null=True, blank=True)
last_name = models.CharField(max_length=500, null=True, blank=True)
city = models.CharField(max_length=500, null=True, blank=True)
state = models.CharField(max_length=500, null=True, blank=True)
contry = models.CharField(max_length=500, null=True, blank=True)
def __str__(self):
return self.email
def get_campaign_attachment_path(self, filename):
file_split = filename.split('.')
file_extension = file_split[-1]
path = "%s_%s" % (file_split[0], str(datetime.now()))
return "campaigns/attachment/" + slugify(path) + "." + file_extension
class Campaign(models.Model):
STATUS_CHOICES = (
('Scheduled', 'Scheduled'),
('Cancelled', 'Cancelled'),
('Sending', 'Sending'),
('Preparing', 'Preparing'),
('Sent', 'Sent'),
)
title = models.CharField(max_length=5000)
created_by = models.ForeignKey(
User, related_name="marketing_campaigns_created_by",
null=True, on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
contact_lists = models.ManyToManyField(
ContactList, related_name="campaigns")
email_template = models.ForeignKey(
EmailTemplate, blank=True, null=True, on_delete=models.SET_NULL)
schedule_date_time = models.DateTimeField(blank=True, null=True)
timezone = models.CharField(max_length=100, default='UTC')
reply_to_email = models.EmailField(blank=True, null=True)
subject = models.CharField(max_length=5000)
html = models.TextField()
html_processed = models.TextField(default="", blank=True)
from_email = models.EmailField(blank=True, null=True)
from_name = models.EmailField(blank=True, null=True)
sent = models.IntegerField(default='0', blank=True)
opens = models.IntegerField(default='0', blank=True)
opens_unique = models.IntegerField(default='0', blank=True)
bounced = models.IntegerField(default='0')
tags = models.ManyToManyField(Tag)
status = models.CharField(
default="Preparing", choices=STATUS_CHOICES, max_length=20)
attachment = models.FileField(
max_length=1000, upload_to=get_campaign_attachment_path, blank=True, null=True)
class Meta:
ordering = ('-created_on', )
@property
def no_of_unsubscribers(self):
unsubscribers = self.campaign_contacts.filter(
contact__is_unsubscribed=True).count()
return unsubscribers
@property
def no_of_bounces(self):
bounces = self.campaign_contacts.filter(
contact__is_bounced=True).count()
return bounces
@property
def no_of_clicks(self):
clicks = self.marketing_links.aggregate(Sum('clicks'))['clicks__sum']
return clicks
@property
def no_of_sent_emails(self):
contacts = self.campaign_contacts.count()
return contacts
@property
def created_on_format(self):
return self.created_on.strftime('%b %d, %Y %I:%M %p')
@property
def sent_on_format(self):
if self.schedule_date_time:
c_schedule_date_time = convert_to_custom_timezone(
self.schedule_date_time, self.timezone)
return c_schedule_date_time.strftime('%b %d, %Y %I:%M %p')
else:
c_created_on = convert_to_custom_timezone(
self.created_on, self.timezone)
return c_created_on.strftime('%b %d, %Y %I:%M %p')
@property
def get_all_emails_count(self):
email_count = CampaignLog.objects.filter(campaign=self).count()
return email_count
# return self.contact_lists.exclude(contacts__email=None).values_list('contacts__email').count()
@property
def get_all_email_bounces_count(self):
# return self.contact_lists.filter(contacts__is_bounced=True
# ).exclude(contacts__email=None).values_list('contacts__email').count()
email_count = CampaignLog.objects.filter(campaign=self,contact__is_bounced=True).count()
return email_count
@property
def get_all_emails_unsubscribed_count(self):
# return self.contact_lists.filter(contacts__is_unsubscribed=True
# ).exclude(contacts__email=None).values_list('contacts__email').count()
email_count = CampaignLog.objects.filter(campaign=self,contact__is_unsubscribed=True).count()
return email_count
@property
def get_all_emails_subscribed_count(self):
return self.get_all_emails_count - self.get_all_email_bounces_count - self.get_all_emails_unsubscribed_count
@property
def get_all_emails_contacts_opened(self):
contact_ids = CampaignOpen.objects.filter(
campaign=self).values_list('contact_id', flat=True)
# opened_contacts = Contact.objects.filter(id__in=contact_ids)
# return opened_contacts
return contact_ids.count()
@property
def sent_on_arrow(self):
if self.schedule_date_time:
c_schedule_date_time = convert_to_custom_timezone(
self.schedule_date_time, self.timezone)
# return c_schedule_date_time.strftime('%b %d, %Y %I:%M %p')
return arrow.get(c_schedule_date_time).humanize()
else:
c_created_on = convert_to_custom_timezone(
self.created_on, self.timezone)
# return c_created_on.strftime('%b %d, %Y %I:%M %p')
return arrow.get(self.created_on).humanize()
@receiver(models.signals.pre_delete, sender=Campaign)
def comment_attachments_delete(sender, instance, **kwargs):
attachment = instance.attachment
if attachment:
try:
if os.path.isfile(attachment.path):
os.remove(attachment.path)
except Exception:
return False
return True
class Link(models.Model):
campaign = models.ForeignKey(
Campaign, related_name="marketing_links", on_delete=models.CASCADE)
original = models.URLField(max_length=2100)
clicks = models.IntegerField(default='0')
unique = models.IntegerField(default='0')
class Meta:
ordering = ('id',)
class CampaignLog(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
campaign = models.ForeignKey(
Campaign, related_name='campaign_log_contacts', on_delete=models.CASCADE)
contact = models.ForeignKey(
Contact, related_name="marketing_campaign_logs",
null=True, on_delete=models.SET_NULL)
message_id = models.CharField(max_length=1000, null=True, blank=True)
class CampaignLinkClick(models.Model):
campaign = models.ForeignKey(
Campaign, on_delete=models.CASCADE, related_name="campaign_link_click")
link = models.ForeignKey(
Link, blank=True, null=True, on_delete=models.CASCADE)
ip_address = models.GenericIPAddressField()
created_on = models.DateTimeField(auto_now_add=True)
user_agent = models.CharField(max_length=2000, blank=True, null=True)
contact = models.ForeignKey(
Contact, blank=True, null=True, on_delete=models.CASCADE)
class CampaignOpen(models.Model):
campaign = models.ForeignKey(
Campaign, on_delete=models.CASCADE, related_name='campaign_open')
ip_address = models.GenericIPAddressField()
created_on = models.DateTimeField(auto_now_add=True)
user_agent = models.CharField(max_length=2000, blank=True, null=True)
contact = models.ForeignKey(
Contact, blank=True, null=True, on_delete=models.CASCADE, related_name='contact_campaign_open')
class CampaignCompleted(models.Model):
""" This Model Is Used To Check If The Scheduled Later Emails Have Been Sent
related name : campaign_is_completed
"""
campaign = models.OneToOneField(
Campaign, on_delete=models.CASCADE, related_name='campaign_is_completed')
is_completed = models.BooleanField(default=False)
class ContactUnsubscribedCampaign(models.Model):
""" This Model Is Used To Check If The Contact has Unsubscribed To a Particular Campaign
related name : contact_is_unsubscribed
"""
campaigns = models.ForeignKey(
Campaign, on_delete=models.CASCADE, related_name='campaign_is_unsubscribed')
contacts = models.ForeignKey(
Contact, on_delete=models.CASCADE, related_name='contact_is_unsubscribed')
is_unsubscribed = models.BooleanField(default=False)
class ContactEmailCampaign(models.Model):
"""
send all campaign emails to this contact
"""
name = models.CharField(max_length=500)
email = models.EmailField()
last_name = models.CharField(max_length=500, null=True, blank=True)
created_by = models.ForeignKey(
User, related_name="marketing_contacts_emails_campaign_created_by",
null=True, on_delete=models.SET_NULL)
created_on = models.DateTimeField(auto_now_add=True)
def created_on_arrow(self):
return arrow.get(self.created_on).humanize() |
985,563 | 114b45f83d3cfbc4d40cb798dd3d74cda1f2e74e | from typing import TYPE_CHECKING, Optional, Sequence
import dagster._check as check
from ..execution.context.hook import BoundHookContext, UnboundHookContext
from .resource_requirement import ensure_requirements_satisfied
if TYPE_CHECKING:
from ..events import DagsterEvent
from .hook_definition import HookDefinition
def hook_invocation_result(
hook_def: "HookDefinition",
hook_context: Optional[UnboundHookContext],
event_list: Optional[Sequence["DagsterEvent"]] = None,
):
if not hook_context:
hook_context = UnboundHookContext(
resources={}, op=None, run_id=None, job_name=None, op_exception=None, instance=None
)
# Validate that all required resources are provided in the context
ensure_requirements_satisfied(
hook_context._resource_defs, list(hook_def.get_resource_requirements()) # noqa: SLF001
)
bound_context = BoundHookContext(
hook_def=hook_def,
resources=hook_context.resources,
log_manager=hook_context.log,
op=hook_context._op, # noqa: SLF001
run_id=hook_context._run_id, # noqa: SLF001
job_name=hook_context._job_name, # noqa: SLF001
op_exception=hook_context._op_exception, # noqa: SLF001
instance=hook_context._instance, # noqa: SLF001
)
decorated_fn = check.not_none(hook_def.decorated_fn)
return (
decorated_fn(bound_context, event_list)
if event_list is not None
else decorated_fn(bound_context)
)
|
985,564 | 0a884ffe71efccb84c3b68b8ebff9b7a99dc0630 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# https://www.youtube.com/watch?v=NYJoyZHEW04
# Python 3 #2: variables, the assignment operator, data types
# = binds an object ("Hello World!!" or 5) to the variable x
x = "Helo World!!"
print(x)
print(id(x))
print(type(x))
x = 5
print(x)
print(id(x))
print(type(x))
# = chained (cascading) assignment
a = b = c = 99
print('a = b = c = 99  chained (cascading) assignment')
print('id(a) = ', id(a))
print('id(b) = ', id(b))
print('id(c) = ', id(c))
# = multiple (tuple) assignment
print()
a, b, c = 99, 34, 50
print('a,b,c = 99,34,50  multiple (tuple) assignment')
print('id(a) = ', id(a))
print('id(b) = ', id(b))
print('id(c) = ', id(c))
print('a,b ', a, b)
a, b = b, a
print('a,b = b,a  multiple assignment with swap')
print('a,b ', a, b)
q= 10
print(f'q type(q), {q}, {type(q)}')
q= 10.3
print(f'q type(q), {q}, {type(q)}')
q = "Helo World!!"
print(f'q type(q), {q}, {type(q)}')
q = "Helo \'World!!\' wwww"
print(f'q type(q), {q}, {type(q)}')
q = True
print(f'q type(q), {q}, {type(q)}') |
985,565 | d0fc2dcf2388e727557870029ad1145e41a7970c | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# external
from mixbox import fields
# internal
import stix
import stix.bindings.stix_common as common_binding
# relative
from .vocabs import VocabField
class Names(stix.EntityList):
_namespace = 'http://stix.mitre.org/common-1'
_binding = common_binding
_binding_class = _binding.NamesType
name = VocabField("Name", multiple=True)
|
985,566 | 215dfed8b4d8040f02d201eb7bf02f9b280d25d1 | # encoding=utf8
# coding=UTF-8
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def enviarEmail(destinatario, titulo, conteudo):
#print destinatario
me = "alertai@serpro.gov.br"
you = destinatario
msg = MIMEMultipart('alternative')
msg['Subject'] = titulo
msg['From'] = me
msg['To'] = you
html = "<html><head><meta charset=\"UTF-8\"></head><body>"+\
conteudo +\
"</p></body></html>"
text = ""
part1 = MIMEText(text, 'plain', 'utf-8')
part2 = MIMEText(html, 'html', 'utf-8')
msg.attach(part1)
msg.attach(part2)
s = smtplib.SMTP('localhost')
s.sendmail(me, you, msg.as_string())
|
985,567 | 91b63dcb3e7d9ce687dd720a477a290434798dd3 | #!/usr/bin/env python
import numpy as np
from myplot.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.cm
lon_0 = -116.0
lat_0 = 33.4
llcrnrlon = -118.0
urcrnrlon = -114.0
llcrnrlat = 31.0
urcrnrlat = 35.0
bm = Basemap(lon_0=lon_0,lat_0=lat_0,
llcrnrlon=llcrnrlon,
llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon,
urcrnrlat=urcrnrlat,
projection='tmerc',resolution='i')
fig,ax = plt.subplots()
x = np.random.random((1000,2))
x = bm.axes_to_geodetic(x,ax)
val = np.sin(2*x[:,0])*np.cos(2*x[:,1])
bm.drawscalar(val,x,resolution=500,zorder=0,topography=True,ax=ax,cmap=matplotlib.cm.viridis)
bm.drawtopography(resolution=500,alpha=0.2,ax=ax)
bm.drawcoastlines()
fig,ax = plt.subplots()
bm.drawscalar(val,x,resolution=500,zorder=0,topography=True,ax=ax,cmap=matplotlib.cm.viridis)
bm.drawtopography(resolution=500,alpha=0.0,ax=ax)
bm.drawcoastlines()
fig,ax = plt.subplots()
bm.drawtopography(resolution=500,alpha=0.2,ax=ax,cmap=matplotlib.cm.viridis)
bm.drawcoastlines()
plt.show()
|
985,568 | f67cca62dc4c153677db20d9cebd2817ec91cdf5 | dictionary = dict(
theta13 = r'$\sin^2 2\theta_{13}$',
theta13_unit = '',
dmee = r'$\Delta m^2_{\text{ee}}$',
dmee_unit = r'$\text{eV}^2$',
dm21 = r'$\Delta m^2_{21}$',
dm21_unit = r'$\text{eV}^2$',
dm32 = r'$\Delta m^2_{32}$',
dm32_unit = r'$\text{eV}^2$',
psur = r'$P_\mathrm{sur}$',
psur_unit= ''
)
|
985,569 | 15a8f4f1ff842cb0087e76ee3949e35ef75e9c1d | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from .viewmodels import ContactViewModel
from .forms import ContactForm
Contact = ContactViewModel()
# Create your views here.
def view(request):
contact_list = Contact.contact_list()
return render(request, 'contact/index.html',
{'contact_list' : contact_list})
def create(request):
if request.method == "GET":
return render(request, 'contact/create.html')
else:
form = ContactForm(request.POST)
if form.is_valid():
Contact.create_contact(request)
return redirect('/contact?message=success form')
else:
return redirect('/contact?message=invalid form')
def update(request, contactId):
if request.method == "GET":
contact = Contact.get_id(contactId)
form = ContactForm(contact.__dict__)
return render(request, 'contact/update.html', {'form':form, 'contactId':contact.id})
else:
form = ContactForm(request.POST)
if form.is_valid():
Contact.update_contact(request, contactId)
return redirect('/contact?message=success form')
else:
return redirect('/contact?message=invalid form')
def delete(request, contactId):
Contact.delete_contact(contactId)
return redirect('/contact')
|
985,570 | 0ea060b1abbfc62f9ec1fa2f4c5c3caa14605607 | import numpy
"""
欧几里得度量
"""
def EDD(l1, l2):
return numpy.sqrt(numpy.sum(numpy.square(numpy.array(l1) - numpy.array(l2))))
if __name__ == '__main__':
print(EDD(
[1, 2, 3, 4, 5, 6],
[1, 2, 3, 4, 5, 6],
))
|
985,571 | 67be688cd594f0a405046bad52c8a15105bfa685 | #!/usr/bin/env python3
# HashTable
# Collision resolution: separate chaining
# Time complexity: insert O(1), search O(1), delete O(1) (expected)
# Space complexity: O(n+m), n = table size, m = number of entries
class HashTable(object):
class Cell(object):
def __init__(self, key, value, next=None):
self.key = key
self.value = value
self.next = next
def __init__(self, size):
if size == 0: raise Exception("size must be a positive integer")
self.size = size
self.table = [None] * self.size
def _hash(self, key):
return hash(key) % self.size
def _get(self, key):
index = self._hash(key)
current = self.table[index]
while current:
if current.key == key:
return True, current
current = current.next
return False, index
def get(self, key):
if key is None: raise Exception("Key is None")
is_exist, cell = self._get(key)
if is_exist: return cell.value
return None
def add(self, key, value):
if key is None: raise Exception("Key is None")
is_exist, cell = self._get(key)
if is_exist:
cell.value = value
else:
new = HashTable.Cell(key, value, self.table[self._hash(key)])
self.table[self._hash(key)] = new
def delete(self, key):
if key is None: raise Exception("Key is None")
index = self._hash(key)
current = self.table[index]
if current is None:
return
if current.key == key:
self.table[index] = current.next
return
while current.next:
if current.next.key == key:
current.next = current.next.next
return
current = current.next
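# A minimal usage sketch of the table above (keys/values are illustrative):
if __name__ == "__main__":
table = HashTable(8)
table.add("apple", 1)
table.add("apple", 3)  # overwrites the existing value
print(table.get("apple"))  # -> 3
table.delete("apple")
print(table.get("apple"))  # -> None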
|
985,572 | 82978c33dc0f9d621c44b5823f6e3d0f225b8859 | from sys import stdin
N = int(input())
arr = []
for i in range(N):
arr.append(int(stdin.readline()))
arr.sort(reverse=True)  # Greedy: it pays to process the most expensive items first.
total = 0
ct = 0
for i in arr:
ct += 1
if ct % 3 == 0:  # every third item is free, so skip it (e.g. [6,5,4,3] -> pay 6+5+3 = 14)
ct = 0
continue
total += i
print(total) |
985,573 | 363570690d68568e375e5779da8f614fe4edec4b | #!/usr/bin/env python
"""
pytorch_lifted_loss.py
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
def lifted_loss(score, target, margin=1):
"""
Lifted loss, per "Deep Metric Learning via Lifted Structured Feature Embedding" by Song et al
Implemented in `pytorch`
"""
loss = 0
counter = 0
bsz = score.size(0)
mag = (score ** 2).sum(1).expand(bsz, bsz)
sim = score.mm(score.transpose(0, 1))
dist = (mag + mag.transpose(0, 1) - 2 * sim)
dist = torch.nn.functional.relu(dist).sqrt()
for i in range(bsz):
t_i = target[i].data[0]
for j in range(i + 1, bsz):
t_j = target[j].data[0]
if t_i == t_j:
# Negative component
# !! Could do other things (like softmax that weights closer negatives)
l_ni = (margin - dist[i][target != t_i]).exp().sum()
l_nj = (margin - dist[j][target != t_j]).exp().sum()
l_n = (l_ni + l_nj).log()
# Positive component
l_p = dist[i,j]
loss += torch.nn.functional.relu(l_n + l_p) ** 2
counter += 1
return loss / (2 * counter)
# --
if __name__ == "__main__":
import numpy as np
np.random.seed(123)
score = np.random.uniform(0, 1, (20, 3))
target = np.random.choice(range(3), 20)
print(lifted_loss(Variable(torch.FloatTensor(score)), Variable(torch.LongTensor(target)))) |
985,574 | 61da755c9b7e936aff82abe93f2563e8be64983e | from flask import Flask
from flask import jsonify, make_response
from flask import request
app = Flask ('the-box-library')
books = [{
'name': 'Jurassic Park',
'author': 'Michael Crichton',
'id': '554',
'category': 'Thriller'
},
{
'name': 'Halo The Fall of Reach',
'author': 'Eric Nylund',
'id': '574',
'category': 'Sci-Fi'
}
]
@app.route('/api/category/books', methods = ['GET','POST'])
def book_api():
if request.method == 'GET':
return jsonify(books)
else:
name = request.values.get('name',None)
author = request.values.get('author',None)
category = request.values.get('category',None)
id_ = request.values.get('id',None)
new_book = {
'name' : name,
'author': author,
'category': category,
'id':id_
}
books.append(new_book)
return jsonify({'OK':'Book added'})
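# Example interaction once the server is running (localhost:5000 by default):
#   GET  /api/category/books                      -> JSON list of books
#   POST /api/category/books with form fields
#        name, author, category, id               -> appends a book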
if __name__ == '__main__':
app.run() |
985,575 | f2545d666be324fb6fece70952b0fdb5b0cba13e | __author__ = 'aldnav'
from core import admin
import models
admin.register(models.Person)
admin.register(models.Mosquito) |
985,576 | eed65640954c000771eb91f1113e13a1134fd367 | from kivy.app import App
from kivy.uix.screenmanager import ScreenManager
from Screens.CaptchaScreen import CaptchaScreen, captcha_screen
from Screens.ProxyScreen import ProxyScreen
from Screens.AccountsScreen import AccountsScreen
from Screens.TasksScreen import TasksScreen
from Screens.LogScreen import LogScreen
###Structure
#Database/ File Storage:
# ReadData.py
# FilePaths.py
# CreateDataFiles.py
#
#FrontEnd:
# GUI - Load up the screens
# Screens/* - Folder for all the screens
# -> TaskScreen contains TaskHandler()
#
#Backend:
# TaskHandler() - is Thread dispatcher for all tasks(TaskThread)
# -> TaskThread() The separate thread that runs Task()
# -> Task - calls the separate sites
# CaptchaSolver() - Auto Captcha Handler *** IN THE WORKS(not really) ***
#
#TODO
#Finish TaskHandler -> Add and Delete Tasks()
#Captcha Handling????
#Change the len(getXData()) to a variable in the Screen classes
#Speed up Tensorflow loading -> Delete the resnet50
#Delete Tempfiles -> Located in Appdata/Local/Temp/scoped_*
#### NOW #####
#Learn about cookies
#How are cookies used for captchas
#How to manipulate cookies and bot detection
#Make interaction with site more humanlike
#------------- Maybe do after?
#Log into Gmail for one-click? -> Make a Gmail Trainer?
### Issues ###
# Doing a captcha successfully does not save the cookies and will require you to do the captcha again
# -> Not sure if this is due to unsuccessful cookie saving, bot detection, or an IP ban
## Goal:
# 1. Create a full SneakerBot Model that can run a single task successfully
# 2. Incorporate Better AntiBot Behavior/ Bypass and Include Use of Proxies
# 3. Test and Optimize SneakerBot Model to be more efficient (Speed and Memory wise)
# 4. Add More Site Support
# 5. AutoCaptcha?
# 6. Fully Automate -> Get drop info from Discord Bots and Twitter Bots and create tasks automatically
# and Deploy for complete Automation. Deployment is not necessary. Program can be left on
# This can also include automating the Gmail Trainer.
# 7. FLUFF STUFF (Pretty GUI, More Features?)
#Steps:
# 1. Captcha Handling
# 2. Cookies
# 3. Basic Bot Detection Bypass
# 4. Delete Captcha from captcha_screen.data when completed
# *** ALWAYS ***
# 5. Code Cleanup and Documentation (Includes the README and requirements.txt)
class GUI(App):
def __init__(self):
super().__init__()
def build(self):
self.sm = ScreenManager()
self.createScreens()
return self.sm
def createScreens(self):
task_screen = TasksScreen(name='tasks')
accounts_screen = AccountsScreen(name='accounts')
proxy_screen = ProxyScreen(name='proxy')
#captcha_screen = CaptchaScreen(name='captcha') #Global variable is used in CaptchaScreen.py
log_screen = LogScreen(name='log')
self.sm.add_widget(task_screen)
self.sm.add_widget(accounts_screen)
self.sm.add_widget(proxy_screen)
self.sm.add_widget(captcha_screen)
self.sm.add_widget(log_screen)
if __name__ == "__main__":
GUI().run() |
985,577 | 7674b271c621e97886cbbbbee8751d26753ecb03 | from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images('/media/nk/Work/smartglass_cloud/facedeeplearning/dataset'))
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(rgb,
model='hog')
encodings = face_recognition.face_encodings(rgb, boxes)
for encoding in encodings:
knownEncodings.append(encoding)
knownNames.append(name)
print("encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open('/media/nk/Work/smartglass_cloud/facedeeplearning/1.pickle', "wb")
f.write(pickle.dumps(data))
f.close()
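# To reuse the serialized encodings later (a sketch; the path is the one assumed above):
# with open('/media/nk/Work/smartglass_cloud/facedeeplearning/1.pickle', 'rb') as f:
#     data = pickle.load(f)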
|
985,578 | 35150251da81aaf7d004ca8da851da7661303d8a | """
This module generates feasible parameter settings, the settings
are in a form of an ordered list
"""
from itertools import combinations, product
from ops import *
from constraints import *
from core import *
import copy
class ParameterSampler(object):
def __init__(self, df, qfn, operationList, substrThresh=0.5, scopeLimit=3):
self.df = df
self.qfn = qfn
self.substrThresh = substrThresh
self.scopeLimit = scopeLimit
self.operationList = operationList
#TODO fix
self.dataset = Dataset(df, {'a':'cat', 'b':'cat'})
#self.dataset = Dataset(df, {'a':'cat'})
def getParameterGrid(self):
parameters = []
paramset = [(op, sorted(op.paramDescriptor.values()), op.paramDescriptor.values()) for op in self.operationList]
for op, p, orig in paramset:
if p[0] == ParametrizedOperation.COLUMN:
#remove one of the cols
origParam = copy.copy(orig)
orig.remove(p[0])
colParams = []
for col in self.columnSampler():
grid = []
for pv in orig:
grid.append(self.indexToFun(pv, col))
#todo fix
augProduct = []
for p in product(*grid):
v = list(p)
v.insert(0, col)
augProduct.append(tuple(v))
colParams.extend(augProduct)
parameters.append((op, colParams, origParam))
else:
grid = []
for pv in orig:
grid.append(self.indexToFun(pv))
parameters.append( (op, product(*grid), orig))
#print(parameters)
return parameters
def getAllOperations(self):
parameterGrid = self.getParameterGrid()
operations = []
for i , op in enumerate(self.operationList):
args = {}
#print(parameterGrid[i][1])
for param in parameterGrid[i][1]:
arg = {}
for j, k in enumerate(op.paramDescriptor.keys()):
arg[k] = param[j]
#print(arg)
operations.append(op(**arg))
return operations
def indexToFun(self, index, col=None):
if index == ParametrizedOperation.COLUMN:
return self.columnSampler()
elif index == ParametrizedOperation.COLUMNS:
return self.columnsSampler()
elif index == ParametrizedOperation.VALUE:
return self.valueSampler(col)
elif index == ParametrizedOperation.SUBSTR:
return self.substrSampler(col)
elif index == ParametrizedOperation.PREDICATE:
return self.predicateSampler(col)
else:
raise ValueError("Error in: " + index)
def columnSampler(self):
return self.df.columns.values.tolist()
def columnsSampler(self):
columns = self.columnSampler()
result = []
for i in range(1, min(len(columns), self.scopeLimit)):
result.extend([list(a) for a in combinations(columns, i)])
return result
def valueSampler(self, col):
#print("--",col, list(set(self.df[col].values)))
return list(set(self.df[col].values))
def substrSampler(self, col):
chars = {}
for v in self.df[col].values:
for c in set(v):
if c not in chars:
chars[c] = 0
chars[c] += 1
return [c for c in chars if (chars[c]+0.)/self.df.shape[0] > self.substrThresh]
"""
Brute Force
def predicateSampler(self, col):
columns = self.columnSampler()
columns.remove(col)
projection = self.df[columns]
tuples = set([tuple(x) for x in projection.to_records(index=False)])
result_list = []
for t in tuples:
result_list.append(lambda s, p=t: (s[columns].values.tolist() == list(p)))
return result_list
"""
def predicateSampler(self, col):
return self.dataset.getPredicates(self.qfn)
|
985,579 | 099c1f73e539e9fae485e18bf5cd0f2d1d695e15 | ## Flip files
rule get_flip_plus_to_forward:
input:
manifest = lambda wc: config["manifest"][wc.dataset]
output:
flipfile = "run_folder/flip/{dataset}_FORWARD.flip"
shell:
"""
Rscript bin/format_input/update_reference.R {input.manifest} {output.flipfile} 2
"""
rule get_flip_plus_from_top_bot:
input:
manifest = lambda wc: config["manifest"][wc.dataset]
output:
flipfile = "run_folder/flip/{dataset}_PLUS.flip"
shell:
"""
Rscript bin/format_input/update_reference.R {input.manifest} {output.flipfile} 0
"""
rule get_flip_top_bot_from_top:
input:
manifest = lambda wc: config["manifest"][wc.dataset]
output:
flip_top_bot = "run_folder/flip/{dataset}_TP.flip"
shell:
"""
Rscript bin/format_input/update_reference.R {input.manifest} {output.flip_top_bot} 1
"""
rule get_allele_order:
input:
manifest = lambda wc: config["manifest"][wc.dataset]
output:
allele_order = "run_folder/allele_order/{dataset}_{suffix}_order.txt"
shell:
"""
Rscript bin/format_input/get_allele_order.R {input.manifest} {wildcards.suffix} {output.allele_order}
"""
## Format rules
rule set_bed_forward:
"""
Flip alleles to the FORWARD strand using the flip file derived from the manifest
"""
input:
bed = "run_folder/bed/{dataset}_PLUS.bed",
flipfile = "run_folder/flip/{dataset}_FORWARD.flip"
log:
"logs/{dataset}_update_ref.log"
output:
bed = "run_folder/bed/{dataset}_FORWARD.bed",
fam = "run_folder/bed/{dataset}_FORWARD.fam",
bim = "run_folder/bed/{dataset}_FORWARD.bim"
run:
input_pattern = re.sub("\\.bed", "", input[0])
output_pattern = re.sub("\\.bed", "", output.bed)
shell(f"plink1.9 --real-ref-alleles --allow-no-sex --bfile {input_pattern} --flip {input.flipfile} "
f" --not-chr 0 --set-hh-missing --make-bed --out {output_pattern} &> {log}")
rule set_bed_plus:
"""
Flip TOP/BOT-coded alleles to the PLUS strand using the flip file derived from the manifest
"""
input:
bed = "run_folder/bed/{dataset}_TP.bed",
flipfile = "run_folder/flip/{dataset}_PLUS.flip"
log:
"logs/{dataset}_update_ref.log"
output:
bed = "run_folder/bed/{dataset}_PLUS.bed"
run:
input_pattern = re.sub("\\.bed", "", input[0])
output_pattern = re.sub("\\.bed", "", output.bed)
shell(f"plink1.9 --real-ref-alleles --allow-no-sex --bfile {input_pattern} --flip {input.flipfile} "
f"--make-bed --out {output_pattern} &> {log}")
rule set_bed_top_bot:
input:
bed = "run_folder/bed/{dataset}_TOP.bed",
flip_top_bot = "run_folder/flip/{dataset}_TP.flip"
output:
bed = "run_folder/bed/{dataset}_TP.bed"
run:
input_pattern = re.sub("\\.bed", "", input.bed)
output_pattern = re.sub("\\.bed", "", output.bed)
shell(f"plink1.9 --real-ref-alleles --allow-no-sex --bfile {input_pattern} "
f"--flip {input.flip_top_bot} --make-bed --out {output_pattern}")
rule format_bed:
"""
Format ped to bed
"""
params:
plink = config["plink"]
wildcard_constraints:
suffix = "(TOP|PLUS)",
input:
ped = "input/{dataset}_{suffix}.ped"
# allele_order = "run_folder/allele_order/{dataset}_{suffix}_order.txt"
output:
bed = "run_folder/bed/{dataset}_{suffix}.bed"
run:
input_pattern = re.sub("\\.ped", "", input.ped)
output_pattern = re.sub("\\.bed", "", output.bed)
shell(f"plink1.9 --allow-no-sex --file {input_pattern} "
f"--not-chr 0 --set-hh-missing "
f"--make-bed --out {output_pattern}")
# f"--a2-allele {input.allele_order} "
|
985,580 | da2ad8c301da9e0a338676fecb72e72cf62274f6 | # -*- coding: utf-8 -*-
from Pages.PageObject import PageObject
import time
class ITProPage(PageObject):
firstHandle = ""
secondHandle = ""
def __init__(self, driver):
PageObject.__init__(self, driver)
def click_picture(self):
self.firstHandle = self.driver.window_handles[0]
picture =\
self.waiting_element_by_xpath("//img[@alt=\"小江戸らぐ\"]")
#self.driver.save_screenshot("C:\\home\\hirofumi\\koedo\\a.jpg")
self.click(picture)
for handle in self.driver.window_handles:
if handle != self.firstHandle:
self.secondHandle = handle
self.driver.switch_to_window(self.secondHandle)
picture =\
self.waiting_element_by_xpath("//img[@src=\"koedlug.jpg\"]")
time.sleep(5)
return self
def quit(self):
self.driver.switch_to_window(self.secondHandle)
self.driver.close()
self.driver.switch_to_window(self.firstHandle)
self.driver.quit()
def click_PC_button(self):
PC_button =\
self.waiting_element_by_xpath("//img[@src=\"/images/n/itpro/2010/leaf/btn_pc.gif\"]")
self.click(PC_button)
return self
|
985,581 | aa2f67bcb2d532e5195adf15c6bc6fd908cf951b | from Utility.actionKeys import MakeAction
from .otpForm import Otp
import time
class Login(object):
findBy = 'xpath'
findByTag = 'tag_name'
fld_username = "//input[@name='email']"
fld_password = "//input[@name='password']"
fld_btnLogin = "//span[contains(text(), 'Log In')]"
tag_for_scroll = 'body'
def __init__(self, driver):
self.driver = driver
self.run = MakeAction(driver)
self.otp = Otp(driver)
def setUsername(self, username):
time.sleep(2)
self.run.find_element_and_input(self.findBy, self.fld_username, 1, username)
def setPassword(self, password):
time.sleep(2)
self.run.find_element_and_input(self.findBy, self.fld_password, 1, password)
def clickLogin(self):
time.sleep(2)
self.run.click_element(self.findBy, self.fld_btnLogin, 1)
def setScrollDown(self):
self.run.web_scroll("down", self.findByTag, self.tag_for_scroll)
def setScrollUp(self):
self.run.web_scroll("up", self.findByTag, self.tag_for_scroll)
def login(self, username, password):
# self.setScrollDown()
# time.sleep(5)
self.setUsername(username)
self.setPassword(password)
self.clickLogin()
self.otp.run_otp()
# time.sleep(5)
|
985,582 | e6356884ee97cb896102cb0c5b3751d8a0906917 | import numpy as np
from keras import backend as K
import warnings
class ReduceLROnPlateau():
def __init__(self, model, curmonitor=np.Inf, factor=0.1, patience=10, mode='min',
min_delta=1e-4, cooldown=0, min_lr=0, verbose=1,
**kwargs):
self.curmonitor = curmonitor
if factor > 1.0:
raise ValueError('ReduceLROnPlateau does not support a factor > 1.0.')
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.model = model
self.verbose = verbose
self.monitor_op = None
self._reset()
def _reset(self):
if self.mode == 'min':
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def update_monitor(self, curmonitor):
self.curmonitor = curmonitor
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, curmonitor):
curlr = K.get_value(self.model.optimizer.lr)
self.curmonitor = curmonitor
if self.curmonitor is None:
warnings.warn('invalid monitor value passed to ReduceLROnPlateau', RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(self.curmonitor, self.best):
self.best = self.curmonitor
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing '
'learning rate to %s.' % (epoch + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
return curlr
def in_cooldown(self):
return self.cooldown_counter > 0
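# Minimal usage sketch (assumes a working Keras install; the monitored values
# below are synthetic stand-ins for real validation losses):
if __name__ == "__main__":
from keras.models import Sequential
from keras.layers import Dense
model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='adam', loss='mse')
scheduler = ReduceLROnPlateau(model, factor=0.5, patience=2, mode='min')
scheduler.on_train_begin()
for epoch, val_loss in enumerate([1.0, 0.9, 0.95, 0.96, 0.97]):
lr = scheduler.on_epoch_end(epoch, val_loss)  # returns the lr before any update
print('epoch %d: lr before update = %s' % (epoch, lr))
|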
985,583 | 4bd02844f348d3aa4439f719b628eae2026ec790 | # -*- coding: utf-8 -*-
import os,sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
from prms_par import prms_par
import subprocess
from subprocess import PIPE, STDOUT
class Prms_base(prms_par):
def __init__(self, gsflow_control = None):
self.exe_prms = None # Todo : remove
self.gsflow_control = gsflow_control
self.control_file = None
self.prms_data = None
self.prms_parameters = None
def join_rel_abs_path(self,relpath,abspath):
dfile = relpath
wc = abspath
if os.path.isabs(dfile):
fnn = dfile
elif relpath[0] != '.':
fnn = os.path.join(abspath,relpath)
else:
if sys.platform == "linux" or sys.platform == "linux2":
fileparts = dfile.split("/")
wcparts = wc.split("/")
elif sys.platform == "win32":
fileparts = dfile.split("/")
wcparts = wc.split("\\")
del(fileparts[0])
del(wcparts[-1])
part1 = '\\'.join(wcparts)
part2 = '\\'.join(fileparts)
fnn = os.path.join(part1, part2)
if sys.platform == "linux" or sys.platform == "linux2":
fnn = "/" + fnn
return fnn
def _get_file_abs(self, fn):
control_folder = os.path.dirname(self.control_file)
abs_file = os.path.abspath(os.path.join(control_folder, fn[0]))
return abs_file
def read_data_file(self):
data_file = self.gsflow_control['data_file'][2]
data_file = self._get_file_abs(data_file)
data_items = ['tmax', 'tmin', 'precip', 'runoff', 'pan_evap', 'solrad', 'from_data', 'rain_day']
data_dict = dict()
fid = open(data_file, 'r')
data_dict['Comments'] = fid.readline().strip()
columns = []
while True:
line = fid.readline()
if line.strip() == '' or line.strip()[0:2] == '//':
continue
if "####" in line:
break
if any(item in line for item in data_items):
val_nm = line.strip().split()
for val in range(int(val_nm[1])):
columns.append(val_nm[0]+"_" + str(val))
columns = ['Year', 'Month', 'Day', 'Hour', 'Minut', 'Second'] + columns
data_dict['data'] = pd.read_csv(fid, delim_whitespace=True, names=columns)
fid.close()
self.prms_data = data_dict
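# PRMS parameter files parsed below look roughly like this (illustrative sketch;
# 'hru_area' and the dimension sizes are made-up examples):
#   ** Dimensions **
#   ####
#   nhru
#   12
#   ** Parameters **
#   ####
#   hru_area 10        <- name and (unused) width
#   1                  <- number of dimensions
#   nhru               <- dimension name(s)
#   12                 <- number of values
#   2                  <- value type (1=int, 2/3=real)
#   <12 values, one per line>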
def read_para_file(self):
# loop over multiple files if there are any
parafiles = self.gsflow_control['param_file'][2]
wc = self.work_directory
par_order = []
dim_order = []
par_widths = dict()
Dimensions = dict()
Parameters = dict()
dimen_part = []
para_part = []
for file in parafiles:
fnn = self.join_rel_abs_path(file, wc)
with open(fnn, 'r') as data_file:
content = data_file.read()
# the content consists of two parts: Dimensions & Parameters
# Split the file content into two parts based on the delimiter "**"
content = content.split("**")
read_param_flg = False
read_dim_flg = False
for ig, fgroup in enumerate(content):
# always skip first record
if "Parameters" in fgroup:
read_param_flg = True
continue
if "Dimensions" in fgroup:
read_dim_flg = True
continue
if read_param_flg:
if len(para_part)>0:
if para_part[-1] == '\n' and fgroup[0]=='\n':
fgroup = fgroup[1:]
elif para_part[-1] != '\n' and fgroup[0]!='\n':
para_part = para_part + "\n"
para_part = para_part + fgroup
else:
para_part = fgroup
read_param_flg = False
continue
if read_dim_flg:
if len(dimen_part)>0:
if len(dimen_part) > 0:
if dimen_part[-1] == '\n' and fgroup[0] == '\n':
fgroup = fgroup[1:]
if dimen_part[-1] != '\n' and fgroup[0] != '\n':
dimen_part = dimen_part + "\n"
dimen_part = dimen_part + fgroup
else:
dimen_part = fgroup
read_dim_flg = False
continue
# if len(content) == 1:
# dimen_part = []
# para_part = content[0]
# else:
# dimen_part = content[2]
# para_part = content[4]
# Split each based on "\n####"
try:
dimen_part= dimen_part.split("\n####\n")
except:
pass
for record in dimen_part:
# each record consists of a record name and a value
if len(record)>0:
rec = record.split("\n")
Dimensions[rec[0]]=rec[1]
dim_order.append(rec[0])
if len(dimen_part) == 0:
para_part = para_part.split("####\n")
else:
para_part = para_part.split("\n####\n")
idx = 0
for record in para_part:
if record == '':
pass
else:
idx = idx + 1
print(idx)
# each record consists of 7 parts
if len(record)>0:
rec = record.split("\n")
# 1) is the name
par_name = rec[0].split()[0]
par_order.append(par_name)
# 2) this is width, something not used in prms
try:
width = rec[0].split()[1] # not sure Ask Rich
par_widths[par_name]=width
except:
par_widths[par_name] = ''
# 3) No_dimension : 1d versus 2d
try:
no_dim = int(rec[1])
except:
pass
# dimension names
dim_names = []
indx = 2
for dim in np.arange(no_dim):
dim_names.append(rec[indx])
indx = indx + 1
nvalues = rec[indx]
indx = indx + 1
value_type = int(rec[indx])
indx = indx + 1
values = rec[indx:]
if values[-1]=='':
del(values[-1])
if value_type == 1: # int
values = [int(value) for value in values]
elif value_type == 2 or value_type == 3: # real
if values[-1]=='####':
del values[-1]
values = [float(value) for value in values]
values = np.array(values)
Parameters[par_name] = [no_dim, dim_names,nvalues, value_type, values]
prms_param_file = dict()
prms_param_file['Dimensions'] = Dimensions
prms_param_file['Parameters'] = Parameters
prms_param_file['widths'] = par_widths
self.prms_parameters = prms_param_file
self.fields_order['par_order'] = par_order
self.fields_order['dim_order'] = dim_order
def _load(self):
self.read_data_file()
print ("Reading the parameters file ....")
self.read_para_file()
pass
def run(self):
fn = self.control_file_name
fparts = fn.split('\\')
del (fparts[-1])
fn2 = '\\'.join(fparts)
script_dir = os.getcwd()
os.chdir(fn2)
os.system("gsflow.bat")
os.chdir(script_dir)
def get_parameter(self, name):
"""
:param name:
:return:
"""
dims = self.prms_parameters['Dimensions'].keys()
params = self.prms_parameters['Parameters'].keys()
if name in dims:
curr_par = self.prms_parameters['Dimensions'][name]
par_object = prms_par()
par_object.name = name
par_object.values = int(curr_par[1])
elif name in params:
curr_par = self.prms_parameters['Parameters'][name]
par_object = prms_par()
par_object.name = name
par_object.read_param(curr_par)
return par_object
else:
str_err = name + " is not a defined parameter"
print(str_err)
def set_parameter(self, par):
# par is a list that has the all param info
pass
def write_seperate_param_file(self, fn):
pass
def write_param_file(self, fn):
par_dict = self.prms_parameters
dims = par_dict['Dimensions']
parms = par_dict['Parameters']
dim_order = self.fields_order['dim_order']
par_order = self.fields_order['par_order']
fid = open(fn,'w')
# write header
header1 = "Generated by pyprms, Author: Ayman Alzraiee\n"
header2 = "Version: 1.7\n"
fid.write(header1)
fid.write(header2)
# write the dimension part
fid.write('** Dimensions **\n')
for dim in dim_order:
print dim
fid.write('####\n')
fid.write(dim)
fid.write('\n')
fid.write(str(dims[dim]))
fid.write('\n')
# write the parameters
fid.write('** Parameters **\n')
for par in par_order:
fid.write('####\n')
curr_par = parms[par]
# parameter name
fid.write(par)
fid.write('\n')
# number of dimensions
fid.write(str(curr_par[0]))
fid.write('\n')
# dimensions names
for pp in curr_par[1]:
fid.write(pp)
fid.write('\n')
# number of values
fid.write(str(curr_par[2]))
fid.write('\n')
# type of values
fid.write(str(curr_par[3]))
fid.write('\n')
# values
for val in curr_par[4]:
if curr_par[3] == 1: # int
fid.write(str(int(val)))
fid.write('\n')
else:
fid.write(str(val))
fid.write('\n')
#fid.write('####\n')
fid.close()
def write_control_file(self,fn):
cont_dict = self.gsflow_control
field_order = self.fields_order['control_order']
fid = open(fn, 'w')
# write header
header1 = "GSFLOW control File. Generated by pyprms, Author: Ayman Alzraiee\n"
fid.write(header1)
for par in field_order:
fid.write('####\n')
curr_par = cont_dict[par]
# parameter name
fid.write(par)
fid.write('\n')
# number of values
fid.write(str(curr_par[0][0]))
fid.write('\n')
# data type
fid.write(str(curr_par[1]))
fid.write('\n')
# values
for val in curr_par[2]:
fid.write(str(val))
fid.write('\n')
fid.close()
def write_data_file(self,fn):
data_dict = self.prms_data
climate_keys = ['precip','tmax', 'tmin', 'solrad', 'pan_evap', 'runoff','from_data']
existing_keys = data_dict.keys()
fid = open(fn, 'w')
# write header
header1 = "Generated by pyprms, Author: Ayman Alzraiee\n"
fid.write(header1)
# write data types and number of stations
header2 = ''
Mdata = np.array([])
for ckey in climate_keys:
if ckey in existing_keys:
ts_data = data_dict[ckey]
if not (type(ts_data) == np.ndarray):
ts_data = np.array(ts_data)
nsta = ts_data.shape[1]
lin = ckey+ ' ' + str(nsta) + '\n'
fid.write(lin)
header2 = header2 + ' '+ ckey
if Mdata.shape[0]==0:
Mdata = ts_data
else:
Mdata = np.hstack((Mdata,ts_data))
# write data header
header2 = '################### '+header2 + '\n'
fid.write(header2)
# write data
cdate = data_dict['Date']
indx = 0
for row in Mdata:
curr_date = cdate[indx]
if type(curr_date) == datetime.date:
curr_date = curr_date.strftime("%Y %m %d 0 0 0 ")
str1 = curr_date
else:
str1 = ''.join(str(e) + ' ' for e in curr_date)
str2 = ''.join("%10.4f"%e + ' ' for e in row)
fid.write(str1 + str2)
fid.write('\n')
indx = indx + 1
fid.close()
pass
def add_parameter(self):
pass
def remove_parameter(self, pr):
try:
del (self.prms_parameters['Parameters'][pr])
self.fields_order['par_order'].remove(pr)
except (KeyError, ValueError):
print "Cannot remove " + pr + " parameter......"
|
985,584 | a806aec976813bd873964170a36dad845b1564e9 | #!/usr/bin/python3
# Copyright (C) 2006-2021 Istituto Italiano di Tecnologia (IIT)
# Copyright (C) 2006-2010 RobotCub Consortium
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD-3-Clause license. See the accompanying LICENSE file for details.
import yarp
# create the network
yarp.Network.init()
# define ports
outport = yarp.BufferedPortBottle()
# activate ports
outport.open("/writer")
top = 100
for i in range(1,top):
# prepare a message to send
bottle = outport.prepare()
bottle.clear()
bottle.addString("Hello")
bottle.addInt32(i)
print ("Sending ", bottle.toString())
# send the message
outport.write()
yarp.delay(0.5)
# deactivate ports
outport.close()
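# A matching reader sketch (standard YARP Python bindings; the port name
# "/reader" is illustrative):
#   inport = yarp.BufferedPortBottle()
#   inport.open("/reader")
#   yarp.Network.connect("/writer", "/reader")
#   bottle = inport.read()
#   print("Received", bottle.toString())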
# close the network
yarp.Network.fini() |
985,585 | f983c25b8b4b7e02147f56b86808bf18cb15ab7e | import bitmex_basic
|
985,586 | 79522e40b92e401f714da0ebb986547aa247a380 | # A game where users attempt to guess a random number between 0 and 20
import random
correctNumber = random.randint(0,20)
numberOfAttempts = 1 #Takes at least 1 try
print('I am thinking of a number between 0 and 20.')
while True:
print('Take a guess.')
guess = int(input())
if guess == correctNumber:
print('Good job! You guessed my number in ' + str(numberOfAttempts) + ' guesses!')
break
elif guess < correctNumber:
print('Your guess is too low.')
else:
print('Your guess is too high.')
numberOfAttempts = numberOfAttempts + 1
|
985,587 | b5059341c5e542c7af8cea3243bdf869e56a2fdb | # Generated by Django 2.2.2 on 2019-08-03 04:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Kanji',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kanji', models.CharField(max_length=50, null=True)),
('image', models.ImageField(null=True, upload_to='')),
('meaning', models.CharField(max_length=100, null=True)),
('strokes_count', models.PositiveIntegerField()),
],
),
]
|
985,588 | 5bfe08dd1f2824c2207501f47b10cdfb7a91a882 | from django.core.management.base import BaseCommand
from optparse import make_option
from workspace.exceptions import *
from microsites.exceptions import *
from workspace.middlewares.catch import ExceptionManager as WorkspaceExceptionManagerMiddleWare
from microsites.middlewares.catch import ExceptionManager as MicrositesExceptionManagerMiddleWare
from django.test.client import RequestFactory
from django.contrib.auth.models import AnonymousUser
from django.conf import settings
import os
from core.auth.auth import AuthManager
import re
from core.shortcuts import render_to_response
from workspace.manageDatasets.forms import *
from django.template import Context,Template
class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
class Object(object):
def __init__(self):
self.id = 0
self.revision = 0
class Command(BaseCommand):
help = "Command to raise exceptions on demand for test the expection response, not to test cases"
option_list = BaseCommand.option_list + (
make_option('--exception',
dest='exception',
default='',
help='Raise exception'),
make_option('--all',
dest='all',
default='',
help='Raise all exception')
)
def search_text(self,text,expression):
# NOTE: the replace below is a no-op as written; it looks like a
# leftover HTML-entity unescape (e.g. "&#39;" -> "'")
expression = expression.replace("'", "'")
pattern=re.compile(expression, flags=re.IGNORECASE)
return re.search(pattern, text)
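# e.g. self.search_text(html, e.description) returns a match object when
# the exception description appears in the rendered page (case-insensitive)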
def fake_request(self, space, type_response):
request = RequestFactory().get('/'+space+'/', HTTP_ACCEPT=type_response)
request.user = AnonymousUser()
request.auth_manager = AuthManager(language="en")
return request
def print_titulo(self,exception):
print "\n"
print "======================================================================"
print Colors.BLUE + "Testing " + exception + Colors.END
print "----------------------------------------------------------------------"
def generate_exception(self,space,type_response,e):
request = self.fake_request(space,type_response)
if space == 'workspace':
middleware = WorkspaceExceptionManagerMiddleWare()
if space == 'microsites':
middleware = MicrositesExceptionManagerMiddleWare()
ObjHttpResponse = middleware.process_exception(request,e)
self.process_exception(ObjHttpResponse,e,request)
def process_exception(self,ObjHttpResponse,e,request):
html = ObjHttpResponse._container[0]
title = unicode(e.title)
description = unicode(e.description)
print "Descripcion", description
if not self.search_text(html,description):
print Colors.FAIL + "Description not found in html" + Colors.END
else:
print Colors.GREEN + "Description found in html:",description + Colors.END
'''if not self.search_text(html,title):
print Colors.FAIL + "Title not found in html" + Colors.END
else:
print Colors.GREEN + "Title found in html:", title + Colors.END'''
print "Status Code:", e.status_code
print "Template:", request.META['PATH_INFO']+e.template
print "Type of response:",request.META['HTTP_ACCEPT']
print "Type Exception:", e.tipo
def handle(self, *args, **options):
settings.TEMPLATE_DIRS = list(settings.TEMPLATE_DIRS)
settings.TEMPLATE_DIRS.append(os.path.join(settings.PROJECT_PATH, 'workspace', 'templates'))
settings.TEMPLATE_DIRS.append(os.path.join(settings.PROJECT_PATH, 'microsites', 'templates'))
settings.TEMPLATE_DIRS = tuple(settings.TEMPLATE_DIRS)
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.append('microsites')
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS)
print Colors.HEADER + "\_/ Testing exceptions \_/" + Colors.END
''' Instantiate generic objects for the tests '''
InstancedForm = DatasetFormFactory(0).create()
argument = Object()
if options['exception']:
self.print_titulo(options['exception'])
e = Exception.__new__(eval(options['exception']))
space = 'microsites'
type_response = 'text/html'
request = self.fake_request(space,type_response)
if space == 'workspace':
middleware = WorkspaceExceptionManagerMiddleWare()
if space == 'microsites':
middleware = MicrositesExceptionManagerMiddleWare()
ObjHttpResponse = middleware.process_exception(request,e)
self.process_exception(ObjHttpResponse,e,request)
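# Example invocations (the management-command name is this module's file
# name, which is outside this excerpt):
#   python manage.py <command_name> --exception DatasetNotFoundException
#   python manage.py <command_name> --all true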
if options['all']:
self.print_titulo("DATALException")
e = DATALException()
space = 'workspace'
type_response = 'text/html'
self.generate_exception(space,type_response,e)
self.print_titulo("LifeCycleException")
space = 'workspace'
type_response ='text/html'
e = LifeCycleException()
self.generate_exception(space,type_response,e)
self.print_titulo("ChildNotApprovedException")
space = 'workspace'
type_response ='text/html'
e = ChildNotApprovedException(argument)
self.generate_exception(space,type_response,e)
self.print_titulo("SaveException")
space = 'workspace'
type_response = 'text/html'
if InstancedForm.is_valid():
print Colors.FAIL + "Valid form, no expection generated." + Colors.END
else:
e = SaveException(InstancedForm)
self.generate_exception(space,type_response,e)
self.print_titulo("DatastreamSaveException")
space = 'workspace'
type_response ='text/html'
if InstancedForm.is_valid():
print "Valid form, no expection generated."
else:
e = DatastreamSaveException(InstancedForm)
self.generate_exception(space,type_response,e)
self.print_titulo("VisualizationSaveException")
space = 'workspace'
type_response ='text/html'
if InstancedForm.is_valid():
print "Valid form, no expection generated."
else:
e = VisualizationSaveException(InstancedForm)
self.generate_exception(space,type_response,e)
self.print_titulo("DatasetNotFoundException")
space = 'workspace'
type_response ='text/html'
e = DatasetNotFoundException()
self.generate_exception(space,type_response,e)
self.print_titulo("DataStreamNotFoundException")
space = 'workspace'
type_response ='text/html'
e = DataStreamNotFoundException()
self.generate_exception(space,type_response,e)
self.print_titulo("VisualizationNotFoundException")
space = 'workspace'
type_response ='text/html'
e = VisualizationNotFoundException()
self.generate_exception(space,type_response,e)
self.print_titulo("VisualizationRequiredException")
space = 'workspace'
type_response ='text/html'
e = VisualizationRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("IllegalStateException")
space = 'workspace'
type_response ='text/html'
e = IllegalStateException()
self.generate_exception(space,type_response,e)
self.print_titulo("ApplicationException")
space = 'workspace'
type_response ='text/html'
e = ApplicationException()
self.generate_exception(space,type_response,e)
self.print_titulo("DatastoreNotFoundException")
space = 'workspace'
type_response ='text/html'
e = DatastoreNotFoundException()
self.generate_exception(space,type_response,e)
self.print_titulo("MailServiceNotFoundException")
space = 'workspace'
type_response ='text/html'
e = MailServiceNotFoundException()
self.generate_exception(space,type_response,e)
self.print_titulo("SearchIndexNotFoundException")
space = 'workspace'
type_response ='text/html'
e = SearchIndexNotFoundException()
self.generate_exception(space,type_response,e)
self.print_titulo("S3CreateException")
space = 'workspace'
type_response ='text/html'
e = S3CreateException("Error description set in __init__")
self.generate_exception(space,type_response,e)
self.print_titulo("S3UpdateException")
space = 'workspace'
type_response ='text/html'
e = S3UpdateException("Error description set in __init__")
self.generate_exception(space,type_response,e)
self.print_titulo("ParentNotPublishedException")
space = 'workspace'
type_response ='text/html'
e = ParentNotPublishedException("Error description set in __init__")
self.generate_exception(space,type_response,e)
self.print_titulo("DatastreamParentNotPublishedException")
space = 'workspace'
type_response ='text/html'
request = self.fake_request(space, type_response)
e = DatastreamParentNotPublishedException(argument)
self.generate_exception(space,type_response,e)
self.print_titulo("VisualizationParentNotPublishedException")
space = 'workspace'
type_response ='text/html'
e = VisualizationParentNotPublishedException()
self.generate_exception(space,type_response,e)
self.print_titulo("ResourceRequiredException")
space = 'workspace'
type_response ='text/html'
e = ResourceRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("AnyResourceRequiredException")
space = 'workspace'
type_response ='text/html'
e = AnyResourceRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("DatasetRequiredException")
space = 'workspace'
type_response ='text/html'
e = DatasetRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("DatastreamRequiredException")
space = 'workspace'
type_response ='text/html'
e = DatastreamRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("AnyDatasetRequiredException")
space = 'workspace'
type_response ='text/html'
e = AnyDatasetRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("AnyDatastreamRequiredException")
space = 'workspace'
type_response ='text/html'
e = AnyDatastreamRequiredException()
self.generate_exception(space,type_response,e)
self.print_titulo("InsufficientPrivilegesException")
space = 'workspace'
type_response ='text/html'
e = InsufficientPrivilegesException()
self.generate_exception(space,type_response,e)
self.print_titulo("RequiresReviewException")
space = 'workspace'
type_response ='text/html'
e = RequiresReviewException()
self.generate_exception(space,type_response,e)
'''
Test microsites exceptions
'''
self.print_titulo("VisualizationRevisionDoesNotExist")
space = 'microsites'
type_response ='text/html'
e = VisualizationRevisionDoesNotExist()
self.generate_exception(space,type_response,e)
self.print_titulo("VisualizationDoesNotExist")
space = 'microsites'
type_response ='text/html'
e = VisualizationDoesNotExist()
self.generate_exception(space,type_response,e)
self.print_titulo("AccountDoesNotExist")
space = 'microsites'
type_response ='text/html'
e = AccountDoesNotExist()
self.generate_exception(space,type_response,e)
self.print_titulo("InvalidPage")
space = 'microsites'
type_response ='text/html'
e = InvalidPage()
self.generate_exception(space,type_response,e)
self.print_titulo("DataStreamDoesNotExist")
space = 'microsites'
type_response ='text/html'
e = DataStreamDoesNotExist()
self.generate_exception(space,type_response,e)
self.print_titulo("DatasetDoesNotExist")
space = 'microsites'
type_response ='text/html'
e = DatasetDoesNotExist()
self.generate_exception(space,type_response,e)
self.print_titulo("DatsetError")
space = 'microsites'
type_response ='text/html'
e = DatsetError()
self.generate_exception(space,type_response,e)
self.print_titulo("NotAccesVisualization")
space = 'microsites'
type_response ='text/html'
e = NotAccesVisualization()
self.generate_exception(space,type_response,e)
print "\n"
print Colors.BLUE + " \~ END TEST \~" + Colors.END
|
985,589 | 48368f574b6d1523fac82801c905955bac884ddc | # -*- coding: utf-8 -*-
from keras.layers import Add, Dense, Concatenate, concatenate, multiply, Reshape, RepeatVector, Permute, add, Flatten, Lambda
from keras.engine.topology import Layer
from keras import backend as K
import numpy as np
import tensorflow as tf
# from attention_layer_old import AttentionWithContext
from attention_layer import AttentionWithContext
# attention_size = 2
# class AttLayer(Layer):
# def __init__(self, **kwargs):
# self.hidden_dim = attention_size
# super(AttLayer, self).__init__(**kwargs)
# def build(self, input_shape):
# self.W = self.add_weight(shape=(input_shape[-1], self.hidden_dim), initializer='he_normal', trainable=True)
# self.bw = self.add_weight(shape=(self.hidden_dim,), initializer='zero', trainable=True)
# # self.uw = self.add_weight(shape=(self.hidden_dim,), initializer='he_normal', trainable=True)
# self.trainable_weights = [self.W, self.bw]
# super(AttLayer, self).build(input_shape)
# def call(self, x, mask=None):
# # print(K.shape(x))
# # x_reshaped = tf.reshape(x, [K.shape(x)[0] * K.shape(x)[1], K.shape(x)[-1]])
# # ui = K.tanh(K.dot(x_reshaped, self.W) + self.bw)
# # intermed = K.sum(multiply([self.uw, ui]), axis=1)
# #
# # weights = tf.nn.softmax(tf.reshape(intermed, [K.shape(x)[0], K.shape(x)[1]]), dim=-1)
# # weights = tf.expand_dims(weights, axis=-1)
# #
# # weighted_input = x * weights
# # return K.sum(weighted_input, axis=1)
# # x_reshaped = K.reshape(x, [K.shape(x)[0], 2, K.shape(x)[1] // 2])
# # print(K.shape(x_reshaped))
# att = K.softmax(K.dot(x, self.W) + self.bw)
# att = K.reshape(K.tile(K.reshape(att, (K.shape(att)[0], K.shape(att)[1], 1)), [1, 1, 125]), (-1, 250))
# # print('\natt\n')
# # K.eval(x_reshaped)
# # print(K.shape(x_reshaped))
# # return K.reshape(K.dot(att, x_reshaped), (K.shape(x)[0], K.shape(x)[1] // 2))
# return att
# def compute_output_shape(self, input_shape):
# return (input_shape[0], input_shape[1])
class InvMul(Layer):
def __init__(self, array, in_count, **kwargs):
self.factor = tf.constant(value=array, dtype=tf.float32)
self.count = in_count
super(InvMul, self).__init__(**kwargs)
def call(self, mat, mask=None):
inv_mat = tf.py_func(np.linalg.pinv, [self.factor], tf.float32)
inv_mat = tf.tile(tf.reshape(inv_mat, [1, K.shape(inv_mat)[0], K.shape(inv_mat)[1]]), [K.shape(mat)[0], 1, 1])
mat = tf.reshape(mat, [K.shape(mat)[0], 1, K.shape(mat)[1]])
res = tf.matmul(inv_mat, mat)
return res
def compute_output_shape(self, input_shape):
return (input_shape[0], self.count, input_shape[1])
class AttLayer(Layer):
def __init__(self, att_size, **kwargs):
self.hidden_dim = att_size
super(AttLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.W = self.add_weight(shape=(input_shape[-1], self.hidden_dim), initializer='he_normal', trainable=True)
self.bw = self.add_weight(shape=(self.hidden_dim,), initializer='zero', trainable=True)
self.trainable_weights = [self.W, self.bw]
super(AttLayer, self).build(input_shape)
def call(self, x, mask=None):
att = K.softmax(K.dot(x, self.W) + self.bw)
return att
def compute_output_shape(self, input_shape):
return (input_shape[0], self.hidden_dim)
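# Example (a sketch; `features` is any 2-D tensor such as a Dense output):
#   att = AttLayer(att_size=2)(features)  # shape (batch, 2), rows sum to 1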
# class MatMul(Layer):
# def __init__(self, left_shape, right_shape, **kwargs):
# self.left_shape = left_shape
# self.right_shape = right_shape
# super(MatMul, self).__init__(**kwargs)
#
# def call(self, mat_pair, mask=None):
# '''
# mat_pair: a tuple or list of the two matrixs to be dot multiplied
# '''
# left, right = mat_pair
# x = tf.matmul(left, right)
# print(K.shape(x))
# return x
#
# def compute_output_shape(self, input_shape):
# print('==========================')
# print(input_shape)
# print('==========================')
# return (input_shape[0][0], self.left_shape[0], self.right_shape[1])
class MatMul(Layer):
def __init__(self, left_shape, right_shape, **kwargs):
self.left_shape = left_shape
self.right_shape = right_shape
super(MatMul, self).__init__(**kwargs)
def call(self, mat_pair, mask=None):
'''
mat_pair: a tuple or list of the two matrices to be dot-multiplied
'''
left, right = mat_pair
left=K.expand_dims(left)
right=K.expand_dims(right,axis=1)
x = tf.matmul(left, right)
return x
def compute_output_shape(self, input_shape):
return (input_shape[0][0], self.left_shape[0], self.right_shape[1])
class SumLayer(Layer):
def __init__(self, axis, **kwargs):
self.t_axis = axis
super(SumLayer, self).__init__(**kwargs)
def call(self, x, mask=None):
return K.sum(x, axis=self.t_axis)
def compute_output_shape(self, input_shape):
return tuple([input_shape[i] for i in range(len(input_shape)) if not i == self.t_axis])
class Pinv(Layer):
def call(self, mat, mask=None):
'''
mat: must be a 3-D tensor
'''
inv_mat = tf.map_fn(lambda x: tf.py_func(np.linalg.pinv, [x], tf.float32), mat)
return inv_mat
def compute_output_shape(self, input_shape):
print('==========================')
print(input_shape)
print('==========================')
return (input_shape[0], input_shape[2], input_shape[1])
# def get_pho_rep(pho_0, pho_1, pho_dim):
# # pho_0 = tf.expand_dims(pho_0, axis=1)
# # pho_1 = tf.expand_dims(pho_1, axis=1)
# pho = concatenate([pho_0, pho_1], axis=1)
# # att = Dense(units=2, use_bias=True, activation='softmax')(pho)
# # pho_stack = Reshape((2, attention_size, ))(pho)
# # att = Permute((2, 1))(RepeatVector(attention_size)(att))
# # x = multiply([att, pho_stack])
# weighted = Reshape((2, 125, ))(multiply([pho, AttLayer()(pho)]))
# x = SumLayer(1)(weighted)
# return x
def tensor_split(x, start, end):
'''
Length of tensor_split(x, 0, 100) is 100.
'''
return x[:, start:end]
def tensor_slice(x, i):
return x[:, i, :]
# def get_weighted(inp, dim):
# in_count = len(inp)
# x = concatenate(inp, axis=1)
# att = Dense(units=in_count, activation='softmax', use_bias=True)(x)
#
# pho = Reshape((in_count, dim, ))(x)
# att = Reshape((1, in_count, ))(att)
#
# x = MatMul(left_shape=(1, in_count), right_shape=(in_count, dim))([att, pho])
# x = Reshape((dim, ))(x)
#
# return x, att
def get_weighted(inp, dim):
in_count = len(inp)
result, att = AttentionWithContext(hidden_dim=200)(inp)
return result, att
# def get_weighted(inp, dim):
# m = MatMul((dim, 1), (1, dim))(inp)
# result_1 = AttentionWithContext(hidden_dim=200)(m)
# m = Permute((2, 1))(m)
# result_2 = AttentionWithContext(hidden_dim=200)(m)
# result = Add()([result_1, result_2])
# a = 0
# return result, a
def de_attention(inp, dim, out_count):
x, att = inp
x = Reshape((1, dim, ))(x)
att = Reshape((1, out_count, ))(att)
att = Pinv()(att)
output = MatMul(left_shape=(out_count, 1), right_shape=(1, dim))([att, x])
# output = Flatten()(output)
return [Lambda(tensor_slice, arguments={'i': i})(output) for i in range(out_count)]
def inv_mul(mat, array, k):
t = InvMul(array, k)(mat)
return [Lambda(tensor_slice, arguments={'i': i})(t) for i in range(len(array[0]))]
def get_pho_rep(pho_0, pho_1, pho_dim):
a_0 = Dense(units=pho_dim, activation='sigmoid')(pho_0)
a_1 = Dense(units=pho_dim, activation='sigmoid')(pho_1)
_x_0 = multiply([pho_0, a_0])
_x_1 = multiply([pho_1, a_1])
x = add([_x_0, _x_1])
return x
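# Minimal usage sketch (the 125-dim shape is illustrative):
#   from keras.layers import Input
#   pho_0 = Input(shape=(125,))
#   pho_1 = Input(shape=(125,))
#   rep = get_pho_rep(pho_0, pho_1, 125)  # sigmoid-gated sum of both inputs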
|
985,590 | a1010fcaba45f13ca3adae5bd87bf88e9c3e56fc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import urllib.request
import json
import Adafruit_MCP9808.MCP9808 as MCP9808
sensor = MCP9808.MCP9808()
sensor.begin()
timestamp = lambda: int(round(time.time() * 1000))
while True:
temp = sensor.readTempC()
curTimeStamp = str(timestamp())
# REST API uses an additional header - "Appbase-Secret"
headers = {
'Content-Type': 'application/json',
'Appbase-Secret': '9d7f14bc1ecabc8b47ed176e4e1772cd'
}
values = { 'data': { 'temperature' : temp, 'nowtimestamp':curTimeStamp } }
# Send "PATCH" request to update properties
request = urllib.request.Request('https://api.appbase.io/tempmonitor/v2/pi/temperature/'+curTimeStamp+'/~properties', data=json.dumps(values), headers=headers)
request.get_method = lambda: 'PATCH'
try:
x = urllib.request.urlopen(request)
print(x.read())
print("reading 1")
except Exception as e:
print(e)
values = { 'data': { curTimeStamp : {"path":"pi/temperature/"+curTimeStamp } } }
# Send "PATCH" request to create an edge.
request = urllib.request.Request('https://api.appbase.io/tempmonitor/v2/pi/temperature/~edges', data=json.dumps(values), headers=headers)
request.get_method = lambda: 'PATCH'
try:
x = urllib.request.urlopen(request)
print(x.read())
print("reading 2")
except Exception as e:
print(e)
time.sleep(5.0) |
985,591 | 9ceea996c158b3f915661d2895e87c8339c8fb03 | #
# Python and Data Science: Learning by Doing (Saengneung Publishing, 2020)
# LAB 12-2: Analyzing wind strength on Ulleungdo with pandas, p. 322
#
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
weather = pd.read_csv('d:/data/weather.csv', encoding='CP949')
monthly = [ None for x in range(12) ] # twelve DataFrames, one per month
monthly_wind = [ 0 for x in range(12) ] # each month's average wind speed
# append a column recording the month in which each row was measured
weather['month'] = pd.DatetimeIndex(weather['일시']).month
for i in range(12) :
monthly[i] = weather[ weather['month'] == i + 1 ] # split by month
monthly_wind[i] = monthly[i].mean()['평균풍속'] # per-month average ('평균풍속' = mean wind speed)
plt.plot(monthly_wind, 'red')
plt.show() |
985,592 | 2b1265692173ece37e704fddbaf75faef233c0fc | class Solution:
def dayOfYear(self, date: str):
_DAYS_IN_MONTH = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leapyear(year):
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def days_in_month(year, month):
# month is 0-indexed here, so February is index 1
if month == 1 and is_leapyear(year):
return 29
else:
return _DAYS_IN_MONTH[month]
d = date.split('-')
year, month, day = int(d[0]), int(d[1]), int(d[2])
print(year, month, day)
return sum([days_in_month(year, m) for m in range(0, month - 1)]) + day
class Solution2:
def dayOfYear(self, date: str) -> int:
y, m, d = map(int, date.split('-'))
days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if (y % 400) == 0 or ((y % 4 == 0) and (y % 100 != 0)): days[1] = 29
return d + sum(days[:m-1])
class Solution3:
def dayOfYear(self, date: str) -> int:
y, m, d = (int(x) for x in date.split("-"))
days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30] # December is never needed by days[:m-1]
return sum(days[:(m-1)]) + d + (m > 2 and (y%4 == 0 and y%100 != 0 or y%400 == 0))
answer = Solution()
print(answer.dayOfYear("2004-03-01"))
'''
Given a string date representing a Gregorian calendar date formatted as YYYY-MM-DD, return the day number of the year.
Example 1:
Input: date = "2019-01-09"
Output: 9
Explanation: Given date is the 9th day of the year in 2019.
Example 2:
Input: date = "2019-02-10"
Output: 41
Example 3:
Input: date = "2003-03-01"
Output: 60
Example 4:
Input: date = "2004-03-01"
Output: 61
Constraints:
date.length == 10
date[4] == date[7] == '-', and all other date[i]'s are digits
date represents a calendar date between Jan 1st, 1900 and Dec 31, 2019.
''' |
985,593 | 978cc1dd82be1453655bb39ce2f5b8441d44de79 | # coding=gbk
'''
Created on 2011-8-5
Test whether the minimum-time route is really the fastest and the shortest route really the shortest
@author: Administrator
'''
import fixture
from fixture import utility
from navapp import navitool
from navapp import autotest
from navapp import xxtool
import time
import os
def dotest(*args, **kwargs):
its = True
fixture.setup(its)
navitool.set_silence(True) # enable mute to speed up the test
casesrcdir = kwargs['casesrcdir']
casedstdir = kwargs['casedstdir']
caseno = kwargs['caseno']
try:
fixture.copyfile(os.path.join(casesrcdir,caseno),os.path.join(casedstdir,'tbt0.db'))
except:
fixture.teardown()
raise
refroute = ''
if(kwargs.has_key('refroute')): # waypoints
refroute = kwargs['refroute']
bugpoint = None
if(kwargs.has_key('bugpoint')): # zoom the map to the given area before the screenshot
bugpoint = kwargs['bugpoint']
scaleindex = None
if(kwargs.has_key('scaleindex')): # zoom the map to the given area before the screenshot
scaleindex = kwargs['scaleindex']
if its: time.sleep(15) # Liulu says we should wait 6 s after enabling ITS
routeinfo = []
index = 0
for mode in [5,0]:
calctime,r = navitool.navi_route(kwargs['start_end'],'',mode)
autotest.route_record()
autotest.screen_snapshot('route%d.png'%index)
routeinfo.append(r)
if scaleindex != None:navitool.map_zoomscaleindex(scaleindex)
if bugpoint != None:
t = utility.trans_pointlst(bugpoint)
navitool.map_setcenter(t[0]['lon'],t[0]['lat'])
autotest.screen_snapshot('route0%d.png'%index)
index = index + 1
fixture.teardown()
testok = True
return testok, routeinfo
# the code below is for standalone testing only
if __name__ == "__main__":
fixture.connectdevice()
testok,rlst = dotest(start_end='11640745 3996754, 11644214 3992500,',bugpoint = '11643897,3992507',scaleindex=16,caseno='20110628T165439+08',casesrcdir='C:\\TestSource\\beijing\\',casedstdir='\\ResidentFlash\\CMMBDATA\\MOT\\TTI\\')
fixture.closedevice()
print(testok,rlst)
|
985,594 | 501d376eefe0c84b459f5657a1b5fe14d805f9b9 | import multiprocessing as mp
# from multiprocessing import Process as Process
from peakME_functions_2018 import *
from map_build_functions import *
from sys import argv
import os
import pandas as pd
if __name__ == '__main__':
threads = mp.cpu_count()
cmf_dir = argv[1] # close match files
mir_file = argv[2] # host file i.e hsa_host output from microME_plus.py
# genome_file = argv[3] # ensembl genome file needs better description? -> humanGenomInfo.tsv
outfile = argv[3] # where to write tfbs_ids
# chip_file = argv[5] # the chip_file lifted to current genome
# get_ensembl_ids(genome_file, chip_file, outfile)
# run first section of make_map
out_gene_ls = []
chip_ensmbl = open(outfile)
out_dir = '{}/map'.format(os.path.split(cmf_dir)[0])
mir_host = pd.DataFrame(pd.read_csv(mir_file, sep='\t', header=0))
container_ls = set(mir_host['gene'])
container_dic = {}
for key in container_ls:
container_dic[key] = 0
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# print('\nOutput directory: ', out_dir)
tf_ensm_dic = {}
for line in chip_ensmbl:
if line.startswith('#'):
continue
line = line.strip().split('\t')
tf_ensm_dic[line[0]] = line[1:]
file_ls = os.listdir(cmf_dir)
file_ls = [item for item in file_ls if not item.startswith('.')]
test_mode = False
# print(container_dic)
# exit()
if test_mode:
assign_microrna2019(file_ls, out_dir, cmf_dir, tf_ensm_dic, container_dic, mir_host)
exit()
load_out = split_load(threads - 1, file_ls)
load_out_ls = list(set(list(load_out)))
# print(load_out_ls)
'''--------------------------------------------------------------------------------------------------------------'''
q = None
jobs = []
loop_count = 1
for key in load_out_ls:
# print(key)
key_ls = load_out[key]
p = mp.Process(target=assign_microrna2019, args=(key_ls, out_dir, cmf_dir, tf_ensm_dic, container_dic, mir_host))
jobs.append(p)
loop_count += 1
for j in jobs:
j.start()
for j in jobs:
j.join()
print(loop_count) |
985,595 | da104f315d72e99cf23c165013d3dc9b1835facb | # %%
import dateutil.parser as dp
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ds = pd.read_csv("../data/output/long_int_id.csv")
# %%
# only extract 3 columns that I'll use in training
ds = ds[['id', 'reviews.date', 'product_id']]
ds.info()
# %%
# convert the date strings to epoch timestamps
test_ds = ds
# test_ds = test_ds.loc[:10]
def foo(row):
parsed_t = dp.parse(row['reviews.date'])
t_in_seconds = parsed_t.timestamp()
row['reviews.date'] = str(int(t_in_seconds))
row['product_id'] = str(row['product_id'])
return row
test_ds= test_ds.apply(foo, axis=1)
ds = test_ds
#%%
# %%
ds.to_csv('../data/output/3_col_long_int_id.csv', index=False, header=False)
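# Spot-check the conversion (illustrative):
#   import datetime
#   datetime.datetime.fromtimestamp(int(ds.iloc[0]['reviews.date']))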
|
985,596 | c900518cbc2580996b50c8c3528af0b08aba17fc | class Humano:
i = 0
def __init__(self):
self.edad = 23
def hablar(self,mensaje):
print("Nombre %s edad %d" % (mensaje ,self.edad))
pedro = Humano()
jaime = Humano()
jaime.hablar("Jaime") |
985,597 | f1c5280c928f110e4cdc732ab606ac4650d6f27a | #import math
import numpy as np
size = 10
x = np.arange(size+1)
print(x, x.shape)
print(x[0])
print(x[2])
print(x[size])
print(x[1:size-1])
x.shape = (2,5)
print(x)
print(x[0:2,1]) |
985,598 | aa03846b99c1491b6d449f9f58f0942bf25a6fe4 | from datetime import datetime
from django.utils.timezone import utc
from django.core.management.base import BaseCommand
from django.conf import settings
from ui.models import Location
from pysnmp.entity.rfc3413.oneliner import cmdgen
class Command(BaseCommand):
help = 'Record current SNMP state of all the VDI machines registered in the system'
def handle(self, *args, **options):
cmdGen = cmdgen.CommandGenerator()
hostnames = Location.objects.values('hostname')\
.filter(os=Location.WINDOWS7)
for hostname in hostnames:
location = Location.objects.get(hostname__iexact=hostname['hostname'])
errorIndication, errorStatus, errorIndex, varBindTable = cmdGen.nextCmd(
cmdgen.CommunityData(settings.SNMP_COMMUNITY_STRING), cmdgen.UdpTransportTarget(
(location.ip_address, 161)),
'1.3.6.1.4.1.25071.1.2.6.1.1.2',
'1.3.6.1.4.1.25071.1.1.2.1.1.3',)
location.observation_time = datetime.utcnow().replace(tzinfo=utc)
if errorIndication:
print(str(location.ip_address) + ': ' + str(errorIndication))
location.state = Location.NO_RESPONSE
else:
if errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBindTable[-1][int(errorIndex)-1] or '?'))
location.state = Location.NO_RESPONSE
else:
hostname = varBindTable[0][0][1]
status = varBindTable[0][1][1]
if status == 0:
location.state = Location.AVAILABLE
print('%s - %s' % (hostname, Location.STATES[0][1]))
else:
location.state = Location.LOGGED_IN
print('%s - %s' % (hostname, Location.STATES[1][1]))
location.save()
|
985,599 | 103fb469e6ef93ffcacdba09e93bb53b6f132f3d | import math
def calcula_distancia_do_projetil(v, an, y):
# R = (v^2 * sin(2*an) / (2*g)) * (1 + sqrt(1 + 2*g*y / (v*sin(an))^2)),
# with g = 9.807 m/s^2, an in radians and y the launch height
return (v**2/(2*9.807))*(1+(1+(2*9.807*y)/(v**2*math.sin(an)**2))**0.5)*math.sin(2*an)
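# Quick sanity check (illustrative): a 45-degree launch from ground level
# reduces to R = v**2/g:
#   import math
#   print(calcula_distancia_do_projetil(10.0, math.pi/4, 0.0))  # ~10.2 m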
|