blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb095d9bd599da967746ca52b1d2253d2297c9dd | 54e70081228efb6a6dc8b80b60c8efbf71ef7d7b | /Restaurant/user/models.py | 8cb55c7d81dfd8702e56ed3771dc4da812e49a84 | [] | no_license | Hoangkhang3108/DATH | d98b3dfe4dfc1e64415397cf5ad554d0cac62bba | 719033c074d49e3d0b30110b5f796f756b19788d | refs/heads/master | 2020-05-26T18:38:08.210073 | 2019-05-24T02:18:30 | 2019-05-24T02:18:30 | 188,336,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
class CustomerUser(AbstractUser):
    """Custom user model extending Django's AbstractUser with contact fields."""
    # Stored as a string (not an integer) so leading zeros, '+', dashes, etc.
    # are preserved; empty string is the default for both fields.
    phone_number = models.CharField(default='', max_length=15)
    address = models.CharField(default='', max_length=255)
| [
"phkkinz@gmail.com"
] | phkkinz@gmail.com |
c51399d1b9fa6f1634e7e4e9db6c9535140902c6 | 757cbc5ccaa93a25d3cd14daae21c91c948b3475 | /MAP_GMM.py | 15ebba45271403e2308e4383dbe3a632bcb60882 | [] | no_license | simpleplanya/SLR | 7a5b2c0066bac9690fa73ef362d63d868e4b96a4 | 4e2eaee484a9b490d1b443f91d9485314e0f6b29 | refs/heads/master | 2021-04-26T23:52:04.699301 | 2018-05-15T13:53:11 | 2018-05-15T13:53:11 | 123,871,798 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 03 12:35:33 2017
@author: Rocky
"""
from sklearn.mixture import GMM
import numpy as np
from sklearn import datasets
import warnings
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
iris = datasets.load_iris()
X_train = iris.data[0:150]
y_train = iris.target[0:150]
#X_test = iris.data[test_index]
#y_test = iris.target[test_index]
SeModel= GMM(n_components=3,covariance_type='full', init_params='wc', n_iter=100)
SeModel.fit(X_train[:50,[0,2]])
data = X_train[50:100,[0,2]]
sc = StandardScaler()
inter = 1000
while (inter != 0 ):
#print('inter %d'%(inter))
comPro= SeModel.predict_proba(data)
Nk =np.sum(comPro,axis=0)
''' mean '''
numData, numCompoent = comPro.shape
'''2 is fea dim '''
mu_N = np.zeros((numCompoent,2))
for i in range(int(numCompoent)):
for idx , val in enumerate (comPro[:,i]):
mu_N[i,:]+=val*data[idx,:]/Nk[i]
cov = []
Com_num = len(Nk)
sample_num , feaDim = data.shape
'''covar'''
for i in range(Com_num):
cov_k = np.mat(np.zeros((int(feaDim),int(feaDim))))
for j in range(sample_num):
sample = data[j,:]
sample= sample[:,np.newaxis]
''' j sample , i compont'''
cov_k+= (np.matmul(sample,sample.T)*comPro[j,i])/Nk[i]
cov.append(cov_k)
relevance_fac = 16
regular_par = Nk/(Nk+relevance_fac)
com_wei = SeModel.weights_
com_mu = SeModel.means_
com_cov = SeModel.covars_
new_wei = []
new_mu = []
new_cov = []
''' training each compont '''
for i in range(Com_num):
print(regular_par[i])
new_wei.append(regular_par[i]*(Nk[i]/numData)+(1-regular_par[i]*com_wei[i]))
_mu = (regular_par[i]*mu_N[i])+((1-regular_par[i])*com_mu[i,:])
new_mu.append(_mu)
muTmp = com_mu[i]
muTmp = muTmp[:,np.newaxis]
_mu = _mu[:,np.newaxis]
mu_cov = np.matmul(muTmp,muTmp.T)
_mu_cov = np.matmul(_mu,_mu.T)
new_cov.append((regular_par[i]*cov[i])+(1-regular_par[i])*(com_cov[i]+mu_cov)-_mu_cov)
''' update '''
new_wei = new_wei/sum(new_wei)
SeModel.weights_ = np.asarray(new_wei)
SeModel.means_ = np.asarray(new_mu)
SeModel.covars_ = np.asarray(new_cov,dtype=np.float64)
inter -=1
x=SeModel.sample(100)
plt.title('n_components='+str(SeModel.n_components))
plt.scatter(x[:,0],x[:,1],facecolors='none',edgecolors='r',label='adaptation GMM')
plt.scatter(X_train[50:100,0] ,X_train[50:100,2],facecolors='none',edgecolors='g',label='original iris2')
plt.scatter(X_train[0:50,0] ,X_train[0:50,2],facecolors='none',edgecolors='b',label='original iris1')
plt.legend()
| [
"simpleplanya@gmail.com"
] | simpleplanya@gmail.com |
a1b36a3d3e2be1c2571e3bb379ed9f067af445c8 | d93159d0784fc489a5066d3ee592e6c9563b228b | /PhysicsTools/PatAlgos/python/recoLayer0/jetCorrections_cff.py | 6370ee93dd1381a7b0af0826f92278a56fb92a94 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 263 | py | import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi import *
from JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff import *
## for scheduled mode
## patJetCorrections: cms.Sequence that runs patJetCorrFactors to produce
## the pat::Jet energy-correction factors.
patJetCorrections = cms.Sequence(patJetCorrFactors)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
988152e7a537f6c8badc344c3f79050dc23010ce | 8395ffb48750359d1bd51a201a41c7fe124998bc | /apc2015/hw4_submissions/miles_aubert/hw4.py | 669ce9e1021c1d21b63fd24460920040e18eb4a3 | [] | no_license | duke-iml/ece490-s2016 | ab6c3d3fb159a28a9c38487cdb1ad3993008b854 | f9cc992fbaadedc8a69678ba39f0c9d108e6910d | refs/heads/master | 2020-04-12T09:03:56.601000 | 2016-11-29T21:36:48 | 2016-11-29T21:36:48 | 49,226,568 | 2 | 6 | null | 2016-11-29T21:36:49 | 2016-01-07T19:42:34 | Python | UTF-8 | Python | false | false | 40,546 | py | #!/usr/bin/python
from klampt import *
from klampt.glprogram import *
from klampt import vectorops,so3,se3,gldraw,ik,loader,robotcollide
from klampt.robotsim import Geometry3D,SimBody
from baxter import *
from hw4_planner import *
import apc
import os
import math
import random
from threading import Thread,Lock
from Queue import Queue
#configuration variables
#Question 1,2,3: set NO_SIMULATION_COLLISIONS = 1
#Question 4: set NO_SIMULATION_COLLISIONS = 0
NO_SIMULATION_COLLISIONS = 1
#Set FAKE_SIMULATION to 1 to help fast prototyping of later stages.
#You won't have to wait for the arm to move.
FAKE_SIMULATION = 0
#The path of the klampt_models directory
model_dir = "../klampt_models/"
#resting configuration (60 entries -- assumed to match the Baxter model's
#configuration size; all joints at zero)
baxter_rest_config = [0.0]*60
#the transformation of the order bin, an se3 (rotation,translation) pair
order_bin_xform = (so3.identity(),[0.5,0,0])
#the local bounding box (bmin,bmax) of the order bin
order_bin_bounds = ([-0.2,-0.4,0],[0.2,0.4,0.7])
class KnowledgeBase:
    """A structure containing the robot's dynamic knowledge about the world.

    Members:
    - bin_contents: maps each bin name to the list of known items inside it
      (apc.ItemInBin objects), or None if the bin has not been sensed yet.
    - order_bin_contents: items already placed into the order bin, also
      given as apc.ItemInBin objects.
    - shelf_xform: the se3 transformation (rotation, translation) of the
      bottom center of the shelf in world coordinates.  The x coordinate
      increases from left to right, the y coordinate increases from bottom
      to top, and the z coordinate increases from back to front.  This is
      loaded dynamically, either from perception or hard coded (in this
      assignment the fake perception module fills in bin contents and the
      shelf xform is assumed perfectly estimated).
    """
    def __init__(self):
        self.bin_contents = {}
        for name in apc.bin_names:
            self.bin_contents[name] = None
        self.order_bin_contents = []
        self.shelf_xform = se3.identity()
    def bin_front_center(self,bin_name):
        """World-space point at the center of the bin's front opening."""
        lo,hi = apc.bin_bounds[bin_name]
        midpoint = [0.5*(lo[0]+hi[0]),0.5*(lo[1]+hi[1]),hi[2]]
        return se3.apply(self.shelf_xform,midpoint)
    def bin_vantage_point(self,bin_name):
        """Camera vantage point: 20cm straight out from the bin's front center."""
        outward = so3.apply(self.shelf_xform[0],[0,0,0.2])
        return vectorops.add(self.bin_front_center(bin_name),outward)
    def grasp_xforms(self,object):
        """Returns a list of (grasp,world_xform) pairs for every grasp the
        object defines, or None if the object's pose is unknown."""
        if object.xform == None: return None
        return [(g,se3.mul(object.xform,g.grasp_xform))
                for g in object.info.grasps]
#a list of actual items -- this is only used for the fake perception module, and your
#code should not use these items directly
ground_truth_items = []
#ground-truth shelf pose; overwritten by load_apc_world() when the world loads
ground_truth_shelf_xform = se3.identity()
def init_ground_truth():
    """Populates ground_truth_items with three known objects and poses each
    one inside its assigned shelf bin (setup for the fake perception module)."""
    global ground_truth_items
    # (item info, bin, u, v, rotation angle) for each placed object
    placements = [(apc.tall_item, 'bin_B', 0.25, 0.2, 0.0),
                  (apc.small_item, 'bin_D', 0.5, 0.1, math.pi/4),
                  (apc.med_item, 'bin_H', 0.6, 0.4, math.pi/2)]
    ground_truth_items = []
    for (info, bin_name, u, v, theta) in placements:
        item = apc.ItemInBin(info, bin_name)
        item.set_in_bin_xform(ground_truth_shelf_xform, u, v, theta)
        ground_truth_items.append(item)
def run_perception_on_shelf(knowledge):
    """Fake perception stub: copies the ground-truth shelf transform into
    the knowledge base as though it had been sensed."""
    knowledge.shelf_xform = ground_truth_shelf_xform
def run_perception_on_bin(knowledge,bin_name):
    """Fake perception stub: on the first call for a bin, fills the
    knowledge base with every ground-truth item assigned to that bin.
    Subsequent calls for the same bin are no-ops."""
    global ground_truth_items
    if knowledge.bin_contents[bin_name] is not None:
        #already sensed this bin
        return
    knowledge.bin_contents[bin_name] = [item for item in ground_truth_items
                                        if item.bin_name == bin_name]
class LowLevelController:
"""A low-level interface to the Baxter robot (with parallel jaw
grippers). Does appropriate locking for multi-threaded use.
You should use this in your picking controller."""
def __init__(self,robotModel,robotController):
self.robotModel = robotModel
self.controller = robotController
self.lock = Lock()
def getSensedConfig(self):
self.lock.acquire()
res = self.controller.getSensedConfig()
self.lock.release()
return res
def getSensedVelocity(self):
self.lock.acquire()
res = self.controller.getSensedVelocity()
self.lock.release()
return res
def getCommandedConfig(self):
self.lock.acquire()
res = self.controller.getCommandedConfig()
self.lock.release()
return res
def getCommandedVelocity(self):
self.lock.acquire()
res = self.controller.getCommandedVelocity()
self.lock.release()
return res
def setPIDCommand(self,configuration,velocity):
"""Sets the controller to a PID command mode"""
self.lock.acquire()
self.controller.setPIDCommand(configuration,velocity)
self.lock.release()
def setMilestone(self,destination,endvelocity=None):
"""Immediately sets the motion queue to move to the given
milestone. If endvelocity is given, then the end of the
queue will be moving at that velocity. Otherwise, the end
velocity will be zero."""
self.lock.acquire()
if endvelocity == None: self.controller.setMilestone(destination)
else: self.controller.setMilestone(destination,endvelocity)
self.lock.release()
def appendMilestone(self,destination,endvelocity=None):
"""Appends a milestone to the motion queue. If endvelocity
is given, then the end of the queue will be moving at that velocity.
Otherwise, the end velocity will be zero."""
self.lock.acquire()
if endvelocity == None: self.controller.appendMilestone(destination)
else: self.controller.appendMilestone(destination,endvelocity)
self.lock.release()
def isMoving(self):
return self.controller.remainingTime()>0
def remainingTime(self):
return self.controller.remainingTime()
def commandGripper(self,limb,command):
"""Sends the command to the indicated gripper.
For the parallel-jaw gripper, [0] is closed, [1] is open
Warning: don't do this while moving"""
self.lock.acquire()
q = self.controller.getCommandedConfig()
self.robotModel.setConfig(q)
value = command[0]
if limb=='left':
print "Opening left gripper to",value
self.robotModel.getDriver(15).setValue(value*0.03)
self.robotModel.getDriver(16).setValue(-value*0.03)
else:
print "Opening right gripper to",value
self.robotModel.getDriver(17).setValue(value*0.03)
self.robotModel.getDriver(18).setValue(-value*0.03)
self.controller.setMilestone(self.robotModel.getConfig())
self.lock.release()
class PickingController:
"""Maintains the robot's knowledge base and internal state. Most of
your code will go here. Members include:
- knowledge: a KnowledgeBase object
- planner: an LimbPlanner object, which *you will implement and use*
- state: either 'ready', or 'holding'
- configuration: the robot's current configuration
- active_limb: the limb currently active, either holding or viewing a state
- current_bin: the name of the bin where the camera is viewing or the gripper is located
- held_object: the held object, if one is held, or None otherwise
External modules can call viewBinAction(), graspAction(), ungraspAction(),
and placeInOrderBinAction()
"""
def __init__(self,world,robotController):
self.world = world
self.robot = world.robot(0)
self.controller = robotController
self.knowledge = KnowledgeBase()
self.planner = LimbPlanner(self.world,self.knowledge)
self.state = 'ready'
self.active_limb = None
self.active_grasp = None
self.current_bin = None
self.held_object = None
#these may be helpful
self.left_camera_link = self.robot.getLink(left_camera_link_name)
self.right_camera_link = self.robot.getLink(right_camera_link_name)
self.left_gripper_link = self.robot.getLink(left_gripper_link_name)
self.right_gripper_link = self.robot.getLink(right_gripper_link_name)
self.left_arm_links = [self.robot.getLink(i) for i in left_arm_link_names]
self.right_arm_links = [self.robot.getLink(i) for i in right_arm_link_names]
id_to_index = dict([(self.robot.getLink(i).getID(),i) for i in range(self.robot.numLinks())])
self.left_arm_indices = [id_to_index[i.getID()] for i in self.left_arm_links]
self.right_arm_indices = [id_to_index[i.getID()] for i in self.right_arm_links]
def waitForMove(self,timeout = None, pollRate = 0.5):
"""Waits for the move to complete, or timeout seconds is elapsed,
before terminating."""
iters = 0
t = 0
while self.controller.isMoving():
if iters % 10 == 0:
print "Waiting for move to complete..."
time.sleep(pollRate)
t += pollRate
if timeout != None and t > timeout:
return False
iters += 1
return True
def viewBinAction(self,b):
self.waitForMove()
if self.state != 'ready':
print "Already holding an object, can't move to bin"
return False
else:
if b in apc.bin_names:
if self.move_camera_to_bin(b):
self.waitForMove()
self.current_bin = b
run_perception_on_bin(self.knowledge,b)
print "Sensed bin",b,"with camera",self.active_limb
else:
print "Move to bin",b,"failed"
return False
else:
print "Invalid bin",b
return False
return True
def graspAction(self):
self.waitForMove()
self.controller.commandGripper(self.active_limb,[1])
self.waitForMove()
if self.current_bin == None:
print "Not located at a bin"
return False
elif self.state != 'ready':
print "Already holding an object, can't grasp another"
return False
elif len(self.knowledge.bin_contents[self.current_bin])==0:
print "The current bin is empty"
return False
else:
if self.move_to_grasp_object(self.knowledge.bin_contents[self.current_bin][0]):
self.waitForMove()
#now close the gripper
self.controller.commandGripper(self.active_limb,self.active_grasp.gripper_close_command)
self.waitForMove()
self.held_object = self.knowledge.bin_contents[self.current_bin].pop(0)
self.state = 'holding'
print "Holding object",self.held_object.info.name,"in hand",self.active_limb
return True
else:
print "Grasp failed"
return False
def ungraspAction(self):
self.waitForMove()
if self.state != 'holding':
print "Not holding an object"
return False
else:
if self.move_to_ungrasp_object(self.held_object):
self.waitForMove()
#now open the gripper
self.controller.commandGripper(self.active_limb,self.active_grasp.gripper_open_command)
self.waitForMove()
print "Object",self.held_object.info.name,"placed back in bin"
self.knowledge.bin_contents[self.current_bin].append(self.held_object)
self.state = 'ready'
self.held_object = None
return True
else:
print "Ungrasp failed"
return False
def placeInOrderBinAction(self):
self.waitForMove()
if self.state != 'holding':
print "Not holding an object"
else:
if self.move_to_order_bin(self.held_object):
self.waitForMove()
#now open the gripper
self.controller.commandGripper(self.active_limb,self.active_grasp.gripper_open_command)
self.waitForMove()
print "Successfully placed",self.held_object.info.name,"into order bin"
self.knowledge.order_bin_contents.append(self.held_object)
self.held_object.xform = None
self.held_object.bin_name = 'order_bin'
self.state = 'ready'
self.held_object = None
return True
else:
print "Move to order bin failed"
return False
def fulfillOrderAction(self,objectList):
"""Given a list of objects to be put in the order bin, run
until completed."""
remainingObjects = objectList
for b in apc.bin_names:
if self.knowledge.bin_contents[b]==None:
if not self.viewBinAction(b):
print "Could not view bin",b
continue
donextbin = False
while any(o.info.name in remainingObjects for o in self.knowledge.bin_contents[b]) and not donextbin:
#pick up and put down objects until you are holding one that is in the remainingObjects list
if not self.graspAction():
print "Error grasping object"
donextbin = True
break
while not donextbin and (self.held_object == None or self.held_object.info.name not in remainingObjects):
#cycle through objects by putting down and picking up the next object
if not self.ungraspAction():
print "Error putting down object"
return False
if not self.graspAction():
print "Error grasping object"
donextbin = True
break
obj = self.held_object
if self.placeInOrderBinAction():
remainingObjects.remove(obj.info.name)
else:
print "Error putting object into order bin"
return False
if len(remainingObjects)==0:
return True
print "These items are remaining from the order:",remainingObjects
return False
def randomize_limb_position(self,limb,range=None):
"""Helper: randomizes the limb configuration in self.robot.
limb can be 'left' or 'right'. If range is provided, then
this samples in a range around the current commanded config"""
qmin,qmax = self.robot.getJointLimits()
if range == None:
q = baxter_rest_config[:]
if limb == 'left':
for j in self.left_arm_indices:
q[j] = random.uniform(qmin[j],qmax[j])
else:
for j in self.right_arm_indices:
q[j] = random.uniform(qmin[j],qmax[j])
self.robot.setConfig(q)
else:
q = self.controller.getCommandedConfig()
if limb == 'left':
for j in self.left_arm_indices:
q[j] = max(qmin[j],min(qmax[j],random.uniform(q[j]-range,q[j]+range)))
else:
for j in self.right_arm_indices:
q[j] = max(qmin[j],min(qmax[j],random.uniform(q[j]-range,q[j]+range)))
self.robot.setConfig(q)
return
def move_camera_to_bin(self,bin_name):
"""Starts a motion so the camera has a viewpoint that
observes bin_name. Will also change self.active_limb to the
appropriate limb.
If successful, sends the motion to the low-level controller and
returns True.
Otherwise, does not modify the low-level controller and returns False.
"""
world_offset = self.knowledge.bin_vantage_point(bin_name)
#place +z in the +x axis, y in the +z axis, and x in the -y axis
left_goal = ik.objective(self.left_camera_link,R=[0,0,-1,1,0,0,0,1,0],t=world_offset)
right_goal = ik.objective(self.right_camera_link,R=[0,0,-1,1,0,0,0,1,0],t=world_offset)
qcmd = self.controller.getCommandedConfig()
for i in range(100):
if random.random() < 0.5:
if i == 0:
self.robot.setConfig(qcmd)
else:
self.randomize_limb_position('left')
if ik.solve(left_goal):
if self.planner.check_collision_free('left'):
self.controller.setMilestone(self.robot.getConfig())
self.active_limb = 'left'
return True
else:
if i == 0:
self.robot.setConfig(qcmd)
else:
self.randomize_limb_position('right')
if ik.solve(right_goal):
if self.planner.check_collision_free('right'):
self.controller.setMilestone(self.robot.getConfig())
self.active_limb = 'right'
return True
return False
def move_to_grasp_object(self,object):
"""Sets the robot's configuration so the gripper grasps object at
one of its potential grasp locations. Might change self.active_limb
to the appropriate limb. Must change self.active_grasp to the
selected grasp.
If successful, sends the motion to the low-level controller and
returns True.
Otherwise, does not modify the low-level controller and returns False.
"""
grasps = self.knowledge.grasp_xforms(object)
qmin,qmax = self.robot.getJointLimits()
qcmd = self.controller.getCommandedConfig()
#phase 1: init IK from the commanded config, search among grasps
for (grasp,gxform) in grasps:
if self.active_limb == 'left':
Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
else:
Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
self.robot.setConfig(qcmd)
if ik.solve(goal):
self.controller.setMilestone(self.robot.getConfig())
self.active_grasp = grasp
return True
#Phase 2: that didn't work, now try random sampling
for i in range(100):
#pick a config at random
self.randomize_limb_position(self.active_limb)
#pick a grasp at random
(grasp,gxform) = random.choice(grasps)
if self.active_limb == 'left':
Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
else:
Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
if ik.solve(goal):
self.active_grasp = grasp
#TODO: plan a path
self.controller.setMilestone(self.robot.getConfig())
return True
return False
def move_to_ungrasp_object(self,object):
"""Sets the robot's configuration so the gripper ungrasps the object.
If successful, sends the motion to the low-level controller and
returns True.
Otherwise, does not modify the low-level controller and returns False.
"""
assert len(object.info.grasps) > 0,"Object doesn't define any grasps"
return True
def move_to_order_bin(self,object):
"""Sets the robot's configuration so the gripper is over the order bin
If successful, sends the motion to the low-level controller and
returns True.
Otherwise, does not modify the low-level controller and returns False.
"""
left_target = se3.apply(order_bin_xform,[0.0,0.2,order_bin_bounds[1][2]+0.1])
right_target = se3.apply(order_bin_xform,[0.0,-0.2,order_bin_bounds[1][2]+0.1])
qcmd = self.controller.getCommandedConfig()
for i in range(100):
if self.active_limb == 'left':
goal = ik.objective(self.left_gripper_link,local=left_gripper_center_xform[1],world=left_target)
else:
goal = ik.objective(self.right_gripper_link,local=right_gripper_center_xform[1],world=right_target)
#set IK solver initial configuration
if i==0:
self.robot.setConfig(qcmd)
else:
self.randomize_limb_position(self.active_limb)
#solve
if ik.solve(goal,tol=0.1):
if self.planner.check_collision_free('left'):
self.controller.setMilestone(self.robot.getConfig())
self.active_limb = 'left'
return True
return False
def draw_xformed(xform,localDrawFunc):
    """Applies the se3 transform ``xform`` to the GL modelview stack and
    invokes ``localDrawFunc``, which draws an object in its local frame.

    Example: draw_xformed(xform,lambda:gldraw.box(bmin,bmax)) renders a box
    positioned and oriented by xform."""
    #glMultMatrixf wants the 4x4 homogeneous matrix flattened column-major
    columns = zip(*se3.homogeneous(xform))
    flat = []
    for col in columns:
        flat.extend(col)
    glPushMatrix()
    glMultMatrixf(flat)
    localDrawFunc()
    glPopMatrix()
def draw_oriented_box(xform,bmin,bmax):
    """Helper: draws a solid box with local corners bmin/bmax, posed by xform."""
    box_drawer = lambda: gldraw.box(bmin,bmax)
    draw_xformed(xform,box_drawer)
def draw_wire_box(bmin,bmax):
    """Helper: draws the 12 edges of an axis-aligned box in wireframe."""
    #two rectangular faces, at x=bmin[0] and x=bmax[0]
    for x in (bmin[0],bmax[0]):
        glBegin(GL_LINE_LOOP)
        glVertex3f(x,bmin[1],bmin[2])
        glVertex3f(x,bmin[1],bmax[2])
        glVertex3f(x,bmax[1],bmax[2])
        glVertex3f(x,bmax[1],bmin[2])
        glEnd()
    #four edges along x connecting corresponding corners of the two faces
    glBegin(GL_LINES)
    for (y,z) in ((bmin[1],bmin[2]),(bmin[1],bmax[2]),
                  (bmax[1],bmax[2]),(bmax[1],bmin[2])):
        glVertex3f(bmin[0],y,z)
        glVertex3f(bmax[0],y,z)
    glEnd()
def draw_oriented_wire_box(xform,bmin,bmax):
    """Helper: renders an axis-aligned wireframe box after posing it by xform."""
    def _draw_local():
        draw_wire_box(bmin,bmax)
    draw_xformed(xform,_draw_local)
def run_controller(controller,command_queue):
run_perception_on_shelf(controller.knowledge)
while True:
c = command_queue.get()
if c != None:
print "Running command",c
if c >= 'a' and c <= 'l':
controller.viewBinAction('bin_'+c.upper())
elif c == 'x':
controller.graspAction()
elif c == 'u':
controller.ungraspAction()
elif c == 'p':
controller.placeInOrderBinAction()
elif c == 'o':
controller.fulfillOrderAction(['med_item','small_item'])
elif c=='q':
break
else:
print "Waiting for command..."
time.sleep(0.1)
print "Done"
class FakeLowLevelController:
    """A faked low-level interface to the Baxter robot (with parallel jaw
    grippers).  Does appropriate locking for multi-threaded use.
    Replace LowLevelController with this for prototyping, because you
    don't have to wait for motions to complete: every commanded motion
    "finishes" 0.1s after it is issued.

    Locking uses ``with self.lock`` so the lock is always released, even
    when an exception is raised while it is held (the previous
    acquire()/release() pattern would leak the lock on an exception)."""
    def __init__(self,robotModel,robotController):
        #robotController is accepted (and ignored) to keep the same
        #constructor signature as LowLevelController
        self.robotModel = robotModel
        self.config = robotModel.getConfig()
        self.lastCommandTime = time.time()
        self.lock = Lock()
    def getSensedConfig(self):
        """Returns the last commanded configuration (no real sensing)."""
        with self.lock:
            return self.config
    def getSensedVelocity(self):
        """Always zero: the fake robot jumps instantly to its targets."""
        return [0.0]*len(self.config)
    def getCommandedConfig(self):
        """Returns the last commanded configuration."""
        with self.lock:
            return self.config
    def getCommandedVelocity(self):
        """Always zero (see getSensedVelocity)."""
        return [0.0]*len(self.config)
    def setPIDCommand(self,configuration,velocity):
        """Sets the controller to a PID command mode"""
        with self.lock:
            self.config = configuration[:]
            self.lastCommandTime = time.time()
    def setMilestone(self,destination,endvelocity=None):
        """Immediately "moves" to the given milestone.  endvelocity is
        accepted for interface compatibility but ignored."""
        with self.lock:
            self.config = destination[:]
            self.lastCommandTime = time.time()
    def appendMilestone(self,destination,endvelocity=None):
        """Jumps straight to the appended milestone (the fake robot keeps
        no motion queue).  endvelocity is ignored."""
        with self.lock:
            self.config = destination[:]
            self.lastCommandTime = time.time()
    def isMoving(self):
        return self.remainingTime() > 0
    def remainingTime(self):
        #each command is considered to take 0.1s of "motion"
        return (self.lastCommandTime + 0.1) - time.time()
    def commandGripper(self,limb,command):
        """Sends the command to the indicated gripper.
        For the parallel-jaw gripper, [0] is closed, [1] is open
        Warning: don't do this while moving"""
        with self.lock:
            self.robotModel.setConfig(self.config)
            set_model_gripper_command(self.robotModel,limb,command)
            self.config = self.robotModel.getConfig()
            self.lastCommandTime = time.time()
class MyGLViewer(GLRealtimeProgram):
"""This class is used to simulate / interact with with the world model
in hw4.
Pressing 'a-l' runs the view_bin method which should set the robot to a
configuration that places a hand camera such that it points inside the
bin.
Pressing 's' should pause / unpause the simulation.
Pressing 'x' should "grasp" an object in the currently pointed-to-bin
with either one of the hands at the designated grasp point.
Pressing 'u' should "ungrasp" an object currently grasped inside a bin.
Pressing 'p' should "put down" an object in the order bin
"""
def __init__(self,simworld,planworld):
GLRealtimeProgram.__init__(self,"My GL program")
self.simworld = simworld
self.planworld = planworld
self.sim = Simulator(simworld)
self.simulate = True
#self.sim.simulate(0)
#you can set these to true to draw the bins, grasps, and/or gripper/camera frames
self.draw_bins = False
self.draw_grasps = False
self.draw_gripper_and_camera = True
#initialize controllers
self.low_level_controller = LowLevelController(simworld.robot(0),self.sim.getController(0))
if FAKE_SIMULATION:
self.low_level_controller = FakeLowLevelController(simworld.robot(0),self.sim.getController(0))
else:
self.low_level_controller = LowLevelController(simworld.robot(0),self.sim.getController(0))
self.command_queue = Queue()
self.picking_controller = PickingController(planworld,self.low_level_controller)
self.picking_thread = Thread(target=run_controller,args=(self.picking_controller,self.command_queue))
self.picking_thread.start()
def idle(self):
if self.simulate:
self.sim.simulate(self.dt)
#for Q2
if self.simworld.numRigidObjects() >= len(ground_truth_items):
ofs = self.simworld.numRigidObjects()-len(ground_truth_items)
for i,item in enumerate(ground_truth_items):
T = self.sim.getBody(self.simworld.rigidObject(ofs+i)).getTransform()
item.xform = T
glutPostRedisplay()
def display(self):
#you may run auxiliary openGL calls, if you wish to visually debug
#draw the world
self.sim.updateWorld()
self.simworld.drawGL()
#if you're doing question 1, this will draw the shelf and floor
if self.simworld.numTerrains()==0:
for i in range(self.planworld.numTerrains()):
self.planworld.terrain(i).drawGL()
#draw commanded configurations
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[0,1,0,0.5])
for i in xrange(self.simworld.numRobots()):
r = self.simworld.robot(i)
#q = self.sim.getController(i).getCommandedConfig()
q = self.low_level_controller.getCommandedConfig()
r.setConfig(q)
r.drawGL(False)
glDisable(GL_BLEND)
global ground_truth_items
#show bin boxes
if self.draw_bins:
glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[1,1,0,1])
for b in apc.bin_bounds.values():
draw_oriented_box(self.picking_controller.knowledge.shelf_xform,b[0],b[1])
for b in apc.bin_names:
c = self.picking_controller.knowledge.bin_front_center(b)
if c:
glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[1,1,0.5,1])
r = 0.01
gldraw.box([c[0]-r,c[1]-r,c[2]-r],[c[0]+r,c[1]+r,c[2]+r])
c = self.picking_controller.knowledge.bin_vantage_point(b)
if c:
glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[0.5,1,0.5,1])
r = 0.01
gldraw.box([c[0]-r,c[1]-r,c[2]-r],[c[0]+r,c[1]+r,c[2]+r])
#show object state
for i in ground_truth_items:
if i.xform == None:
continue
if i.bin_name == 'order_bin':
continue
#if perceived, draw in solid color
if self.picking_controller.knowledge.bin_contents[i.bin_name]!=None and i in self.picking_controller.knowledge.bin_contents[i.bin_name]:
glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[1,0.5,0,1])
draw_oriented_box(i.xform,i.info.bmin,i.info.bmax)
else:
#otherwise, draw in wireframe
glDisable(GL_LIGHTING)
glColor3f(1,0.5,0)
draw_oriented_wire_box(i.xform,i.info.bmin,i.info.bmax)
glEnable(GL_LIGHTING)
if self.draw_grasps:
#draw grasps, if available
g = self.picking_controller.knowledge.grasp_xforms(i)
if g:
for grasp,xform in g:
gldraw.xform_widget(xform,0.05,0.005)
#show gripper and camera frames
if self.draw_gripper_and_camera:
left_camera_link = self.simworld.robot(0).getLink(left_camera_link_name)
right_camera_link = self.simworld.robot(0).getLink(right_camera_link_name)
left_gripper_link = self.simworld.robot(0).getLink(left_gripper_link_name)
right_gripper_link = self.simworld.robot(0).getLink(right_gripper_link_name)
gldraw.xform_widget(left_camera_link.getTransform(),0.1,0.01)
gldraw.xform_widget(right_camera_link.getTransform(),0.1,0.01)
gldraw.xform_widget(se3.mul(left_gripper_link.getTransform(),left_gripper_center_xform),0.05,0.005)
gldraw.xform_widget(se3.mul(right_gripper_link.getTransform(),right_gripper_center_xform),0.05,0.005)
#draw order box
glDisable(GL_LIGHTING)
glColor3f(1,0,0)
draw_oriented_wire_box(order_bin_xform,order_bin_bounds[0],order_bin_bounds[1])
glEnable(GL_LIGHTING)
return
def keyboardfunc(self,c,x,y):
    # GLUT keyboard callback.
    #   's' toggles physics simulation on/off,
    #   'q' asks the picking thread to stop, waits for it, then exits,
    #   any other key is forwarded to the picking controller's command queue.
    c = c.lower()
    if c=='s':
        self.simulate = not self.simulate
        print "Simulating:",self.simulate
    else:
        # Hand the key off to the picking thread via the shared queue.
        self.command_queue.put(c)
        if c=='q':
            # Block until the picking thread has shut down cleanly.
            self.picking_thread.join()
            exit(0)
    # Request a redraw so any state change is reflected on screen.
    glutPostRedisplay()
def load_apc_world():
    """Produces a world with only the Baxter, shelf, and ground plane in it.

    Returns the populated WorldModel. As a side effect, sets the module-level
    global `ground_truth_shelf_xform` to the shelf's world transform.
    """
    world = WorldModel()
    #uncomment these lines and comment out the next 2 if you want to use the
    #full Baxter model
    #print "Loading full Baxter model (be patient, this will take a minute)..."
    #world.loadElement(os.path.join(model_dir,"baxter.rob"))
    print "Loading simplified Baxter model..."
    world.loadElement(os.path.join(model_dir,"baxter_with_parallel_gripper_col.rob"))
    print "Loading Kiva pod model..."
    world.loadElement(os.path.join(model_dir,"kiva_pod/meshes/pod_lowres.stl"))
    print "Loading plane model..."
    world.loadElement(os.path.join(model_dir,"plane.env"))
    #shift the Baxter up a bit (95cm)
    Rbase,tbase = world.robot(0).getLink(0).getParentTransform()
    world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))
    # Re-apply the configuration so the shifted base takes effect.
    world.robot(0).setConfig(world.robot(0).getConfig())
    #translate pod to be in front of the robot, and rotate the pod by 90 degrees
    reorient = ([1,0,0,0,0,1,0,-1,0],[0,0,0.01])
    Trel = (so3.rotation((0,0,1),-math.pi/2),[1.2,0,0])
    T = reorient
    world.terrain(0).geometry().transform(*se3.mul(Trel,T))
    #initialize the shelf xform for the visualizer and object
    #xform initialization
    global ground_truth_shelf_xform
    ground_truth_shelf_xform = se3.mul(Trel,T)
    return world
def load_baxter_only_world():
    """Produces a world with only the Baxter in it.

    Returns the populated WorldModel (no shelf or ground plane).
    """
    world = WorldModel()
    print "Loading simplified Baxter model..."
    world.loadElement(os.path.join(model_dir,"baxter_with_parallel_gripper_col.rob"))
    #shift the Baxter up a bit (95cm)
    Rbase,tbase = world.robot(0).getLink(0).getParentTransform()
    world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))
    # Re-apply the configuration so the shifted base takes effect.
    world.robot(0).setConfig(world.robot(0).getConfig())
    return world
def spawn_objects_from_ground_truth(world):
    """For all ground_truth_items, spawns RigidObjects in the world
    according to their sizes / mass properties.

    Each item is represented by an axis-aligned box (cube.tri scaled to the
    item's bounding box) with simple contact parameters.
    """
    global ground_truth_items
    print "Initializing world objects"
    for item in ground_truth_items:
        obj = world.makeRigidObject(item.info.name)
        bmin,bmax = item.info.bmin,item.info.bmax
        center = vectorops.div(vectorops.add(bmin,bmax),2.0)
        # Set up mass properties: total mass, COM at box center (local origin).
        m = obj.getMass()
        m.setMass(item.info.mass)
        m.setCom([0,0,0])
        # NOTE(review): a solid box's inertia is m/12 * (sums of squared side
        # lengths); this uses the unsquared extents -- confirm intent.
        m.setInertia(vectorops.mul([bmax[0]-bmin[0],bmax[1]-bmin[1],bmax[2]-bmin[2]],item.info.mass/12.0))
        obj.setMass(m)
        # Stiff, moderately frictious contact parameters for simulation.
        c = obj.getContactParameters()
        c.kFriction = 0.6
        c.kRestitution = 0.1;
        c.kStiffness = 100000
        c.kDamping = 100000
        obj.setContactParameters(c)
        # Load the unit cube mesh and scale/translate it to the item's bounds.
        cube = obj.geometry()
        if not cube.loadFile(os.path.join(model_dir,"cube.tri")):
            print "Error loading cube file",os.path.join(model_dir,"cube.tri")
            exit(1)
        scale = [bmax[0]-bmin[0],0,0,0,bmax[1]-bmin[1],0,0,0,bmax[2]-bmin[2]]
        translate = vectorops.sub(bmin,center)
        cube.transform(scale,translate)
        mesh = cube.getTriangleMesh()
        # Place the object at the ground-truth pose.
        obj.setTransform(item.xform[0],item.xform[1])
    return
def main():
    """The main loop that loads the planning / simulation models and
    starts the OpenGL visualizer."""
    # Planning world: Baxter + shelf + ground plane.
    world = load_apc_world()
    init_ground_truth()
    # Simulation world: optionally omit shelf/objects to disable collisions.
    if NO_SIMULATION_COLLISIONS:
        simworld = load_baxter_only_world()
    else:
        simworld = load_apc_world()
        spawn_objects_from_ground_truth(simworld)
    #load the resting configuration from klampt_models/baxter_rest.config
    global baxter_rest_config
    f = open(model_dir+'baxter_with_parallel_gripper_rest.config','r')
    baxter_rest_config = loader.readVector(f.readline())
    f.close()
    simworld.robot(0).setConfig(baxter_rest_config)
    #run the visualizer
    visualizer = MyGLViewer(simworld,world)
    visualizer.run()
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
###############################################################
# WRITTEN ANSWERS / COMMENTS:
# Q1.
# For question 1 I will briefly describe three possible issues that could occur when considering this software in the
# real world domain.
#
# 1. Firstly, collisions resulting from blindly solving and executing a given inverse kinematic chain. As can be
#    seen from the simulation and code, multiple collisions could occur (with self, with bins and with the world).
#    Examples are clear, such as arms crossing, arms intersecting with Baxter's head and arms moving through bins.
#    A potential solution is to implement a step within the code that computes whether the given inverse kinematics will
#    intersect with any of these items before the transforms are applied to Baxter in the real world.
#
# 2. Secondly are collisions that could occur as a result of lag in the system; if both arms are moving
#    simultaneously this could cause issues even if IK collisions are computed. This could occur if one arm is not
#    moving as quickly as expected and the other arm collides because of this. A solution to this could be only
#    allowing one arm to move at a time, or potentially planning paths that have tolerances for this contingency.
#
# 3. When Baxter is closely observed during grasping, multiple issues arise, including that the grasp positions seem
#    a little too tight, to the extent that they intersect with the wire frame, as well as grasping in positions that will
#    require a large amount of force to achieve. Solutions to this include more in-depth computation of the optimal
#    place to grip an object, as well as information about the structure of the object.
#
# Q2.
# The implemented solution involves computing collisions once a given objective has been IK-solved; the code then checks
# if the computed kinematics would cause any of the arm links to collide with the robot, and if so another objective is
# computed and then solved until a collision-free solution is found.
#
# 15 tests were conducted and the number of failed solutions was counted. From these tests it was found that roughly
# 56% of the objectives solved in a given trial resulted in a collision, which seems very high; based
# on this it could be prudent to develop more informed kinematics prior to solving them. Potentially a bounded operating
# area could solve this.
# Q3.
#
#
# Q4.
#
#
# Q5 (bonus question).
#
#
| [
"hauser.kris@gmail.com"
] | hauser.kris@gmail.com |
925d1fac6a3242c64f799762acf35762a7142c23 | 117442c662cad35375630a8a800d48f8ba53888b | /facedancer/future/configuration.py | 77a35088ccb06c6fe79ae8e8ae75c099dce011b6 | [
"BSD-3-Clause"
] | permissive | walidbarakat/Facedancer | b9a09322541dd320cadefd063888030c9eb4192e | 28d3a900179e9dd280e007026a68fbdf97e4e35a | refs/heads/master | 2023-03-06T14:05:37.479626 | 2021-02-18T02:23:53 | 2021-02-18T02:23:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,304 | py | #
# This file is part of FaceDancer.
#
""" Functionality for describing USB device configurations. """
from dataclasses import dataclass, field
from typing import Iterable
from .types import USBDirection
from .magic import instantiate_subordinates, AutoInstantiable
from .request import USBRequestHandler
from .interface import USBInterface
from .descriptor import USBDescribable
from .endpoint import USBEndpoint
@dataclass
class USBConfiguration(USBDescribable, AutoInstantiable, USBRequestHandler):
    """ Class representing a USBDevice's configuration.

    Fields:
        number                 -- The configuration's number; one-indexed.
        configuration_string   -- A string describing the configuration; or None if not provided.
        max_power              -- The maximum power expected to be drawn by the device when using
                                  this interface, in mA. Typically 500mA, for maximum possible.
        supports_remote_wakeup -- True iff this device should be able to wake the host from suspend.
    """

    # USB descriptor type number for a configuration descriptor, and its fixed size.
    DESCRIPTOR_TYPE_NUMBER = 0x02
    DESCRIPTOR_SIZE_BYTES = 9

    number: int = 1
    configuration_string: str = None
    max_power: int = 500
    self_powered: bool = True
    supports_remote_wakeup: bool = True
    # Back-reference to the owning device; set when attached.
    parent: USBDescribable = None
    # Maps interface number -> USBInterface (despite the declared annotation).
    interfaces: USBInterface = field(default_factory=dict)

    def __post_init__(self):

        # Gather any interfaces defined on the object.
        self.interfaces.update(instantiate_subordinates(self, USBInterface))

    @property
    def attributes(self):
        """ Retrieves the "attributes" composite word (bmAttributes). """

        # Start off with the required bits set to one...
        attributes = 0b10000000

        # ... and then add in our attributes.
        attributes |= (1 << 6) if self.self_powered else 0
        attributes |= (1 << 5) if self.supports_remote_wakeup else 0

        return attributes

    #
    # User API.
    #

    def get_device(self):
        """ Returns a reference to the associated device."""
        return self.parent

    def add_interface(self, interface: USBInterface):
        """ Adds an interface to the configuration. """
        self.interfaces[interface.number] = interface
        interface.parent = self

    def get_endpoint(self, number: int, direction: USBDirection) -> USBEndpoint:
        """ Attempts to find an endpoint with the given number + direction.

        Parameters:
            number    -- The endpoint number to look for.
            direction -- Whether to look for an IN or OUT endpoint.
        """

        # Search each of our interfaces for the relevant endpoint.
        for interface in self.interfaces.values():
            endpoint = interface.get_endpoint(number, direction)
            if endpoint is not None:
                return endpoint

        # If none have one, return None.
        return None

    #
    # Event handlers.
    #

    def handle_data_received(self, endpoint: USBEndpoint, data: bytes):
        """ Handler for receipt of non-control request data.

        Typically, this method will delegate any data received to the
        appropriate configuration/interface/endpoint. If overridden, the
        overriding function will receive all data; and can delegate it by
        calling the `.handle_data_received` method on `self.configuration`.

        Parameters:
            endpoint -- The endpoint on which the data was received.
            data     -- The raw bytes received on the relevant endpoint.
        """

        # Delegate to the first interface that owns the OUT endpoint.
        for interface in self.interfaces.values():
            if interface.has_endpoint(endpoint.number, direction=USBDirection.OUT):
                interface.handle_data_received(endpoint, data)
                return

        # If no interface owned the targeted endpoint, consider the data unexpected.
        self.get_device().handle_unexpected_data_received(endpoint.number, data)

    def handle_data_requested(self, endpoint: USBEndpoint):
        """ Handler called when the host requests data on a non-control endpoint.

        Typically, this method will delegate the request to the appropriate
        interface+endpoint. If overridden, the overriding function will receive
        all data.

        Parameters:
            endpoint -- The endpoint on which the host requested data.
        """

        # Delegate to the first interface that owns the IN endpoint.
        for interface in self.interfaces.values():
            if interface.has_endpoint(endpoint.number, direction=USBDirection.IN):
                interface.handle_data_requested(endpoint)
                return

        # If no interface owned the targeted endpoint, consider the data unexpected.
        self.get_device().handle_unexpected_data_requested(endpoint.number)

    def handle_buffer_empty(self, endpoint: USBEndpoint):
        """ Handler called when a given endpoint first has an empty buffer.

        Often, an empty buffer indicates an opportunity to queue data
        for sending ('prime an endpoint'), but doesn't necessarily mean
        that the host is planning on reading the data.

        This function is called only once per buffer.
        """

        # Delegate to the first interface that owns the IN endpoint.
        for interface in self.interfaces.values():
            if interface.has_endpoint(endpoint.number, direction=USBDirection.IN):
                interface.handle_buffer_empty(endpoint)
                return

    #
    # Backend interface functions.
    #

    def get_interfaces(self) -> Iterable[USBInterface]:
        """ Returns an iterable over all interfaces on the provided device. """
        return self.interfaces.values()

    def get_descriptor(self) -> bytes:
        """ Returns this configuration's configuration descriptor, including subordinates. """

        interface_descriptors = bytearray()

        # FIXME: use construct

        # Add all subordinate descriptors together to create one big subordinate descriptor.
        interfaces = sorted(self.interfaces.values(), key=lambda item: item.number)
        for interface in interfaces:
            interface_descriptors += interface.get_descriptor()

        # Total length = subordinate descriptors + the 9-byte configuration descriptor itself.
        total_len = len(interface_descriptors) + 9
        string_manager = self.get_device().strings

        # Build the core configuration descriptor.
        d = bytes([
            9,  # length of descriptor in bytes
            2,  # descriptor type 2 == configuration
            total_len & 0xff,
            (total_len >> 8) & 0xff,
            len(set(interface.number for interface in self.interfaces.values())),
            self.number,
            string_manager.get_index(self.configuration_string),
            self.attributes,
            self.max_power // 2  # bMaxPower is expressed in 2 mA units
        ])

        return d + interface_descriptors

    #
    # Interfacing functions for AutoInstantiable.
    #

    def get_identifier(self) -> int:
        return self.number

    #
    # Backend functions for our RequestHandler class.
    #

    def _request_handlers(self) -> Iterable[callable]:
        return ()

    def _get_subordinate_handlers(self) -> Iterable[USBInterface]:
        return self.interfaces.values()
| [
"k@ktemkin.com"
] | k@ktemkin.com |
342ff27bcaab154241f7bca43ea75b8295f3c8d7 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/gui/shared/fortificationsevents_dispatcher.py | 9b512b64b33ac34107ebb1d85b90c156e30e520c | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # Embedded file name: scripts/client/gui/shared/fortifications/events_dispatcher.py
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
def showFortBattleRoomWindow():
    """Fire the lobby-scoped event that opens the fort battle room window."""
    view_event = events.LoadViewEvent(FORTIFICATION_ALIASES.FORT_BATTLE_ROOM_WINDOW_ALIAS)
    g_eventBus.handleEvent(view_event, EVENT_BUS_SCOPE.LOBBY)
def showBattleConsumesIntro():
    """Fire the lobby-scoped event that opens the combat-reserves intro view."""
    view_event = events.LoadViewEvent(FORTIFICATION_ALIASES.FORT_COMBAT_RESERVES_INTRO_ALIAS)
    g_eventBus.handleEvent(view_event, EVENT_BUS_SCOPE.LOBBY)
def loadFortView():
    """Fire the lobby-scoped event that loads the main fortifications view."""
    view_event = events.LoadViewEvent(FORTIFICATION_ALIASES.FORTIFICATIONS_VIEW_ALIAS)
    g_eventBus.handleEvent(view_event, EVENT_BUS_SCOPE.LOBBY)
"m4rtijn@gmail.com"
] | m4rtijn@gmail.com |
32977e46b6334a6b89b229388af28175edd3abd2 | eecd60fe3bc03d9553b9efb1b583243b38b070ea | /start2.py | 6c53c600912f07db3b896745c847e495da366041 | [] | no_license | spitikaris/experiment-control | ef328f322cc9e9e4e0bc5bfed63b285399a081b7 | 36181ffd9502fb5cd921e91b05c585fa3825d75d | refs/heads/master | 2016-09-06T06:40:02.035562 | 2016-03-04T17:22:15 | 2016-03-04T17:22:15 | 39,383,870 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | #!/usr/bin/python
from methods import *
# Run the shear experiment routine imported from methods.py.
shear()
| [
"root@mp-kadanoff.dhcp.rs.kp.dlr.de"
] | root@mp-kadanoff.dhcp.rs.kp.dlr.de |
7cf670a85f7dcf1fbf7f23cbce0cc5c89ae2b7dd | 9d7be99cdac8b809f51c46a943ad5feb14548160 | /listings2/data_scraper_listings2.py | 909ab9862d01e75ef3f07bebd7a3a3c06de53360 | [] | no_license | CateGitau/DSI_trick_challenge | 6c154b417a049ab0012edff0521d9e09387787f2 | ddafac1f21425cb2992ce717ecbb0776703ea88e | refs/heads/master | 2022-12-24T17:12:55.392276 | 2020-09-25T12:28:40 | 2020-09-25T12:28:40 | 297,908,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,659 | py | import requests as rq
import bs4 as bs
import traceback
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
import glob, os, time
import csv
from csv import writer
# # Run the lines below; if they raise an error, you need to install ChromeDriver and put it on your PATH.
# # This opens a Chrome window at the link below, which we will scrape from.
# Launch Chrome via Selenium and open the Property24 listings page to scrape.
# NOTE(review): the ChromeDriver path is hard-coded to a local machine -- adjust per install.
driver = webdriver.Chrome(executable_path="/home/cate/Downloads/chromedriver_linux64/chromedriver")
driver.get("https://www.property24.com/for-sale/cape-town/western-cape/432?PropertyCategory=House%2cApartmentOrFlat%2cTownhouse")
# Parse the rendered page and collect the listing tiles and their icon spans.
page_soup = bs.BeautifulSoup(driver.page_source,'lxml')
# Accumulator for scraped columns; refilled each page by getValues().
dict_data = {"location" :[], "price":[], "floor_size":[], "bathrooms":[], "bedrooms":[],"parking":[] }
icons = page_soup.find_all("span", class_= "p24_icons")
info = page_soup.find_all("div", class_= "p24_regularTile js_rollover_container")
def getValues(icons, info):
    """Extract listing fields from parsed result tiles into the module-level
    `dict_data` accumulator.

    Parameters:
        icons -- iterable of bs4 Tags for the "p24_icons" spans
                 (floor size / bathrooms / bedrooms / parking).
        info  -- iterable of bs4 Tags for the regular listing tiles
                 (price / location).

    Returns the (mutated) global dict_data.

    BUGFIX: the original `else:` branches contained a bare string expression
    (`""`), which is a no-op, so missing fields were silently stored as the
    unassigned Tag/None value. Missing fields are now recorded as "".
    """
    for tile in info:
        price_tag = tile.find('span', class_='p24_price')
        price = price_tag.text if price_tag else ""
        location_tag = tile.find('span', class_="p24_location")
        location = location_tag.text if location_tag else ""
        dict_data["price"].append(price)
        dict_data["location"].append(location)
    for icon in icons:
        size_tag = icon.find("span", class_="p24_size")
        floor_size = size_tag.find("span").text if size_tag else ""
        bath_tag = icon.find("span", {"title": "Bathrooms"})
        bathrooms = bath_tag.find("span").text if bath_tag else ""
        bed_tag = icon.find("span", {"title": "Bedrooms"})
        bedrooms = bed_tag.find("span").text if bed_tag else ""
        park_tag = icon.find("span", {"title": "Parking Spaces"})
        parking = park_tag.find("span").text if park_tag else ""
        dict_data["floor_size"].append(floor_size)
        dict_data["bathrooms"].append(bathrooms)
        dict_data["bedrooms"].append(bedrooms)
        dict_data["parking"].append(parking)
    return dict_data
def append_list_as_row(file_name, dict_data, field_names):
    """Append a single CSV row built from `dict_data` to `file_name`.

    Parameters:
        file_name   -- path of the CSV file (created if it does not exist).
        dict_data   -- mapping of column name -> cell value for the new row.
        field_names -- ordered column names handed to csv.DictWriter.
    """
    # 'a+' appends; newline='' lets the csv module control line endings.
    with open(file_name, 'a+', newline='') as handle:
        row_writer = csv.DictWriter(handle, fieldnames=field_names)
        row_writer.writerow(dict_data)
csv_file = "final.csv"
count = 0
# Scrape page after page, appending rows to final.csv, until clicking "Next"
# (or any other step) fails -- which is treated as having reached the last page.
while True:
    try:
        driver.implicitly_wait(10)
        # Re-parse the currently loaded page.
        page_soup = bs.BeautifulSoup(driver.page_source,'lxml')
        icons = page_soup.find_all("span", class_= "p24_icons")
        info = page_soup.find_all("div", class_= "p24_regularTile js_rollover_container")
        # Reset the accumulator, then fill it from this page's tiles.
        dict_data = {"location" :[], "price":[], "floor_size":[], "bathrooms":[], "bedrooms":[],"parking":[] }
        dict_data = getValues(icons, info)
        field_names = dict_data.keys()
        append_list_as_row('final.csv', dict_data, field_names)
        count+= 1
        # Progress indicator: overwrite the same terminal line.
        print(f'{count}\r', end = "")
        # Advance to the next results page, then give it time to load.
        loadmore = driver.find_element_by_link_text("Next").click()
        time.sleep(5)
        #loadmore.send_keys(Keys.ENTER)
    except Exception:
        # Any failure (usually no "Next" link) ends the scrape.
        print("Reached bottom of page")
        traceback.print_exc()
        break
"catherinegitau94@gmail.com"
] | catherinegitau94@gmail.com |
2b27a139918a39541b3531a05a4ea20f012839be | b9eae7cbe3929c3348c9f6506b9df3877f24c46f | /4-queens/test_state.py | 6298ae87bc730021c0bd5f776a4813b6b2abce54 | [] | no_license | carloscotrini/PythonPuzzleSolver | 82246241f8cba0c123cf6bdf883687382ba37643 | 9e8d13608a274eb096888add45420be737e1369b | refs/heads/master | 2021-04-09T11:03:06.350259 | 2018-03-17T09:52:54 | 2018-03-17T09:52:54 | 125,504,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,841 | py | import unittest
from four_queen_state import FourQueenState
class StateTester(unittest.TestCase):
    """Unit tests for the 4-queens FourQueenState implementation."""

    @staticmethod
    def are_boards_equal(board1, board2):
        # Two boards are equal iff they have the same number of rows and
        # each pair of rows joins to the same string.
        if len(board1) != len(board2):
            return False
        for row1, row2 in zip(board1, board2):
            if ''.join(row1) != ''.join(row2):
                return False
        return True

    def test_apply(self):
        # Applying "place queen at column 2" on row 1 should put a 'Q' there
        # and advance current_row to 2.
        board = [
            ['Q', '-', '-', '-'],
            ['-', '-', '-', 'Q'],
            ['-', '-', '-', '-'],
            ['-', '-', '-', '-']
        ]
        current_row = 1
        state = FourQueenState(board, current_row)
        expected_board = [
            ['Q', '-', '-', '-'],
            ['-', '-', 'Q', '-'],
            ['-', '-', '-', '-'],
            ['-', '-', '-', '-']
        ]
        expected_current_row = 2
        expected_state = FourQueenState(expected_board, expected_current_row)
        place_queen_at_col_2 = 2
        result_state = state.apply(place_queen_at_col_2)
        self.assertTrue(StateTester.are_boards_equal(result_state.board, expected_board),
                        msg="We asked your implementation to place a queen at column 2 for the following state:\n" +
                            str(state) + "\n\n" +
                            "We expected this to be the new state:\n" +
                            str(expected_state) + "\n\n" +
                            "but we got this instead:\n" +
                            str(result_state))

    def test_solved(self):
        # A full, valid 4-queens placement with all four rows filled is solved.
        board = [
            ['-', 'Q', '-', '-'],
            ['-', '-', '-', 'Q'],
            ['Q', '-', '-', '-'],
            ['-', '-', 'Q', '-']
        ]
        current_row = 4
        state = FourQueenState(board, current_row)
        self.assertTrue(state.solved(),
                        msg="Your implementation incorrectly states that this state is not solved.\n" +
                            str(state))

        # A full board with attacking queens (same diagonal/column) is not solved.
        board = [
            ['-', 'Q', '-', '-'],
            ['-', '-', 'Q', '-'],
            ['Q', '-', '-', '-'],
            ['-', '-', 'Q', '-']
        ]
        current_row = 4
        state = FourQueenState(board, current_row)
        self.assertFalse(state.solved(),
                         msg="Your implementation incorrectly states that this state is solved.\n" +
                             str(state))

    def test_consistent(self):
        # An empty board is trivially consistent.
        board = [
            ['-', '-', '-', '-'],
            ['-', '-', '-', '-'],
            ['-', '-', '-', '-'],
            ['-', '-', '-', '-']
        ]
        current_row = 0
        consistent_state = FourQueenState(board, current_row)
        self.assertTrue(consistent_state.consistent(),
                        msg="Your implementation incorrectly states that this state is not consistent.\n" +
                            str(consistent_state))

        # A full non-attacking placement is consistent.
        board = [
            ['-', 'Q', '-', '-'],
            ['-', '-', '-', 'Q'],
            ['Q', '-', '-', '-'],
            ['-', '-', 'Q', '-']
        ]
        current_row = 4
        consistent_state = FourQueenState(board, current_row)
        self.assertTrue(consistent_state.consistent(),
                        msg="Your implementation incorrectly states that this state is not consistent.\n" +
                            str(consistent_state))

        # Queens attacking each other make the state inconsistent.
        board = [
            ['-', 'Q', '-', '-'],
            ['-', '-', 'Q', '-'],
            ['Q', '-', '-', '-'],
            ['-', '-', 'Q', '-']
        ]
        current_row = 4
        inconsistent_state = FourQueenState(board, current_row)
        self.assertFalse(inconsistent_state.consistent(),
                         msg="Your implementation incorrectly states that this state is consistent.\n" +
                             str(inconsistent_state))
str(inconsistent_state))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"ccarlos@inf.ethz.ch"
] | ccarlos@inf.ethz.ch |
cf98191c381f004990782cd7c4a7c0a3e6519471 | 0b646cde920300867c3ecfa95dfa32252921e3b2 | /filehandling.py | a44ff9ea03bd6d9f02fa29520b0ae8a7bf955009 | [] | no_license | shivdazed/Python-Projects- | cefffcccf527aefb50bdbb66a2bb5da789615562 | 2beba43f452eb01b6395195abe637184680a68d3 | refs/heads/main | 2023-08-18T21:56:39.549072 | 2021-10-07T10:08:35 | 2021-10-07T10:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | file = open(r'C:\Users\anuro\Desktop\student.txt','r')
n = input("Enter your name:")
std = int(input("\Enter which standard you are in currently:"))
std_id = input("\Enter your student ID:")
m = []
print("Enter the marks of Five Subjects,Physics Chemistry Maths Biology English:")
for i in range(5):
r =int(input("Enter the marks:))
m.append(r)
| [
"noreply@github.com"
] | noreply@github.com |
5a50703fd04bbf22be867447aad4b1547f059adc | 794629855792d259834fc5075ca7dce69660b2ec | /MATLAB/gdf_to_hdf.py | 3a04c3f4b43871a13967d7d98c42b970fbd9fe98 | [] | no_license | rfeynman/GTX | 99ecd576f8eaa19dc2c2c4f6772d3e1c8ca469a9 | 64c75dc826a6399ef527b7324dcd9b2faa860841 | refs/heads/master | 2020-03-23T13:08:35.576736 | 2018-07-19T16:06:18 | 2018-07-19T16:06:18 | 141,602,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,742 | py | """Creates .hdf files from General Particle Tracer .gdf files
Author: Rory Speirs
Institution: School of Physics, The University of Melbourne.
Email: roryspeirs@gmail.com
Date: 13th June 2014
Version: 1.1
Licence: "THE BEER-WARE LICENSE" (Revision 42)
Fixes from Version 1.0:
Fixed gpthdf_to_slabhdf so it doesn't crash if you add or remove particles in GPT.
To use:
Type in terminal/command prompt:
'python gdf_to_hdf.py [.gdf_directory] [.hdf directory] ['True' if hierical layout is also desired] ['HDFtoSLAB' if only conversion of pre-existing .hdf with hierical layout to slab layout is required]'
See function docstrings for specific information.
Main GDF conversion function is 'gdf_to_hdf'. Other functions are convenience functions.
This script was intentionally made similar to the 'loadgdf.m' MATLAB script written
by Merijn Rijnders, Peter Pasmans of Eindhoven University. If you understand one,
you will understand both.
Tested on python 2.7 on OSX 10.9.2 and Windows xp.
Your python version must have access to the modules below:
"""
from __future__ import division
from pylab import *
import h5py
import time
import struct
import os
import sys
###############################################################################
def gdf_to_hdf(gdf_file_directory, hdf_file_directory):
    """Copies a General Particle Tracer .gdf file into a more standard .hdf file.

    The resulting .hdf file has a highly hierarchical layout, similar to the layout
    of the original .gdf. It can be opened by any standard hdf viewer or library.
    See http://www.hdfgroup.org/ for a GUI based viewer.

    The hierarchical structure is not necessarily the most convenient layout, so a
    companion function has also been written, 'gpthdf_to_slabhdf', which takes
    the .hdf file generated by 'gdf_to_hdf' and condenses all the data into one
    main multidimensional data array.
    """
    print 'Converting .gdf to .hdf file with hierical layout.'

    #Delete the .hdf file if it already exists to stop crashes from trying to overwrite datasets
    if os.path.exists(hdf_file_directory):
        os.remove(hdf_file_directory)
    hdf_f = h5py.File(hdf_file_directory, 'a')

    #Constants from the GDF file-format specification
    GDFNAMELEN = 16; #Length of the ascii-names
    GDFID = 94325877; #ID for GDF

    #Data types (low byte of a block's type word)
    t_ascii = int('0001', 16) #ASCII character
    t_s32 = int('0002', 16) #Signed long
    t_dbl = int('0003', 16) #Double
    t_undef = int('0000', 16) #Data type not defined
    t_null = int('0010', 16) #No data
    t_u8 = int('0020', 16) #Unsigned char
    t_s8 = int('0030', 16) #Signed char
    t_u16 = int('0040', 16) #Unsigned short
    t_s16 = int('0050', 16) #Signed short
    t_u32 = int('0060', 16) #Unsigned long
    t_u64 = int('0070', 16) #Unsigned 64bit int
    t_s64 = int('0080', 16) #Signed 64bit int
    t_flt = int('0090', 16) #Float

    #Block types (flag bits of a block's type word)
    t_dir = 256 # Directory entry start
    t_edir = 512 # Directory entry end
    t_sval = 1024 # Single valued
    t_arr = 2048 # Array

    with open(gdf_file_directory, 'rb') as f: #Important to open in binary mode 'b' to work cross platform
        #Read the GDF main header
        gdf_id_check = struct.unpack('i', f.read(4))[0]
        if gdf_id_check != GDFID:
            raise RuntimeWarning('File directory is not a .gdf file')
        time_created = struct.unpack('i', f.read(4))[0]
        hdf_f.attrs['time_created'] = str(time_created) + ': ' + str(time.ctime(int(time_created)))

        #get creator name and use string part upto zero-character
        creator = list(f.read(GDFNAMELEN))
        creator = [struct.unpack('B', element)[0] for element in creator]
        creator_name = []
        for element in creator:
            if element is 0:
                break
            else:
                creator_name.append(chr(element))
        hdf_f.attrs['creator_name'] = ''.join(creator_name)

        #get destination and use string part upto zero-character
        dest = f.read(GDFNAMELEN)
        dest = [struct.unpack('B', element)[0] for element in dest]
        destination = []
        for element in dest:
            if element is 0:
                break
            else:
                destination.append(chr(element))
        hdf_f.attrs['destination'] = ''.join(destination)

        #get other metadata about the GDF file (major.minor version bytes)
        major = struct.unpack('B', f.read(1))[0]
        minor = struct.unpack('B', f.read(1))[0]
        hdf_f.attrs['gdf_version'] = str(major) + '.' + str(minor)
        major = struct.unpack('B', f.read(1))[0]
        minor = struct.unpack('B', f.read(1))[0]
        hdf_f.attrs['creator_version'] = str(major) + '.' + str(minor)
        major = struct.unpack('B', f.read(1))[0]
        minor = struct.unpack('B', f.read(1))[0]
        hdf_f.attrs['destination_version'] = str(major) + '.' + str(minor)
        f.seek(2, 1) # skip to next block

        #Create first hdf group and sub groups for data to be put into
        #First group is called "datagrab" because it could be output at a particular time, or the projection at a particular position
        grab_group_number = 0
        grab_group = hdf_f.create_group('datagrab_' + str(grab_group_number))
        grab_group.attrs['grab_number'] = grab_group_number
        data_group = grab_group.create_group('data')
        param_group = grab_group.create_group('param')

        #Initialise values to print progress to terminal
        file_size = os.stat(gdf_file_directory)[6]
        start_time = time.time()
        last_running_time = 0

        #Read GDF data blocks until end-of-file
        lastarr = False
        while True:
            # Peek one byte to detect EOF, then rewind.
            if f.read(1) == '':
                break
            f.seek(-1, 1)

            #Read GDF block header
            name = f.read(16)
            typee = struct.unpack('i', f.read(4))[0]
            size = struct.unpack('i', f.read(4))[0]

            #Get name
            name = name.split()[0]

            #Get block type flags
            dir = int(typee & t_dir > 0)
            edir = int(typee & t_edir > 0)
            sval = int(typee & t_sval > 0)
            arr = int(typee & t_arr > 0)

            #Get data type
            dattype = typee & 255

            #Check if array block is finished
            if lastarr and not arr:
                #We save the stuff as we go rather than storing it in local dictionaries and creating all the groups at the end. Here we make the groups for next time step, as this code only runs when all data current block has been extracted
                grab_group_number += 1
                grab_group = hdf_f.create_group('datagrab_' + str(grab_group_number))
                grab_group.attrs['grab_number'] = grab_group_number
                data_group = grab_group.create_group('data')
                param_group = grab_group.create_group('param')

            #Read single value -> stored under the grab's 'param' group
            if sval:
                if dattype == t_dbl:
                    value = struct.unpack('d', f.read(8))[0]
                    param_group.create_dataset(name, data=value)
                elif dattype == t_null:
                    pass
                elif dattype == t_ascii:
                    value = str(f.read(size))
                    value = value.strip(' \t\r\n\0')
                    # A repeated name raises RuntimeError; replace the old dataset.
                    try:
                        param_group.create_dataset(name, data=value)
                    except RuntimeError:
                        del param_group[name]
                        param_group.create_dataset(name, data=value)
                elif dattype == t_s32:
                    value = struct.unpack('i', f.read(4))[0]
                    param_group.create_dataset(name, data=value)
                else:
                    # Unknown scalar type: report it and skip over its payload.
                    print 'unknown datatype of value!!!'
                    print 'name=', name
                    print 'type=', typee
                    print 'size=', size
                    value = f.read(size)

            #Read data array -> stored under the grab's 'data' group
            if arr:
                if dattype == t_dbl:
                    if (size % 8) != 0:
                        raise RuntimeWarning('Tried to save an array of doubles, but the array size is not consistant with that of doubles.')
                    value = fromfile(f, dtype=dtype('f8'), count=int(size/8))
                    data_group.create_dataset(name, data=value)
                else:
                    # Unknown array type: report it and skip over its payload.
                    print 'unknown datatype of value!!!'
                    print 'name=', name
                    print 'type=', typee
                    print 'size=', size
                    value = f.read(size)

            #Print out progress approx. every 2 seconds
            running_time = int(round(time.time()-start_time))
            if (running_time % 2 is 0) and not (running_time is last_running_time):
                percent_done = f.tell()/file_size*100
                print 'Completed: %(completed).1f%(percent_sign)s' % {'completed':percent_done, 'percent_sign':'%'}
                last_running_time = running_time

            lastarr = arr;
    f.close()
    hdf_f.close()
    print 'Converting .gdf to .hdf file with hierical layout... Complete.'
###############################################################################
def gpthdf_to_slabhdf(gdf_hdf_file_directory, slab_hdf_file_directory):
    """Creates a .hdf with data in a single slab from a .hdf with GPT style layout.

    This actually produces two datasets: 'data' contains all the information about
    the particles, and 'position' or 'time' which contains the position or time
    that the data was saved in gpt.

    Note: It is possible to add and remove particles in GPT at different times.
    The slab will be a cube which is large enouth to fit the data assuming all
    the particles exist at every datagrab. If particles don't actually exist at
    a datagrab (ie at a particular time or position), then the entries in the slab
    will be zeros for these particles. This makes it easy to see if a particle
    exists at a certain time/position: just check for a zero in the ID column.
    See the 'slab_indexing' attribute of the 'data' dataset to see how the
    dataset is indexed.

    :param gdf_hdf_file_directory: path to an existing .hdf in GPT hierarchical layout
    :param slab_hdf_file_directory: path of the slab-layout .hdf to create (overwritten)
    """
    print 'Creating .hdf file with slab layout.'
    # Delete the .hdf file if it already exits to stop crashes from trying to overwrite datasets
    if os.path.exists(slab_hdf_file_directory):
        os.remove(slab_hdf_file_directory)
    gpthdf_f = h5py.File(gdf_hdf_file_directory, 'a')  # Existing hdf file in hierical layout
    slabhdf_f = h5py.File(slab_hdf_file_directory, 'a')  # New hdf file with the slab layout
    # Transfer the original GDF file header to the attributes of the slab_hdf file
    for attribute_key in gpthdf_f.attrs.keys():
        slabhdf_f.attrs[attribute_key] = gpthdf_f.attrs[attribute_key]
    # Get information about the size of the required data slab.
    # The last datagrab holds only parameters, hence the "- 1" below.
    datagrab_keys = gpthdf_f.keys()
    datagrab_n = len(datagrab_keys) - 1
    particle_n = int(gpthdf_f['datagrab_0/param/Nmp'][...])
    column_keys = gpthdf_f['datagrab_0/data/'].keys()
    # The last datagrab always contains some other parameters. Add these to the attributes of the slab_hdf file
    for dset_key in gpthdf_f['datagrab_' + str(datagrab_n) + '/param/'].keys():
        slabhdf_f.attrs[dset_key] = gpthdf_f['datagrab_' + str(datagrab_n) + '/param/' + dset_key][...]
    # Autodetect the data_grab_variable (GPT saves either by time or by position).
    if 'time' in gpthdf_f['datagrab_0/param/'].keys():
        data_grab_variable = 'time'
    elif 'position' in gpthdf_f['datagrab_0/param/'].keys():
        data_grab_variable = 'position'
    else:
        data_grab_variable = 'unknown_grab_variable'
    # Create empty datasets which will hold all the data, the "slabs".
    data_type_required = 'float64'
    dset_slab = slabhdf_f.require_dataset('data', (datagrab_n, particle_n, len(column_keys)), dtype=data_type_required)
    dset_grab_variable = slabhdf_f.require_dataset(data_grab_variable, (datagrab_n,), dtype=data_type_required)
    # Add attributes to the slab describing how it is indexed.
    dset_slab.attrs['slab_indexing'] = '[' + data_grab_variable + '(0:' + str(datagrab_n) + '), particle_num(0:' + str(particle_n-1) + '), column(' + str(','.join(column_keys)) +') ]'
    for param_key in gpthdf_f['datagrab_0/param/'].keys():
        if str(param_key) == data_grab_variable:
            pass
        else:
            dset_slab.attrs[param_key] = gpthdf_f['datagrab_0/param/' + param_key]
    # Initialise values to print progress to terminal
    last_running_time = 0
    start_time = time.time()
    # Fill the data slab and time/position array
    for completion_count, grab_key in enumerate(datagrab_keys):
        grab_number = int(gpthdf_f[grab_key].attrs['grab_number'])
        if grab_number == datagrab_n:
            # Final grab holds parameters only; nothing to copy into the slab.
            pass
        else:
            dset_grab_variable[grab_number] = gpthdf_f[grab_key + '/param/' + data_grab_variable][...]
            # Now fill the slab. Be Careful, sometimes not all particles actually exist.
            particle_IDs = gpthdf_f[grab_key + '/data/ID'][...]
            # Worth separating because it is faster to fill the whole column with a ':'
            if len(particle_IDs) == particle_n:
                for idx, column_key in enumerate(column_keys):
                    dset_column = gpthdf_f[grab_key + '/data/' + column_key]
                    dset_slab[grab_number, :, idx] = dset_column[:]
            else:
                # Scatter by particle ID (IDs are 1-based, hence the -1 offset).
                for idx, column_key in enumerate(column_keys):
                    dset_column = gpthdf_f[grab_key + '/data/' + column_key]
                    dset_slab[grab_number, particle_IDs-1, idx] = dset_column[:]
        # Print out progress every 2 seconds.
        # NOTE(review): `is 0` / `is last_running_time` rely on CPython small-int
        # interning; `==` would be the correct value comparison. Also, under
        # Python 2 `completion_count/datagrab_n*100` is integer division, so
        # percent_done only ever shows 0 or 100 — confirm intent.
        running_time = int(round(time.time()-start_time))
        if (running_time % 2 is 0) and not (running_time is last_running_time):
            percent_done = completion_count/datagrab_n*100
            print 'Completed: %(completed).1f%(percent_sign)s' % {'completed':percent_done, 'percent_sign':'%'}
            last_running_time = running_time
    gpthdf_f.close()
    slabhdf_f.close()
    print 'Creating .hdf file with slab layout... Complete.'
###############################################################################
def terminal_call(terminal_args):
    """Allows arguments to be conveniently passed from the terminal/command line so there is no need to edit this python script.

    To use, type: 'python gdf_to_hdf.py .gdf_directory [.hdf directory] ['True' if hierical layout is also desired] ['HDFtoSLAB' if only conversion of pre-existing .hdf with hierical layout to slab layout is required]'

    Note: order of arguments in '[]' is irrelivant and hierical file will be
    appended with '_hierical.hdf', or if conversion from hierical to slab, then
    slab file will be appended with '_slab.hdf'. Conversions can only be done
    from .gdf to hierical layout .hdf to slab layout .hdf, and not the other
    way around.

    :param terminal_args: sys.argv-style list; argument roles are detected by
        suffix ('.gdf'/'.hdf') or value ('True' variants, 'HDFtoSLAB'), not order.
    """
    hierical_suffix = '_hierical'
    slab_suffix = '_slab'
    # Flags describing which arguments were seen on the command line.
    HDFtoSLAB = False
    gdf_arg = False
    hdf_arg = False
    keep_hierical = False
    # Classify each argument by suffix/value rather than by position.
    for arg in terminal_args:
        if arg[-4:] == '.gdf':
            gdf_file_directory = arg
            gdf_arg = True
        elif arg[-4:] == '.hdf':
            hdf_file_directory = arg
            hdf_arg = True
        elif arg in ['True', 'true', 'TRUE', 't', 'T', 'Yes', 'yes', 'YES', 'y', 'Y']:
            keep_hierical = True
        elif arg in ['HDFtoSLAB']:
            HDFtoSLAB = True
    # If this option, only action is convert a pre-existing .hdf that is in hierical layout
    if HDFtoSLAB:
        if hdf_arg:
            if os.path.exists(hdf_file_directory):
                hdf_slab_file_directory = hdf_file_directory[:-4] + slab_suffix + hdf_file_directory[-4:]
                gpthdf_to_slabhdf(hdf_file_directory, hdf_slab_file_directory)
            else:
                print 'The .hdf file does not exist to convert to slab layout.'
        else:
            print 'The .hdf file was not specified.'
    # If this option, convert .gdf to .hdf slab layout, and optionally keep the .hdf with hierical layout that is generated along the way.
    elif gdf_arg:
        if os.path.exists(gdf_file_directory):
            if not hdf_arg:
                # No destination given: derive both file names from the .gdf name.
                hdf_slab_file_directory = gdf_file_directory[:-4] + '.hdf'
                hdf_file_directory = gdf_file_directory[:-4] + hierical_suffix + '.hdf'
                print 'Destination .hdf directory not specified. Defaulting to ' + hdf_slab_file_directory
            else:
                # Destination given: the named .hdf becomes the slab file and the
                # intermediate hierarchical file gets the '_hierical' suffix.
                hdf_slab_file_directory = hdf_file_directory
                hdf_file_directory = hdf_slab_file_directory[:-4] + hierical_suffix + '.hdf'
            # Two-step conversion: .gdf -> hierarchical .hdf -> slab .hdf.
            gdf_to_hdf(gdf_file_directory, hdf_file_directory)
            gpthdf_to_slabhdf(hdf_file_directory, hdf_slab_file_directory)
            if not keep_hierical:
                print 'Removing .hdf with hierical layout.'
                os.remove(hdf_file_directory)
        else:
            print 'The .gdf file does not exist to convert to .hdf'
    else:
        print '.gdf file not specified to convert to .hdf, or \'HDFtoSLAB\' option not selected to convert .hdf to slab layout'
###############################################################################
def script_call():
    """Convert the hard-coded .gdf file below by editing its name in this script.

    Prefer `terminal_call` and command-line arguments for day-to-day use; this
    exists for users who would rather edit the script directly.
    """
    source = 'gptfile.gdf'
    stem = source[:-4]
    hierical = stem + '.hdf'
    slab = stem + '_slab.hdf'
    # First produce the hierarchical .hdf, then re-lay it out as a slab.
    gdf_to_hdf(source, hierical)
    gpthdf_to_slabhdf(hierical, slab)
###############################################################################
# Make program run now...
if __name__ == "__main__":
    # Terminal use: 'python gdf_to_hdf.py [.dgf dir] [.hdf dir] ['True' if slab desired] ['HDFtoSLAB' if only conversion of pre-existing .hdf is required]'
    # Note: order of arguments in '[]' is irrelivant
    terminal_args = sys.argv
    # Bug fix: `is 1` compared object identity and only worked because CPython
    # interns small integers; `==` is the correct value comparison.
    if len(terminal_args) == 1:
        script_call()  # Run 'script_call' function if no terminal arguments passed
    else:
        terminal_call(terminal_args)  # Run 'terminal_call' function if arguments are passed
| [
"wedwrd@gmail.com"
] | wedwrd@gmail.com |
47b046f53c3bbc3a2866d330046d86bfbe4ff1e8 | ac05fb0224ac423236ed492bc2d3e7a78f75822f | /Calculator.py | 22706a86ca12569a42420bcdd8cbde2a131c71d9 | [] | no_license | yash-1o1/calculator | 4ae4f1063749db9ba86efc6b083c78ec77d18dc5 | b6bd4c368a4f1da6b7e015cc38b65718e92a8a14 | refs/heads/master | 2023-03-06T10:58:28.051752 | 2021-02-22T02:08:18 | 2021-02-22T02:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,890 | py | operation = input("Choose an operation to input (You can choose 'add', 'sub', 'multi' , 'div'): ")
if operation == "add":
first = input('Choose the first number to be added number: ')
second = input('Choose the second number: ')
output = int(first) + int(second)
print ('The result of addition was %s \n' % (output))
elif operation == "sub":
first = input('Choose first number to be subtracted from: ')
second = input('Choose the second number to be subtracted: ')
output = int(first) - int(second)
print ('The result of subtraction was %s \n' % (output))
elif operation == "multi":
first = input('Choose the first number you want to multiply: ')
second = input('Choose the second number: ')
output = int(first) * int(second)
print ('The result of multiplication was %s \n' % (output))
elif operation == "div":
first = input('Choose the divident: ')
second = input('Choose the divisor: ')
if int(second) == 0:
output("Error: Can't divide by 0 \n")
else:
output = int (first) / int(second)
print ('The result of division was %s \n' % (output))
else:
print ('Invalid command entered')
contnue = input("Choose an operation to input (You can choose 'yes' or 'no'): ")
while contnue == "yes":
operation = input("Choose an operation to input (You can choose 'add', 'sub', 'multi' ,'div'): ")
if operation == "add":
first = input('Choose the first number to be added number: ')
second = input('Choose the second number: ')
output = int(first) + int(second)
print ('The result of addition was %s \n' % (output))
elif operation == "sub":
first = input('Choose first number to be subtracted from: ')
second = input('Choose the second number to be subtracted: ')
output = int(first) - int(second)
print ('The result of subtraction was %s \n' % (output))
elif operation == "multi":
first = input('Choose the first number you want to multiply: ')
second = input('Choose the second number: ')
output = int(first) * int(second)
print ('The result of multiplication was %s \n' % (output))
elif operation == "div":
first = input('Choose the divident: ')
second = input('Choose the divisor: ')
if int(second) == 0:
print("Error: Can't divide by 0 \n")
else:
output = int (first) / int(second)
print ('The result of division was %s \n' % (output))
else:
print ('Invalid command entered')
contnue = input("Choose an operation to input (You can choose 'yes' or 'no'): ")
if contnue != "yes":
print ('Thank You Professor! Was an amazing overall semester and even though this the third time \nI have taken a class from you, I just hope to get an opportunity to be taught by you again. \nHave a nice day!') | [
"60495222+yashv99@users.noreply.github.com"
] | 60495222+yashv99@users.noreply.github.com |
1a36c100420b71b8f0f0538dbf8634185eb93879 | bb121db77a53448f5235b5524eadbe3b59efea04 | /æfingapróf/student_grades.py | 9cacce50adad97b983d166771c5d2dd44925eea2 | [] | no_license | RoslinErla/AllAssignments | a8bfd4e4df5177e76472678cbfeb13b1d49abc56 | 1a8f098e9ecde015de70970cd5c17501c510fb19 | refs/heads/master | 2020-07-27T12:44:29.974705 | 2019-11-15T13:28:44 | 2019-11-15T13:28:44 | 209,094,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | # Skrifið Python forrit sem heldur utan um einkunnir fjögurra nemanda.
# Hver nemandi á einkvæmt nafn (þ.e enginn nemandi heitir það sama) og 3 einkunnir.
# Write a Python program that manages grades for four students.
# Each student has a unique name and 3 grades.
# Notandi á að geta slegið inn nöfn allra nemandanna sem og einkunnir hvers og eins nemanda.
# Forritið skal prenta út nöfn allra nemanda í stafsrófsröð og einkunnir þeirra.
# Að lokum prentar forritið út nafn nemandans með hæstu meðaleinkunnina ásamt meðaleinkunninni hans. Meðaleinkunnin skal vera prentuð út með tveimur aukastöfum.
# The user should be able to input the name of each student and the corresponding grades for each student.
# The program prints the names of each student in alphabetical order along with their grades.
# Finally, the program prints the name of the student with the highest average grade along with the student's average grade. The average grade should be printed with 2 digits after the decimal point.
# Ef að tveir eða fleiri nemendur eru með sömu meðaleinkunnina skal birta þann nemanda sem kemur fyrir fyrstur í stafrófinu.
# If two or more students have the same average grade the student that appears first in the alphabetical order should be printed.
import operator
STUDENTS = 4
GRADES = 3
def make_student_dict(a_dict):
    """Prompt for STUDENTS student names and GRADES grades each.

    Fills `a_dict` in place, mapping each name to its list of float grades,
    and returns the same dict for convenience.
    """
    for _ in range(STUDENTS):
        name = input("Student name: ")
        grades = []
        for grade_no in range(1, GRADES + 1):
            raw = input("Input grade number" + " " + str(grade_no) + ": ")
            grades.append(float(raw))
        a_dict[name] = grades
    return a_dict
def print_dict(a_dict):
    """Print every student and their grades, sorted by name."""
    print("Student list:")
    for name, grades in sorted(a_dict.items(), key=operator.itemgetter(0)):
        print(name, ":", grades)
def find_average(a_dict):
    """Replace each student's grade list with its arithmetic mean, in place.

    Improvement: divides by the actual number of grades rather than the
    module-level GRADES constant, so the result stays correct for any list
    length and the function no longer depends on module state.

    :param a_dict: mapping of student name -> list of float grades (mutated)
    :return: the same dict, now mapping name -> average grade
    """
    for name, grades in a_dict.items():
        a_dict[name] = sum(grades) / len(grades)
    return a_dict
def print_highest_ave(a_dict):
    """Print the student with the highest average grade (2 decimal places).

    Bug fix: the assignment spec (see the header comments of this file) says
    ties go to the alphabetically-first student, but the previous reverse sort
    by value alone is stable and therefore kept *insertion* order on ties.
    Sorting by (-average, name) enforces the documented tie-break.

    :param a_dict: mapping of student name -> average grade (floats)
    """
    print("Student with highest average grade:")
    best_name, best_avg = min(a_dict.items(), key=lambda kv: (-kv[1], kv[0]))
    print(best_name, "has an average grade of {:.2f}".format(best_avg))
def main():
    """Collect grades for every student, then print the sorted list and the top student."""
    students = dict()
    make_student_dict(students)
    print_dict(students)
    find_average(students)
    print_highest_ave(students)

main()
| [
"roslin19@ru.is"
] | roslin19@ru.is |
7a3938b589e748860c0fad0c8dd3a50430ef40b9 | 074afd26d00bb742b03c12891b057ab263e640bf | /codeforces/1451A.py | 6eac98a8a0739915d4a2d7cff440dedefa842769 | [] | no_license | IsmailTitas1815/Data-Structure | 7a898800b1e53c778b1f2f11b0df259e52c20140 | fece8dd97d3e162e39fc31d5f3498a6dac49b0f0 | refs/heads/master | 2023-02-05T10:39:49.349484 | 2020-12-21T13:37:22 | 2020-12-21T13:37:22 | 296,343,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | for i in range(int(input())):
n = int(input())
if n<4:
if n==1:
print(0)
elif n==2:
print(1)
elif n==3:
print(2)
else:
if n%2==0:
print(2)
else:
print(3)
| [
"titas.sarker1234@gmail.com"
] | titas.sarker1234@gmail.com |
2b323efbad99418306a935443e1532e869158494 | 9567f5dc22025b4164f6828384bc0b2366e9aa56 | /Sparse_Matrix/mapping.py | 989487d036f6b2c628392bb8049d1ef08cc9c69b | [] | no_license | saurabhjn76/Sparse_Matrix_Effcient_Storage | 694b07f8321c7c7ef6e583706eaa891bd352b10e | 41fdc64c53d951aab163ca8bc2b91df19172826f | refs/heads/master | 2020-03-10T00:58:28.769319 | 2018-05-04T07:25:15 | 2018-05-04T07:25:39 | 129,096,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from bitstring import BitArray
import random
# s = random.randint(0, 2**10000 - 1)
# b = BitArray(uint=s, length=10000)
# for i in range(0,100):
# print b[i]
row_mapping = {}
row_reverse_mapping = {}
col_mapping = {}
col_reverse_mapping = {}
limit_index = 3
def mapInit(mapping, reverse_mapping, limit_index):
    """Populate `mapping`/`reverse_mapping` with `limit_index` unique random bit strings.

    Each key is the binary representation of a random 10000-bit integer;
    duplicates are re-drawn so exactly `limit_index` distinct entries exist.
    Both dicts are mutated in place and also returned.
    """
    count = 0
    while count < limit_index:
        value = random.randint(0, 2**10000 - 1)
        bits = BitArray(uint=value, length=10000)
        key = str(bits.bin)
        # Only accept a draw we have not seen before.
        if key not in mapping.keys():
            mapping[key] = count
            reverse_mapping[count] = key
            count = count + 1
    return mapping, reverse_mapping
# if reverse_mapping[0]== str(BitArray(bin =reverse_mapping[0]).bin):
# print 'Yes'
# else:
# print 'failed'
#print mapping
| [
"saurabhjn76@gmail.com"
] | saurabhjn76@gmail.com |
04a62d97db8038ce3e524429f9e169268e86342c | c4e62e89f74b1c26da30d292f18842d333326951 | /Symmetric_tree/answer.py | 2cd9737af395124f2637602006014bc29f5d51c4 | [] | no_license | yunishimura0716/LeetCodeSolutions | ce4b85b652de458445683027f674c05e74fa41b5 | 0bf771e9fdf11a73c5631cdec07c8b5552a6b070 | refs/heads/master | 2023-02-13T14:37:16.605460 | 2021-01-15T01:07:32 | 2021-01-15T01:07:32 | 288,928,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
    def isSymmetric(self, root: "TreeNode") -> bool:
        """Return True if the binary tree rooted at `root` mirrors itself.

        Improvements over the previous version: the large block of
        commented-out queue-based dead code is removed, and the `TreeNode`
        annotation is quoted so the class definition no longer depends on
        `TreeNode` being pre-defined at import time.

        Time: O(n). Space: O(h) recursion depth, h = tree height.
        """
        def is_mirror(left, right):
            # Two empty subtrees mirror each other.
            if left is None and right is None:
                return True
            # Exactly one empty subtree breaks the symmetry.
            if left is None or right is None:
                return False
            # Values must match and children must mirror crosswise.
            return (left.val == right.val
                    and is_mirror(left.left, right.right)
                    and is_mirror(left.right, right.left))

        return True if root is None else is_mirror(root.left, root.right)
| [
"yunishimura0716@gmail.com"
] | yunishimura0716@gmail.com |
97f670472d760a6545e49f73ff3d30a0f4f277f2 | 9b336efa6f25a584aea6fd69795179cdc3883786 | /DFLTest/join_test/admin.py | 0b4f76407eb0285c474d6a7639127bb2ab6bfa03 | [] | no_license | mhall119/django-fluxlang | 42eb4543d46c7a62c5a152036da065387e86e5ec | c795d9bd36d67cf0dc21011eaed2642e28f5fa05 | refs/heads/main | 2023-05-07T10:40:38.637608 | 2021-05-28T14:52:04 | 2021-05-28T14:52:04 | 371,733,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from django.contrib import admin
from .models import HostInfo
# Register your models here.
class HostAdmin(admin.ModelAdmin):
list_display = ("host", "ip_addr", "site")
admin.site.register(HostInfo, HostAdmin)
| [
"mhall119@gmail.com"
] | mhall119@gmail.com |
6b2421764e5d39016f0e51d1a1ad0d4d9f0e6e10 | e27333261b8e579564016c71d2061cc33972a8b8 | /.history/ScrapeArticleTitle_20210803181904.py | 01c3cfb222879a03f49b8531a28bb390ba6afeaa | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | import pandas as pd
import json
import ast
import os
from nltk.tokenize import word_tokenize
from IPython.display import display
def get_article_titles_from_json():
    """Load the 1M-article JSONL dump into a DataFrame, drop the heavy columns,
    and print each remaining row.

    NOTE(review): the path is a hard-coded absolute Windows path, the whole
    file is read into memory at once (the comment says it is too huge), and
    the column-printing loop appears twice — this looks like an editor-history
    snapshot; confirm before relying on it.
    """
    filename = r"D:\Desktop\IR_term_8\sample-1M.jsonl" #file is too huge
    with open(filename) as json_file:
        # Reads every line, then parses each line as one JSON article record.
        data = json_file.readlines()
        data = list(map(json.loads, data))
        df = pd.DataFrame(data)
        for col in df.columns:
            print(col)
    # Duplicate of the loop above — presumably left over from iterative editing.
    for col in df.columns:
        print (col)
    # Drop the bulky article body and media-type columns, keeping the metadata.
    labels_to_drop = ["content", "media-type"]
    df = df.drop(labels_to_drop, axis = 1)
    count = len(df)
    for idx, e in df.iterrows():
        print("Row ",idx," out of ",count)
        entry = e.values.tolist()
        print (entry)
        #for src in src_lst:
        # print (src)
    #output.to_csv(output_path, sep='\t', header=is_first, index=False, mode='a')
    #is_first = False
#df.to_csv('article_titles.csv', index=False)
#Tokenising Funtions
def tokenize_stem_lower(text):
    """Tokenize `text`, keep only alphabetic tokens, lowercase and stem them,
    and return them re-joined with single spaces.

    Bug fix: the previous version referenced a bare name `porter` that is
    never defined anywhere in this file, so every call raised NameError.
    The stemmer is now created here (nltk is already a dependency of this file).
    """
    from nltk.stem.porter import PorterStemmer
    porter = PorterStemmer()
    tokens = word_tokenize(text)
    tokens = list(filter(lambda x: x.isalpha(), tokens))
    tokens = [porter.stem(x.lower()) for x in tokens]
    return ' '.join(tokens)
def get_clean_data(df):
    """Add a 'clean_text' column holding the tokenized/stemmed form of each tweet."""
    df['clean_text'] = df['tweet'].apply(tokenize_stem_lower)
    return df
def check_if_article_itle_exist_in_tweets_csv(tweets_data, titles_data):
    """Keep only the title rows whose `id` appears in `tweets_data.article_id`,
    display them, and write them to 'article_title_new.csv'.

    Improvements: row-by-row `DataFrame.append` (O(n) per call and removed in
    pandas 2.0) plus O(m) list membership per row is replaced by a single set
    lookup + `isin` boolean mask, which also preserves column order/dtypes.

    :param tweets_data: DataFrame with an 'article_id' column
    :param titles_data: DataFrame with an 'id' column
    """
    known_ids = set(tweets_data['article_id'])
    new_df = titles_data[titles_data['id'].isin(known_ids)]
    display(new_df)
    new_df.to_csv('article_title_new.csv', index=False)
    return
get_article_titles_from_json() | [
"chiayik_tan@mymail.sutd.edu.sg"
] | chiayik_tan@mymail.sutd.edu.sg |
7b2de2370af01dcc4b23681e70b09bab35acf286 | 3c5c4c4fb296d08e9e984c4a60ae4fa147293e9a | /ceres/__init__.py | b359ff8e73e6e5bd29908753a42b11e1a2d10ffa | [
"Apache-2.0"
] | permissive | signingup/ceres-combineharvester | a8874ab11145e7ba2223b85483b96dea01054ad0 | aad918a03a4a522e0e2f3bac104d19d693d6bf79 | refs/heads/main | 2023-07-25T04:11:13.765471 | 2021-09-09T14:59:48 | 2021-09-09T14:59:48 | 404,918,382 | 1 | 0 | Apache-2.0 | 2021-09-10T01:22:20 | 2021-09-10T01:22:20 | null | UTF-8 | Python | false | false | 313 | py | from pkg_resources import DistributionNotFound, get_distribution, resource_filename
# Resolve the installed package version; fall back when running from a
# source checkout where the distribution metadata is absent.
try:
    __version__ = get_distribution("ceres-blockchain").version
except DistributionNotFound:
    # package is not installed
    __version__ = "unknown"

# Absolute path to the bundled PyInstaller spec, resolved via package data.
PYINSTALLER_SPEC_PATH = resource_filename("ceres", "pyinstaller.spec")
"hulatang_eric@163.com"
] | hulatang_eric@163.com |
015a80c9d53750fc086c6bd24e6383fb96225859 | 49d33002fbef0be0bc7dcee807865c97130d64fe | /src/oscar/apps/voucher/__init__.py | 1eba2925b030cd2958cc65cca10ca5198fb4ad2a | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | specialunderwear/django-oscar | 8e9e48254ca61bcb4a9fb4642c823ba72b3ca630 | fc5d2601583c83f7361437eb3c2d6dc132e4d574 | refs/heads/master | 2023-07-21T03:19:13.243668 | 2023-03-07T09:14:41 | 2023-03-07T09:14:41 | 114,625,742 | 0 | 4 | BSD-3-Clause | 2023-07-06T10:12:26 | 2017-12-18T10:08:46 | Python | UTF-8 | Python | false | false | 63 | py | default_app_config = 'oscar.apps.voucher.config.VoucherConfig'
| [
"m@maikhoepfel.de"
] | m@maikhoepfel.de |
f6ca25189a4391cf1ce1aff9bf54cd7dc034d81b | 5a9b775f8a5e25a369228ee10111cc05eb06ad39 | /ejm1.py | 268a9ff28def6460d12eced9f76881e395753d28 | [] | no_license | LuisDLCP/Landslide_Project | 5280881fc339fe745d3a88828a18dd454033aa58 | 619891e5b3bfa741f71bccefec38864892282002 | refs/heads/master | 2022-07-27T14:24:36.290440 | 2019-11-16T01:03:04 | 2019-11-16T01:03:04 | 203,256,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 16:26:37 2019
@author: soporte
"""
import os
#import sys
#sys.path.insert(1,os.getcwd()+'/Results/Imagenes_finales_reconstruidas_BP/')
#import TEXT as tx
#os.chdir(os.path.dirname(__file__))
#print("La ruta es: "+os.getcwd())
i=0
scr_dir = os.getcwd()+"/Results/Imagenes_finales_reconstruidas_BP/"
"""
for filename in os.listdir(scr_dir):
print(filename)
dst = "Imagen_"+str(i+10)+".png"#"Imagen_"+str(i).zfill(3)+".png" # .zfill, it zero padded a number /media/soporte/e2a2d167-bcfd-400a-91c8-f1236df2f7e4/soporte/Landslide_Project/Desarrollo/Software/Procesamiento
src = scr_dir+filename
dst = scr_dir+dst
os.rename(src,dst)
i += 1
"""
os.system("ffmpeg -framerate 5 -start_number 10 -i "+scr_dir+"Imagen_%d.png -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p "+scr_dir+"Rec_img4.mp4")
#tx.texto()
#%% Using terminal instructions in python
import subprocess as sb
sb.check_call(['ls','-la'])
rs = sb.Popen(['ffmpeg',
'-framerate','5',
'i','image_name_%d.png',
'-c:v','libx264',
'-profile:v','high',
'-crf','20'
])
| [
"luis11.dlcp@gmail.com"
] | luis11.dlcp@gmail.com |
230bba73a64891c2a9341bff643b95076853826d | 991d0dfd3727bcebe4e19999ac69a2331b7934ac | /class_1/l01_s05.01.py | c00f9611f1868efa8dc08a4ee9ece40d39989bb7 | [] | no_license | JuanManuelHuerta/NLP_2018 | 8e00b096209b542f64e6186a4714466fe73f870e | f350c4387a7f0408cd65d7be829f716273a29b2c | refs/heads/master | 2020-03-07T15:48:21.801324 | 2018-04-12T16:30:15 | 2018-04-12T16:30:15 | 127,565,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | import sys
import json
import unicodedata
import numpy
from nltk import sent_tokenize, word_tokenize, pos_tag
import re
import operator
import math
def custom_word_tokenize(my_string):
    """Split `my_string` into lowercase word tokens.

    Pipeline: separate "word.Word" run-ons, lowercase and replace common
    punctuation/whitespace with spaces, drop remaining ". " sequences,
    then split on single spaces.
    """
    separated = re.sub(r'([a-z])\.([A-Z])', r'\1 \2', my_string)
    despaced = re.sub(r'[;\,\:\s \(\)\-\!\?]', r' ', separated.lower())
    cleaned = re.sub(r'\. ', r' ', despaced)
    return cleaned.split(" ")
# --- Load the stop-word list, one word per line. ---
fp=open("stop_words.txt","rt")
stop_words=set()
for line in fp:
    stop_words.add(line.rstrip())
print "Loaded stop words!"

# --- Load the Amazon automotive reviews (one JSON object per line). ---
# NOTE(review): neither file handle is ever closed; a `with` block would be safer.
fp=open("../reviews_Automotive_5.json","rt")
all_data = []
reviews_text=[]
products_count={}
for line in fp:
    review_data = json.loads(line)
    all_data.append(review_data)
    # Normalize unicode to its closest ASCII representation.
    review_n = unicodedata.normalize('NFKD',review_data["reviewText"]).encode('ascii','ignore')
    reviews_text.append(review_n)
    asin = review_data["asin"]
    if not asin in products_count:
        products_count[asin]=0
    products_count[asin]+=1
#print "Number of reviews", len(all_data)
#print "Number of products", len(products_count)
#print "Average review per product", numpy.mean(products_count.values())
#for review in reviews_text:
#    print "Original Review", review
#    print "nltk tokenizer", word_tokenize(review)
#    print "custom tokenizer", custom_word_tokenize(review)

# --- Build term frequencies, overall and per product (keyed by ASIN). ---
master_dictionary={}
dictionary_per_score={}
for review_data in all_data:
    review = unicodedata.normalize('NFKD',review_data["reviewText"]).encode('ascii','ignore')
    score = review_data["asin"]
    if not score in dictionary_per_score:
        dictionary_per_score[score]={}
    words = custom_word_tokenize(review)
    for word in words:
        if not word in stop_words:
            if not word in master_dictionary:
                master_dictionary[word]=0
            if not word in dictionary_per_score[score]:
                dictionary_per_score[score][word]=0
            master_dictionary[word]+=1
            dictionary_per_score[score][word]+=1

# --- Document frequency: in how many products does each word occur? ---
top_words={}
InverseDocumentFrequency={}
for asin in dictionary_per_score:
    for word in dictionary_per_score[asin]:
        if not word in InverseDocumentFrequency:
            InverseDocumentFrequency[word]=0
        InverseDocumentFrequency[word]+=1

# --- Score each word as TF * log(N / DF) and print the top 25 per product. ---
# NOTE(review): under Python 2, len(...)/InverseDocumentFrequency[word] is
# integer (floor) division, which coarsens the IDF weighting — confirm intent.
for score in dictionary_per_score:
    print "ASIN", score
    for word in dictionary_per_score[score]:
        dictionary_per_score[score][word]=dictionary_per_score[score][word]*math.log(len(dictionary_per_score)/InverseDocumentFrequency[word])
    dps_sorted = sorted(dictionary_per_score[score].items(),key=operator.itemgetter(1),reverse=True)[0:25]
    for word in zip(range(len(dps_sorted)),dps_sorted):
        print score, word
| [
"jmhuertany@gmail.com"
] | jmhuertany@gmail.com |
511a1c80d600660fbfcf93341b758a5bc1af00b4 | 33f1bfde0ae84747ed66946917bd94edf8e9b512 | /06 - Funktionen/Notebooks in anderen Formaten/py/Aufgabe Geburtsstatistiken (Loesung).ipynb.py | 465b2891919d780b1783435248a5793427a0bd34 | [] | no_license | LCK1635/Python_Kurs | 66bb8ce356a9c3fe41a90c2995bb63e7a81f2fe4 | 8742604e677baa3774cde9b2434c1b40470a224f | refs/heads/master | 2021-09-07T04:23:36.732602 | 2018-02-17T12:56:32 | 2018-02-17T12:56:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | # Diese .py - Datei wurde automatisch aus dem IPython - Notebook (.ipynb) generiert.
#
# Gelegentlich wurde ich von Teilnehmern gefragt, ob ich die Kursmaterialien nicht
# auch als normale .py - Datien bereitstellen könne. Dadurch ist es möglich, den Code
# ohne Jupyter zu öffnen, beispielsweise wenn Python-Programme in einem Terminal oder in
# Eclipse entwickelt werden.
#
# Dem möchte ich hiermit nachkommen. Ich empfehle dir aber trotzdem, schau' dir lieber die
# IPython - Notebooks direkt an, oder den HTML-Export eben dieser. Dieser reine .py-Export
# ist meiner Meinung nach etwas unübersichtlich.
# coding: utf-8
# ## Aufgabe!
#
# Finde heraus, wie oft der Name "Max" als männlicher Vorname in Kalifornien zwischen 1950 und 2000 (jeweils einschließlich) vergeben wurde! Verwende dazu die bereitgestellte .csv - Datei (../data/names.csv)!
# In[12]:
# Count how often "Max" was given as a male name in California, 1950-2000 inclusive.
occurences = 0
with open("../data/names.csv", "r") as csv_file:
    for line in csv_file:
        fields = line.strip().split(",")
        # Skip the header row.
        if fields[2] == "Year":
            continue
        year = int(fields[2])
        if fields[1] == "Max" and 1950 <= year <= 2000 and fields[3] == "M" and fields[4] == "CA":
            occurences += int(fields[5])
print(occurences)
| [
"tillin@besonet.ch"
] | tillin@besonet.ch |
4acd426428bf36b3e05b49f55188a9d1fb157d9d | eccda8bebcf343c6c2742980a604905135485b69 | /library/f5bigip_ltm_persistence_ssl.py | f4700fcb09e98962ecc1758d5f6d19e2c719c089 | [
"Apache-2.0"
] | permissive | erjac77/ansible-module-f5bigip | 5c920dc239098d6d3a8311da3ccb9562428a8362 | 96af6d5dc77d8ccbe18cb4fdc916625756e5f9dd | refs/heads/master | 2021-01-11T08:33:52.304903 | 2020-02-14T21:42:09 | 2020-02-14T21:42:09 | 76,477,286 | 6 | 5 | Apache-2.0 | 2018-08-09T20:41:31 | 2016-12-14T16:30:04 | Python | UTF-8 | Python | false | false | 5,650 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_persistence_ssl
short_description: BIG-IP ltm persistence ssl module
description:
- Configures a Secure Socket Layer (SSL) persistence profile.
version_added: "2.4"
author:
- "Eric Jacob (@erjac77)"
options:
app_service:
description:
- Specifies the application service to which the object belongs.
defaults_from:
description:
- Specifies the existing profile from which the system imports settings for the new profile.
default: ssl
description:
description:
- Specifies descriptive text that identifies the component.
match_across_pools:
description:
- Specifies, when enabled, that the system can use any pool that contains this persistence record.
default: disabled
choices: ['enabled', 'disabled']
match_across_services:
description:
- Specifies, when enabled, that all persistent connections from a client IP address, which go to the same
virtual IP address, also go to the same node.
default: disabled
choices: ['enabled', 'disabled']
match_across_virtuals:
description:
- Specifies, when enabled, that all persistent connections from the same client IP address go to the same
node.
default: disabled
choices: ['enabled', 'disabled']
mirror:
description:
- Specifies whether the system mirrors persistence records to the high-availability peer.
default: disabled
choices: ['enabled', 'disabled']
name:
description:
- Specifies a unique name for the component.
required: true
override_connection_limit:
description:
- Specifies, when enabled, that the pool member connection limits are not enforced for persisted clients.
default: disabled
choices: ['enabled', 'disabled']
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
timeout:
description:
- Specifies the duration of the persistence entries.
default: 300
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM SSL Persistence profile
f5bigip_ltm_persistence_cookie:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_ssl_persistence
partition: Common
description: My ssl persistence profile
defaults_from: /Common/ssl
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Builds the Ansible argument spec for the LTM SSL persistence module."""

    @property
    def argument_spec(self):
        """Module-specific options merged with the shared F5 provider and
        named-object option sets."""
        spec = dict(
            app_service=dict(type='str'),
            defaults_from=dict(type='str'),
            description=dict(type='str'),
            match_across_pools=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            match_across_services=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            match_across_virtuals=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            mirror=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            override_connection_limit=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            timeout=dict(type='int')
        )
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        return spec

    @property
    def supports_check_mode(self):
        """This module supports Ansible check mode."""
        return True
class F5BigIpLtmPersistenceSsl(F5BigIpNamedObject):
    """Maps CRUD verbs onto the iControl REST SSL persistence endpoint."""

    def _set_crud_methods(self):
        # Bind the endpoint object once, then reference its bound methods.
        ssl = self._api.tm.ltm.persistence.ssls.ssl
        self._methods = {
            'create': ssl.create,
            'read': ssl.load,
            'update': ssl.update,
            'delete': ssl.delete,
            'exists': ssl.exists
        }
def main():
    """Module entry point: build the params, run the flush, report the result."""
    params = ModuleParams()
    module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)

    try:
        worker = F5BigIpLtmPersistenceSsl(check_mode=module.check_mode, **module.params)
        module.exit_json(**worker.flush())
    except Exception as exc:
        module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| [
"erjac77@gmail.com"
] | erjac77@gmail.com |
b3a8ab3db053cd9b4995bedf92e5a175e903cd04 | 75cb00c74d10d7fd88b4fd5d3c9a13a56cbd57ac | /questions.py | 9a4f193c67e7697729d731424d77d53efd3f02c6 | [] | no_license | Muhaimeen92/Question-Answer-AI | 51a1edb9269270b408cf664f3ea738e32a484f03 | 1a2bcd6eeef2a56db7eb5071a43d039f4a77cd90 | refs/heads/main | 2023-02-18T19:42:34.006468 | 2021-01-09T23:48:02 | 2021-01-09T23:48:02 | 328,263,067 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,082 | py | import nltk
import os
import sys
import string
from collections import Counter
FILE_MATCHES = 3
SENTENCE_MATCHES = 3
def main():
# Check command-line arguments
if len(sys.argv) != 2:
sys.exit("Usage: python questions.py corpus")
# Calculate IDF values across files
files = load_files(sys.argv[1])
file_words = {
filename: tokenize(files[filename])
for filename in files
}
file_idfs = compute_idfs(file_words)
# Prompt user for query
query = set(tokenize(input("Query: ")))
# Determine top file matches according to TF-IDF
filenames = top_files(query, file_words, file_idfs, n=FILE_MATCHES)
# Extract sentences from top files
sentences = dict()
for filename in filenames:
for passage in files[filename].split("\n"):
for sentence in nltk.sent_tokenize(passage):
tokens = tokenize(sentence)
if tokens:
sentences[sentence] = tokens
# Compute IDF values across sentences
idfs = compute_idfs(sentences)
# Determine top sentence matches
matches = top_sentences(query, sentences, idfs, n=SENTENCE_MATCHES)
for match in matches:
print(match)
def load_files(directory):
"""
Given a directory name, return a dictionary mapping the filename of each
`.txt` file inside that directory to the file's contents as a string.
"""
load_dict = {}
load = os.listdir(directory)
for file in load:
file_path = os.path.join(directory, file)
with open(file_path, 'r') as text:
load_dict[file] = text.read()
return load_dict
def tokenize(document):
"""
Given a document (represented as a string), return a list of all of the
words in that document, in order.
Process document by coverting all words to lowercase, and removing any
punctuation or English stopwords.
"""
words = [word.lower() for word in nltk.word_tokenize(document) if word not in string.punctuation
and word not in nltk.corpus.stopwords.words("english")]
return words
def compute_idfs(documents):
"""
Given a dictionary of `documents` that maps names of documents to a list
of words, return a dictionary that maps words to their IDF values.
Any word that appears in at least one of the documents should be in the
resulting dictionary.
"""
import math
N = len(documents)
idf_dict = {}
for doc, words in documents.items():
unique_words = set(words)
for word in unique_words:
if word not in idf_dict:
val = 0
for passage in documents.values():
if word in passage:
val += 1
if val != 0:
idf_dict[word] = math.log(N/val)
return idf_dict
def top_files(query, files, idfs, n):
"""
Given a `query` (a set of words), `files` (a dictionary mapping names of
files to a list of their words), and `idfs` (a dictionary mapping words
to their IDF values), return a list of the filenames of the the `n` top
files that match the query, ranked according to tf-idf.
"""
from collections import Counter
tf = dict.fromkeys(query, 0)
tf_idf = {}
"""for doc, words in files.items():
freq = Counter(words)
for item in query:
for word in freq:
if item == word:
tf[item] = freq[word]
tf_idf[doc] += tf[item] * idfs[item] if item in idfs else 0"""
for doc, words in files.items():
tf_idf[doc] = 0
unique_words = set(words)
for word in unique_words:
if word in query:
tf[word] = words.count(word)
tf_idf[doc] += tf[word] * idfs[word] if word in idfs else 0
top_files = []
for count in Counter(tf_idf).most_common(n):
top_files.append(count[0])
return top_files
def top_sentences(query, sentences, idfs, n):
"""
Given a `query` (a set of words), `sentences` (a dictionary mapping
sentences to a list of their words), and `idfs` (a dictionary mapping words
to their IDF values), return a list of the `n` top sentences that match
the query, ranked according to idf. If there are ties, preference should
be given to sentences that have a higher query term density.
"""
sent_score = {} #dictionary mapping a sentence to it's matching word measure and query term density as a tuple
top_sentences = []
for sentence, words in sentences.items():
sent_idf = float()
count = int()
unique_words = set(words)
for word in unique_words:
if word in query:
count += sentence.count(word)
sent_idf += idfs[word]
term_density = count / len(words)
sent_score[sentence] = (sent_idf, term_density)
for count in Counter(sent_score).most_common(n):
top_sentences.append(count[0])
return top_sentences
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
79ae737515aeebef8dfb64719fbba9e17129bb9b | 9d698397273d4a7239cc12eba31ed9dae9b0dd4d | /ejercicio4-usoDePaquetes-DateTime/dateTime.py | 13a750095f6d22c86218372ebf2147279014f6e4 | [] | no_license | operator-ita/Python-POO | a026f83015b5a8d85c08d919cbfe3967b684b4c7 | c1869523ad94a1043f01eb0cfbab49ec02713fca | refs/heads/master | 2022-03-17T03:56:41.343635 | 2019-10-02T19:21:12 | 2019-10-02T19:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # Sept, 2019
# @lunysska
# El ejemplo es para mostrar el uso de paquetes. En este caso: "datetime"
# Este método está creado para validar que un objeto de tipo Fecha es creado de
# manera correcta
from datetime import datetime
def mimetodo(fecha_str):
try:
objetoFecha = datetime.strptime(fecha_str,'%d/%m/%Y')
return True
except:
print("error")
return False
###################################################################
#Las pruebas
#Pero le falta algo al método ¿qué es?, para adecuarlo a mi clase Fecha
fecha_str = "23/12/0000"
print(mimetodo(fecha_str)) | [
"lunysska@gmail.com"
] | lunysska@gmail.com |
f6315d275ecaf4e02a4d8a4a261a789a4eceafcb | 14ab379a2f9d68d7e29d421a2a8fb3e6b2c83a0a | /main/tests/test_images.py | 5afcfee9f89aed7e80cf445e6cca791f1933c07b | [
"MIT"
] | permissive | alecstein/mataroa-personal | 8c1319f9502c16b88dc565545b9bade119a149f4 | 4c1b88dc9a113e3de392dd0ce676a6e2dd8589d0 | refs/heads/master | 2023-02-07T18:05:48.296449 | 2021-01-03T05:37:45 | 2021-01-03T05:37:45 | 326,335,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,008 | py | from django.test import TestCase
from django.urls import reverse
from main import models
class ImageCreateTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.user.set_password("abcdef123456")
self.user.save()
self.client.login(username="alice", password="abcdef123456")
def test_image_upload(self):
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.assertTrue(models.Image.objects.filter(name="vulf").exists())
self.assertEqual(models.Image.objects.get(name="vulf").extension, "jpeg")
self.assertIsNotNone(models.Image.objects.get(name="vulf").slug)
class ImageCreateAnonTestCase(TestCase):
def test_image_upload_anon(self):
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
response = self.client.post(reverse("image_list"), {"file": fp})
self.assertEqual(response.status_code, 302)
self.assertTrue(reverse("login") in response.url)
class ImageDetailTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.user.set_password("abcdef123456")
self.user.save()
self.client.login(username="alice", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_detail(self):
response = self.client.get(
reverse("image_detail", args=(self.image.slug,)),
)
self.assertEqual(response.status_code, 200)
self.assertInHTML("<h1>vulf</h1>", response.content.decode("utf-8"))
self.assertContains(response, "Uploaded on")
class ImageUpdateTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.user.set_password("abcdef123456")
self.user.save()
self.client.login(username="alice", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_update(self):
new_data = {
"name": "new vulf",
}
self.client.post(reverse("image_update", args=(self.image.slug,)), new_data)
updated_image = models.Image.objects.get(id=self.image.id)
self.assertEqual(updated_image.name, new_data["name"])
class ImageUpdateAnonTestCase(TestCase):
"""Tests non logged in user cannot update image."""
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.user.set_password("abcdef123456")
self.user.save()
self.client.login(username="alice", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
def test_image_update(self):
new_data = {
"name": "new vulf",
}
self.client.post(reverse("image_update", args=(self.image.slug,)), new_data)
image_now = models.Image.objects.get(id=self.image.id)
self.assertEqual(image_now.name, "vulf")
class ImageUpdateNotOwnTestCase(TestCase):
"""Tests user cannot update other user's image name."""
def setUp(self):
self.victim = models.User.objects.create(username="bob")
self.victim.set_password("abcdef123456")
self.victim.save()
self.client.login(username="bob", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
self.attacker = models.User.objects.create(username="alice")
self.attacker.set_password("abcdef123456")
self.attacker.save()
self.client.login(username="alice", password="abcdef123456")
def test_image_update_not_own(self):
new_data = {
"name": "bad vulf",
}
self.client.post(reverse("image_update", args=(self.image.slug,)), new_data)
image_now = models.Image.objects.get(id=self.image.id)
self.assertEqual(image_now.name, "vulf")
class ImageDeleteTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.user.set_password("abcdef123456")
self.user.save()
self.client.login(username="alice", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_delete(self):
self.client.post(reverse("image_delete", args=(self.image.slug,)))
self.assertFalse(
models.Image.objects.filter(name="vulf", owner=self.user).exists()
)
class ImageDeleteAnonTestCase(TestCase):
"""Tests non logged in user cannot delete image."""
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.user.set_password("abcdef123456")
self.user.save()
self.client.login(username="alice", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
def test_image_delete_anon(self):
self.client.post(reverse("image_delete", args=(self.image.slug,)))
self.assertTrue(
models.Image.objects.filter(name="vulf", owner=self.user).exists()
)
class ImageDeleteNotOwnTestCase(TestCase):
"""Tests user cannot delete other's image."""
def setUp(self):
self.victim = models.User.objects.create(username="bob")
self.victim.set_password("abcdef123456")
self.victim.save()
self.client.login(username="bob", password="abcdef123456")
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
self.attacker = models.User.objects.create(username="alice")
self.attacker.set_password("abcdef123456")
self.attacker.save()
self.client.login(username="alice", password="abcdef123456")
def test_image_delete_not_own(self):
self.client.post(reverse("image_delete", args=(self.image.slug,)))
self.assertTrue(
models.Image.objects.filter(name="vulf", owner=self.victim).exists()
)
| [
"zf@sirodoht.com"
] | zf@sirodoht.com |
79485cf6e33c80c6964e5d26bc244b105f9b931d | b7f157b2e2c3de0d02a9ecc8b7650a8efab52814 | /Backup.py | e1c7f2c8f756f01939b7586b9bf0b6fbfb2480bf | [] | no_license | ryosuzuki/fusion-addon-test | 101050b8a34c82a049de5f659038810e4120b03b | 87d426dc83051af66b94aa51b015f65d4d5e5e64 | refs/heads/master | 2021-01-19T21:47:39.270363 | 2017-09-16T17:56:41 | 2017-09-16T17:56:41 | 101,253,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,505 | py | import adsk.core, adsk.fusion, adsk.cam, traceback
import copy
from . import Fusion360CommandBase
class VisualizeCommand(Fusion360CommandBase.Fusion360CommandBase):
def onPreview(self, command, inputs):
pass
def onDestroy(self, command, inputs, reason_):
pass
def onInputChanged(self, command, inputs, changedInput):
pass
def onCreate(self, command, inputs):
pass
def onExecute(self, command, inputs):
self.project = None
self.projects = []
self.files = []
self.projectNames = {}
self.fileNames = {}
self.commandId = command.parentCommandDefinition.id
self.projectCommandId = self.commandId + '_project'
self.fileCommandId = self.commandId + '_file'
self.app = adsk.core.Application.get()
self.ui = self.app.userInterface
self.init()
def init(self):
app = adsk.core.Application.get()
ui = app.userInterface
product = app.activeProduct
rootComp = product.rootComponent
extrudes = rootComp.features.extrudeFeatures
design = adsk.fusion.Design.cast(product)
rootComp = design.rootComponent
materialLib = app.materialLibraries.itemByName("Fusion 360 Appearance Library")
appearance = materialLib.appearances.itemByName("Plastic - Matte (Yellow)") # "Paint - Enamel Glossy (Yellow)"
targetBodies = adsk.core.ObjectCollection.create()
toolBodies = adsk.core.ObjectCollection.create()
yPos = 0
yMax = 0
rMax = 0
for i in range(rootComp.bRepBodies.count):
body = rootComp.bRepBodies.item(i)
box = body.boundingBox
targetBodies.add(body)
if body.name.find("sheet") == 0:
toolBodies.add(body)
# else:
if body.name.find("sheet") == 0:
if (yPos < box.minPoint.y):
yPos = box.minPoint.y
if (yMax < box.maxPoint.y):
yMax = box.maxPoint.y
tMax = max(abs(box.maxPoint.x), abs(box.minPoint.x), abs(box.maxPoint.z), abs(box.minPoint.z))
if rMax < tMax:
rMax = tMax
rMax = rMax * 2
# ui.messageBox("%f %f %f" % (yPos, yMax, rMax))
resultBodies = adsk.core.ObjectCollection.create()
# 0 -- yPos
originalBodies = copy.copy(targetBodies)
sketches = rootComp.sketches
sketch = sketches.add(rootComp.xZConstructionPlane)
sketchCircles = sketch.sketchCurves.sketchCircles
centerPoint = adsk.core.Point3D.create(0, 0, 0)
circle = sketchCircles.addByCenterRadius(centerPoint, rMax)
prof = sketch.profiles.item(0)
distance = adsk.core.ValueInput.createByReal(yPos)
temp = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
body = temp.bodies.item(0)
combineFeatures = rootComp.features.combineFeatures
combineInput = combineFeatures.createInput(body, targetBodies)
combineInput.isKeepToolBodies = True
combineInput.operation = adsk.fusion.FeatureOperations.IntersectFeatureOperation
result = combineFeatures.add(combineInput)
resultBodies = adsk.core.ObjectCollection.create()
for body in result.bodies:
body.name = "structure"
resultBodies.add(body)
# yPos -- yPos+0.1
basePlane = rootComp.xZConstructionPlane
planes = rootComp.constructionPlanes
planeInput = planes.createInput()
offset = adsk.core.ValueInput.createByReal(yPos)
planeInput.setByOffset(basePlane, offset)
planeOne = planes.add(planeInput)
sketches = rootComp.sketches
sketch = sketches.add(planeOne)
sketchCircles = sketch.sketchCurves.sketchCircles
centerPoint = adsk.core.Point3D.create(0, 0, 0)
circle = sketchCircles.addByCenterRadius(centerPoint, rMax)
prof = sketch.profiles.item(0)
distance = adsk.core.ValueInput.createByReal(0.1)
temp = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
body = temp.bodies.item(0)
combineFeatures = rootComp.features.combineFeatures
combineInput = combineFeatures.createInput(body, targetBodies)
combineInput.isKeepToolBodies = True
combineInput.operation = adsk.fusion.FeatureOperations.IntersectFeatureOperation
result = combineFeatures.add(combineInput)
resultBodies = adsk.core.ObjectCollection.create()
for body in result.bodies:
body.name = "conductive"
body.appearance = appearance
resultBodies.add(body)
vector = adsk.core.Vector3D.create(0.0, 2.0, 0.0)
transform = adsk.core.Matrix3D.create()
transform.translation = vector
moveFeats = rootComp.features.moveFeatures
moveFeatureInput = moveFeats.createInput(resultBodies, transform)
moveFeats.add(moveFeatureInput)
# yPos + 0.1 -- yMax
basePlane = rootComp.xZConstructionPlane
planes = rootComp.constructionPlanes
planeInput = planes.createInput()
offset = adsk.core.ValueInput.createByReal(yPos+0.1)
planeInput.setByOffset(basePlane, offset)
planeOne = planes.add(planeInput)
sketches = rootComp.sketches
sketch = sketches.add(planeOne)
sketchCircles = sketch.sketchCurves.sketchCircles
centerPoint = adsk.core.Point3D.create(0, 0, 0)
circle = sketchCircles.addByCenterRadius(centerPoint, rMax)
prof = sketch.profiles.item(0)
distance = adsk.core.ValueInput.createByReal(yMax - (yPos + 0.1))
temp = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
body = temp.bodies.item(0)
combineFeatures = rootComp.features.combineFeatures
combineInput = combineFeatures.createInput(body, originalBodies)
# combineInput.isKeepToolBodies = True
combineInput.operation = adsk.fusion.FeatureOperations.IntersectFeatureOperation
result = combineFeatures.add(combineInput)
resultBodies = adsk.core.ObjectCollection.create()
for body in result.bodies:
body.name = "structure"
resultBodies.add(body)
vector = adsk.core.Vector3D.create(0.0, 2.0*2, 0.0)
transform = adsk.core.Matrix3D.create()
transform.translation = vector
moveFeats = rootComp.features.moveFeatures
moveFeatureInput = moveFeats.createInput(resultBodies, transform)
moveFeats.add(moveFeatureInput)
# resultBodies = adsk.core.ObjectCollection.create()
# for body in result.bodies:
# body.name = "result"
# body.appearance = appearance
# resultBodies.add(body)
# for i in range(rootComp.bRepBodies.count):
# body = rootComp.bRepBodies.item(i)
# if body.name.find("sheet") == 0:
# body.appearance = appearance
# toolBodies.add(body)
| [
"ryosuzk@gmail.com"
] | ryosuzk@gmail.com |
af3e92d4bb5750dc2fcd2724ac6b24821b95e473 | ae54fb5f5b67e21ba92b5b066a2e225c7610975b | /plusone/views.py | 6e7ae7f579f6c8ef564a44ef727c16b12571b5e5 | [] | no_license | tanmay17268/PlusOne | 03b1f3dfb3d70325fe631eb013967b3bc57ddb76 | 11ab382c9854db2ceb17fbd8bb930aff28e32ddd | refs/heads/master | 2020-04-12T22:49:27.643129 | 2018-12-22T09:55:02 | 2018-12-22T09:55:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request,'plusone/home2.html') | [
"noreply@github.com"
] | noreply@github.com |
25dccf51d8e7399e6f4030ca84d3e2a0e386509b | 02880f13bd89a1bfc9052ad6bcab0a410afafb2f | /python/collections - ordereddict.py | 21e81f05d17ebdb592b3733fe8d55fab9c1f4791 | [] | no_license | mason30006/hackerrank | 3942fc6187aa8302362d80059e32b1dcfd8f7970 | 09b2f340555e663065eb9f39bc5f2caf7e1ce464 | refs/heads/master | 2021-01-22T03:50:02.345671 | 2017-02-09T05:21:45 | 2017-02-09T05:21:45 | 81,468,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 08 18:59:32 2017
@author: Mason
"""
from sys import stdin
from collections import OrderedDict
ordered_dictionary = OrderedDict()
n = stdin.readline()
for i in range(int(n)):
string = stdin.readline()
string_parts = string.split(' ')
price = int(string_parts[-1])
item_name = ' '.join(string_parts[0:-1])
if item_name in ordered_dictionary.keys():
ordered_dictionary[item_name] = price + ordered_dictionary[item_name]
else:
ordered_dictionary[item_name] = price
for key in ordered_dictionary.keys():
print key + ' ' + str(ordered_dictionary[key])
| [
"mason.liang@gmail.com"
] | mason.liang@gmail.com |
5a12bfa5ef76874a0470b4d9ee429a9145413096 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python网络编程及MySQL部分/day32/code/clock.py | 23b6fa79cbd36db567930d651006699c63e168e4 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | from multiprocessing import Process
import time
class ClockProcess(Process):
def __init__(self,value):
self.value = value
#调用基类初始化方法
Process.__init__(self)
#super().__init__(self)
#在自定义的进程类中,重写父类的这个方法
def run(self):
n = 5
while n > 0:
print('The time is {}'.format(time.ctime()))
time.sleep(self.value)
n -= 1
#用自己的进程类创建进程
p = ClockProcess(2)
if __name__ == '__main__':
#自动执行run方法
p.start()
p.join() | [
"yabing_ji@163.com"
] | yabing_ji@163.com |
ca5c998f70de4c52660ed2f7cb58a11893b49e7d | 2cc3aed1b5dfb91e3df165144d95c01a495bd54b | /581-Shortest-Unsorted-Continuous-Subarray-sort.py | 30476a3fe00dd27aa9b3f815f4590fb84f2498fa | [] | no_license | listenviolet/leetcode | f38e996148cb5d4be8f08286daac16243b3c30e4 | 0c1efcbfd35e5ef036ec1ccd0c014cd7baf2ed2b | refs/heads/master | 2020-05-01T07:35:23.462429 | 2019-12-11T12:44:32 | 2019-12-11T12:44:32 | 177,354,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | class Solution:
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
copy = []
for i in range(len(nums)):
copy.append(nums[i])
copy.sort()
start = len(nums)
end = 0
for i in range(len(nums)):
if nums[i] != copy[i]:
start = min(start, i)
end = max(end, i)
return end - start + 1 if end - start >= 0 else 0
# Description:
# Given an integer array, you need to find one continuous subarray
# that if you only sort this subarray in ascending order,
# then the whole array will be sorted in ascending order, too.
# You need to find the shortest such subarray and output its length.
# Example 1:
# Input: [2, 6, 4, 8, 10, 9, 15]
# Output: 5
# Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order
# to make the whole array sorted in ascending order.
# Note:
# Then length of the input array is in range [1, 10,000].
# The input array may contain duplicates,
# so ascending order here means <=.
# Solution:
# https://leetcode.com/problems/shortest-unsorted-continuous-subarray/solution/
# Approach #3 Using Sorting [Accepted]
# Algorithm
# We can sort a copy of the given array numsnums,
# say given by nums_sorted.
# Then, if we compare the elements of numsnums and nums_sorted,
# we can determine the leftmost and rightmost elements which mismatch.
# The subarray lying between them is,
# then, the required shorted unsorted subarray.
# Complexity Analysis
# Time complexity : O(nlogn). Sorting takes nlognnlogn time.
# Space complexity : O(n). We are making copy of original array.
# Beats: 26.94%
# Runtime: 120ms
# easy | [
"listenviolet@gmail.com"
] | listenviolet@gmail.com |
d5173c1f65f0393492ffc84bb8cf527043aaf85e | c95a75498cf2605e86ff3afa60dfcb8290a4d1ff | /Lesson6/For3.py | 1e8a192b5802db6ebf6059f1bc0aa86ea469693f | [] | no_license | farzel/PythonDersleri | 96017bb96996a86b1e82b067985f2e298b080c43 | 44040c6f103eae6de6a0334a2a2b8a06d5935d1a | refs/heads/master | 2020-06-08T10:50:12.637262 | 2019-06-30T11:20:46 | 2019-06-30T11:20:46 | 193,216,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | # disaridan girilen metni harf harf alt alta yazdiriniz
text = input('Lutfen metninizi giriniz:')
result = ' '
for i in text:
print(i) #result = i + ' '
print('-'.join(text)) | [
"noreply@github.com"
] | noreply@github.com |
056813a96995c2c95a58dbd3f2d02480808d3964 | 37c3c6fd1b05b6cf0c5f5ab89120562d7a8a40f8 | /p36.py | 86f55562115da41bd8012f1870549b9f265cbd58 | [] | no_license | kaviraj333/python | e4b480adfcbec383c1228e07426833b9c02f4296 | 7110b6e153c4ef4afe7ade8ce20104b26ea4cc8f | refs/heads/master | 2020-05-22T23:17:48.540550 | 2019-04-09T05:04:31 | 2019-04-09T05:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | amu=int(raw_input())
arr=list(map(int,raw_input().split()))
r=[]
for i in arr:
r.append(i)
b=sum(r)
if(sum(r)==22):
print("4")
elif(r[1]==3):
print("0")
else:
m=min(r)
print(m)
| [
"noreply@github.com"
] | noreply@github.com |
fd9fed8d50f3bc3779e3425c4fcf511a9684675a | bd37ff289bcbe24cc6e8ab360569713b9109265d | /logistic_regression1.py | 0e62f1edd37807715a19d1310c273bd42e8e156f | [] | no_license | Sanil2108/python-machine-learning | fc035f6ddd586cf3dab9421002d4408c03b0589c | c9dbf8a1f34aa3b80c76986c742e85a9be4b2375 | refs/heads/master | 2021-01-11T09:00:28.995823 | 2017-06-18T06:08:33 | 2017-06-18T06:08:33 | 77,436,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | import numpy as np
import matplotlib.pyplot as plt
all_cost=[]
def logistic(z):
return 1/(1+np.exp(-z))
def hypothesis(theta, X):
return logistic(np.array(np.matrix(X)*np.transpose(np.matrix(theta))))[0][0]
# return getY(theta, X)
def cost(theta, X, y):
m=len(y)
total=0
for i in range(m):
total+=(y[i]*np.log(hypothesis(theta, X[i])) + (1-y[i])*np.log(1-hypothesis(theta, X[i])))
return -total/m
def gradient_descent(X, y, alpha):
tempCost=1000
while(tempCost>0.01):
for j in range(len(theta)):
pd=0
for i in range(len(y)):
pd+=(hypothesis(theta, X[i])-y[i])*X[i][j]
theta[j]=theta[j]-alpha*pd
all_cost.append(tempCost)
if(tempCost-cost(theta, X, y)<1e-50):
break
tempCost=cost(theta, X, y)
print(tempCost)
print(theta)
# temp_x = np.linspace(0, len(all_cost), len(all_cost) + 1)
# for i in range(len(all_cost)):
# plt.plot(temp_x[i], all_cost[i], 'ro')
# plt.show()
return theta
#X is an (n+1) row vector
def getY(theta, X):
if(np.array(np.matrix(X)*np.transpose(np.matrix(theta)))>=0.5):
return 1
else:
return 0
# new dataset for a circular decision boundary
X = [
[1, 0, 0, 0, 0, 0],
[1, 0.5, 0.25, -0.5, 0.25, -0.25],
[1, 0.5, 0.25, 0.5, 0.25, 0.25],
[1, - 0.5, 0.25, -0.5, 0.25, 0.25],
[1, -0.5, 0.25, 0.5, 0.25, -0.25],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, -1, 1, -1],
[1, -1, 1, 1, 1, -1],
[1, -1, 1, -1, 1, 1],
[1, 0, 0, 1, 1, 0],
[1, 0, 0, -1, 1, 0],
[1, 1, 1, 0, 0, 0],
[1, -1, 1, 0, 0, 0]
]
y = [
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1
]
theta = [
0,
0,
0,
0,
0,
0
]
alpha = 0.05
gradient_descent(X, y, alpha) | [
"sanilkhurana7@gmail.com"
] | sanilkhurana7@gmail.com |
74f18e356d9fe201db24ff1b68518f244b65d841 | c85a6d674679780ee510b5c8c3dbcbdecc859f64 | /test/test_group.py | 712c217d06209ae2dd8bfe1aca97dc90f5576fcd | [] | no_license | cbrowet-axway/APIM_sdk | d4f4a124e86a7b2e65d0ef07b54c68e95de68337 | 4f82df67ebe3dd6eae645bab8f86e72c0347ee24 | refs/heads/master | 2020-05-25T13:22:35.802350 | 2020-04-16T09:25:21 | 2020-04-16T09:25:21 | 187,820,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | # coding: utf-8
"""
API Manager API v1.3
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.3.0
Contact: support@axway.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.group import Group # noqa: E501
from swagger_client.rest import ApiException
class TestGroup(unittest.TestCase):
"""Group unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGroup(self):
"""Test Group"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.group.Group() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"cbro@semperpax.com"
] | cbro@semperpax.com |
7d59d321b730c74962c2ed7556c54789f5d402ca | 0e8a7ff6b223f044beb3e915cf7ca26e46665c6a | /mykaggle/feature/platform_to_pub_dev.py | 6ad910d891bc5d62dcd228f03fd809e8d74db1b0 | [] | no_license | cfiken/atmacup8 | be982d746a6e30e479bc10f4d6d8cd0def222cd4 | 0f36624b52c9652a469a0506ec847d4e3d77d0de | refs/heads/master | 2023-02-01T02:42:01.293700 | 2020-12-14T16:45:05 | 2020-12-14T16:45:05 | 318,500,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | from typing import Optional, Dict
import pandas as pd
from sklearn.decomposition import PCA
from mykaggle.feature.base import Feature
from mykaggle.transform.pivot import PivotTransform
COLUMNS = [
'Publisher',
'Developer'
]
class PlatformToPubDev(Feature):
'''
Platform を Publisher & Developer 情報からエンコード
'''
def __init__(self, train: bool = True, n_components: int = 2) -> None:
super().__init__(name='platform_to_pub_dev', train=train)
self.n_components = n_components
def create(
self,
base: pd.DataFrame,
others: Optional[Dict[str, pd.DataFrame]] = None,
*args, **kwargs
) -> pd.DataFrame:
df_main = others['main'].copy()
df_another = others['another'].copy()
if self.train:
df_whole = pd.concat([df_main, df_another])
else:
df_whole = pd.concat([df_another, df_main])
all_columns = []
for c in COLUMNS:
transform = PivotTransform(indices=['Platform'], column=c, target='id', aggs=['count'], fillna=0)
pub_to_c = transform(df_whole)
df_pca = pd.DataFrame(self._pca_transform(pub_to_c, self.n_components))
pub_to_c = pd.concat([pub_to_c, df_pca], axis=1)
pub_to_c = pub_to_c.iloc[:, [0] + list(range(-1, -self.n_components - 1, -1))]
pca_columns = ['_'.join(['pca', str(n), 'count_id_pivotby_Platform_for', c])
for n in range(self.n_components)]
all_columns.extend(pca_columns)
pub_to_c.columns = ['Platform'] + pca_columns
df_main = pd.merge(df_main, pub_to_c, how='left', on='Platform')
return df_main.loc[:, all_columns]
def _pca_transform(self, df: pd.DataFrame, n_components: int):
pca = PCA(n_components)
return pca.fit_transform(df.drop('Platform', axis=1).values)
| [
"cfiken@gmail.com"
] | cfiken@gmail.com |
9dfd7670fe6d2074e93e051ee1d5617c0a558db3 | 6223dc2e5de7921696cb34fb62142fd4a4efe361 | /.metadata/.plugins/org.eclipse.core.resources/.history/2/60c520da1a6b00141928c597445b4e35 | 647638267cb95a600b934baacecbaa24743a5a04 | [] | no_license | Mushirahmed/python_workspace | 5ef477b2688e8c25b1372f546752501ee53d93e5 | 46e2ed783b17450aba29e4e2df7b656522b2b03b | refs/heads/master | 2021-03-12T19:24:50.598982 | 2015-05-25T10:23:54 | 2015-05-25T10:23:54 | 24,671,376 | 0 | 1 | null | 2015-02-06T09:27:40 | 2014-10-01T08:40:33 | Python | UTF-8 | Python | false | false | 5,280 | #!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
#from operator import add
#import copy
#from gnuradio import gr
import gras
class expo(gras.Block):
    """GRAS block that prints and emits samples of a second-order step
    response  gama/(alpha*beta) - c1*exp(-alpha*t) + c2*exp(-beta*t).

    Python 2 code: uses `print` statements and generator ``.next()``.
    Parameters must be set via :meth:`set_parameters` before `work` runs.
    """
    def __init__(self):
        # One float32 input stream declared; output is pushed manually
        # through produce() instead of a declared out_sig.
        gras.Block.__init__(self,
            name="expo",
            in_sig=[numpy.float32],
            out_sig=[])

    def set_parameters(self,g,a,b):
        # g: overall gain (gamma); a, b: the two pole coefficients.
        self.gama=g
        self.alpha=a
        self.beta=b

    def yield_times(self):
        # Infinite generator of wall-clock second strings, advancing by
        # 0.5 s per value, starting from today's midnight.
        from datetime import date, time, datetime, timedelta
        start = datetime.combine(date.today(), time(0, 0))
        yield start.strftime("%S")
        while True:
            start += timedelta(seconds=0.5)
            yield start.strftime("%S")

    def work(self, input_items, output_items):
        #in0 = input_items[0]
        #out = output_items[0]
        tmrg = []
        o1 = []
        o2 = []
        o3 = []
        ans = []
        final_output = []
        gen = self.yield_times()
        # collect 20 timestamp strings (unused below; kept for debugging)
        for ii in range(20):
            tmrg.append(gen.next())
        # print "tmrg :",tmrg
        """for i1 in range(0,10):
o1.append((self.gama)/(self.alpha*self.beta))
print "o1 : ", o1
for i2 in range(0,10):
o2.append(((self.gama)*(-numpy.exp(self.alpha)))/(self.alpha*(self.beta-self.alpha)))
print "o2 : ",o2
for i3 in range(0,10):
o3.append(((self.gama)*(-numpy.exp(self.beta)))/(self.beta*(self.alpha-self.beta)))
print "o3 : ",o3
#ans.append(o1+o2+o3)
for i in range(0,10):
ans.append(list(numpy.array(o1[i])+numpy.array(o2[i])+numpy.array(o3[i])))
print "Final Ans : ",ans
print "Type out : ",type(out)
print "Type ans :",type(ans)
out = copy.copy(ans)
#out[0:1] = ans
print "Output is : " ,out
self.consume(0,1)
self.produce(0,1)"""
        #o1.append((self.gama)/(self.alpha*self.beta))
        #print "o1 : ", o1
        for i in range(0,20):
            # o1: steady-state (DC) term of the response
            o1.append((self.gama)/(self.alpha*self.beta))
            print "o1 : ", o1[i]
            # o2: decaying exponential contributed by the alpha pole
            o2.append(((self.gama)*(numpy.exp(-(self.alpha*i)))/(self.alpha*(self.beta-self.alpha))))
            print "o2 : ",o2[i]
            # o3: decaying exponential contributed by the beta pole
            o3.append(((self.gama)*(numpy.exp(-(self.beta*i)))/(self.beta*(self.alpha-self.beta))))
            print "o3 : ",o3[i]
            ans.append(o1[i]-o2[i]+o3[i])
        print "Final Ans : ",ans
        """for i in range(0,len(ans)):
#out = copy.copy(ans[i])
#out[0:1] = ans
#print "Output is : " ,out"""
        """for i1 in range(0,len(ans)):
final_output.append(o1+ans[i1])
print "Final OutPut : ", final_output"""
        # emit only sample index 15 of the computed response
        output_items[0][:1] = ans[15]
        #print "Output Sent : ", output_items[i1]
        #out[:len(final_output)] = copy.copy(final_output)
        self.consume(0,1)
        self.produce(0,1)
        """result = []
for i in range(0,20):
result.append(numpy.exp(i))
print "Result : ",result
out[0] = result
self.consume(0,1)
self.produce(0,1) """
        #o2 = -numpy.exp(-2*in0[0:1])
        #o3 = -numpy.exp(-3*in0[0:1])
        #o2=numpy.exp(-(in0[0:1]*self.alpha))
        #print("o2 :",o2)
        #o3=numpy.sin((self.freq*in0[0:1])+(self.sigma))
        #print("o3 :",o3)
        #o4=numpy.sqrt(o1-numpy.square(self.zita))
        #print("o4 :",o4)
        """ans = o1-(mul/o4)
#ans.append(o1-((numpy.exp(-in0[0:1]*self.sigma)*(numpy.sin((self.freq*in0[0:1])+(self.sigma))))/numpy.sqrt(o1-numpy.square(self.zita))))
print("Final Value : ",ans)
out[0:1] = ans"""
        #o2 = -numpy.exp(-2*tmrg)
        #o3 = -numpy.exp(-3*in0[0:1])
        #o2 = numpy.exp(-in0[0:1]*self.alpha)
        #o3 = numpy.exp(-in0[0:1]*self.beta)
        #o4 = numpy.sqrt(1-numpy.square(self.alpha))
        #ans = 1-((o2*o3)/o4)
        #ans.append(o2)
        #ans.append(o1-((numpy.exp(-in0[0:1]*self.sigma)*(numpy.sin((self.freq*in0[0:1])+(self.sigma))))/numpy.sqrt(o1-numpy.square(self.zita))))
        #print("Final Value : ",ans)
        #out[0:1] = ans
        #out = copy.copy(ans)
        #self.consume(0,1)
        #self.produce(0,1)
        #return len(output_items[0])
| [
"imushir@gmail.com"
] | imushir@gmail.com | |
08a346d5f12cb87aa684c38f6cbe12ec40f7da8b | 7e86bb5ba4191e60999abac78c0c4a48e91b7e8b | /Python Logger V3/serial_com.py | c43bf593564755e2981768173bba1cefcaa52363 | [] | no_license | XOnanoSmartfoam/XOnanoBinAnalysis | 21563a1b6a4b8e67521602857370a90a8051b7a5 | 76bfb9bb150c919f8680bf88070a451a45f780a8 | refs/heads/master | 2021-01-12T08:13:03.271618 | 2018-05-01T14:02:12 | 2018-05-01T14:02:12 | 76,505,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,901 | py | import serial
import threading
#import struct
#import re
import csv
class Serial_com(threading.Thread):
    """Background reader thread for a serial port.

    Accumulates CR-terminated ASCII lines of comma-separated integers.
    A line starting with the sentinel 0,0,0 is a packet header carrying
    (num_bytes, sensor_zero, area_score); the next complete line is the
    packet payload.  Parsed packets are appended to ``data_q`` as
    ``[sensor_zero, area_score, data]`` and ``new_data`` is raised.
    Stop the thread by clearing ``self.alive``.
    """
    def __init__( self,
                  data_q,
                  port_num,
                  port_baud,
                  port_bytesize=serial.EIGHTBITS,
                  port_parity=serial.PARITY_NONE,
                  port_stopbits=serial.STOPBITS_ONE,
                  port_timeout=0.01,
                  port_xonxoff=0, # software flow control
                  port_rtscts=0, # hardware (RTS/CTS) flow control
                  port_dsrdtr=True # hardware(DSR/DTR) flow control
                  ):
        threading.Thread.__init__(self)
        # The port itself is opened lazily inside run(), not here.
        self.serial_port = None
        self.serial_arg = dict( port=port_num,
                                baudrate=port_baud,
                                bytesize=port_bytesize,
                                parity=port_parity,
                                stopbits=port_stopbits,
                                timeout=port_timeout,
                                xonxoff=port_xonxoff,
                                rtscts=port_rtscts,
                                dsrdtr=port_dsrdtr
                                )
        self.data_q = data_q            # shared output container (append() is used)
        self.new_data = False           # flag raised whenever a packet is queued
        #self.dataBuffer = bytearray()
        self.dataBuffer_list = []       # complete lines awaiting parsing
        self.lineBuffer = ''            # partial line currently being assembled
        #self.start_seq = bytearray([0,0,0])
        self.start_seq_str = [0,0,0]    # packet-header sentinel
        self.alive = threading.Event()  # cleared by the owner to stop the loop
        self.alive.set()

    def run(self):
        try:
            # Re-open cleanly in case a port object was left around.
            if self.serial_port:
                self.serial_port.close()
            self.serial_port = serial.Serial(**self.serial_arg)
            self.serial_port.setDTR(True)
        except serial.SerialException as e:
            #self.error_q.put(e.message)
            print ('serial error')
            return
        while self.alive.isSet():
            # read data: drain everything currently waiting, one byte at a time
            while(self.serial_port.inWaiting()):
                char = self.serial_port.read(1).decode("utf-8")
                if char == '\r':
                    # line complete: move it onto the parse queue
                    self.dataBuffer_list.append(self.lineBuffer)
                    self.lineBuffer = ''
                elif char != ' ':
                    # spaces are dropped; every other character is kept
                    self.lineBuffer += char
            if len(self.dataBuffer_list) > 1:
                # need at least header + payload before parsing a packet
                line = list(map(int,self.dataBuffer_list[0].split(',')))
                self.dataBuffer_list.pop(0)
                if line[0:3] == [0,0,0]:
                    # start line: [0,0,0, num_bytes, sensor_zero, area_score]
                    num_bytes = line[3]
                    sensor_zero = line[4]
                    area_score = line[5]
                    data = list(map(int,self.dataBuffer_list[0].split(',')))
                    self.dataBuffer_list.pop(0)
                    if len(data) != num_bytes:
                        # length mismatch is only reported, the packet is kept
                        print('bad packet? data length is ' + str(len(data)) + ' and num bytes expected is ' + str(num_bytes))
                    self.data_q.append([sensor_zero, area_score, data])
                    self.new_data = True
                    print('received event')
            #
            # if (self.serial_port.inWaiting()):
            #     # Read the data from the line
            #     data = self.serial_port.read(self.serial_port.inWaiting()).decode("utf-8")
            #     # split based on carriage returns
            #     data_lines = data.split('\r')
            #     self.dataBuffer_list[-1] += data_lines[0]
            #     if len(data_lines) > 1:
            #         self.dataBuffer_list += data_lines[1:]
            #
            #     for line in csv.reader(data_lines):
            #         #data = re.compile(',\s?').split(line)
            #
            #         #print(data)
            #         #if line[0:3] == [0, 0, 0]:
            #         if line != '':
            #             print(line)
            #
            # index = self.dataBuffer.find(self.start_seq)
            # if index >= 0 and len(self.dataBuffer) >= index + 5:
            #     data_length = struct.unpack(">H",self.dataBuffer[index+3:index+5])[0]
            #     # TODO check if data length is sensible
            #     if len(self.dataBuffer) >= index + 5 + data_length:
            #         self.data_q.append(struct.unpack(str(self.data_length)+'c', \
            #             self.dataBuffer[index+5:index+5+data_length]))
            #         self.dataBuffer = self.dataBuffer[index+5+data_length:]
        # clean up
        if self.serial_port:
            self.serial_port.close()
"lorenzo@2020armor.com"
] | lorenzo@2020armor.com |
58c9538422d3e0c3acdb95a227e69e976ea8c955 | 9406178490c499e008eeef96030ee8c781043344 | /test/timing_tester.py | cb1a57b1d1e977cb686474a10d2b90fd9946e2b1 | [] | no_license | johnbooch/ConwaysGOF | ffbf419e7e0b15abc2b9236ee3c0c0f10558ed85 | 8f315750d02062f8150ece52347139905d5bdf84 | refs/heads/master | 2020-03-26T20:48:37.709958 | 2018-09-10T19:27:54 | 2018-09-10T19:27:54 | 145,347,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,976 | py | import subprocess
import os
import time
def test(cmds, exec_time):
    """Launch each shell command, let it run for *exec_time* seconds,
    then terminate it.  Output is captured (and discarded) via a pipe."""
    for command in cmds:
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        time.sleep(exec_time)  # give the simulation its wall-clock budget
        proc.terminate()
COMMANDS_50x50 = [
["python src/gof.py --rows 50 --cols 50 --world random --perc 25 --alg linear --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 25 --alg roll --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 25 --alg conv --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 25 --alg set --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 50 --alg linear --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 50 --alg roll --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 50 --alg conv --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 50 --alg set --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 75 --alg linear --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 75 --alg roll --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 75 --alg conv --time"],
["python src/gof.py --rows 50 --cols 50 --world random --perc 75 --alg set --time"],
]
COMMANDS_100x100 = [
["python src/gof.py --rows 100 --cols 100 --world random --perc 25 --alg linear --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 25 --alg roll --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 25 --alg conv --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 25 --alg set --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 50 --alg linear --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 50 --alg roll --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 50 --alg conv --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 50 --alg set --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 75 --alg linear --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 75 --alg roll --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 75 --alg conv --time"],
["python src/gof.py --rows 100 --cols 100 --world random --perc 75 --alg set --time"],
]
# Timing runs for a 500x500 grid at 25/50/75% initial density, one entry
# per algorithm variant.  Fix: the last entry read "--alg set v" (a typo)
# instead of "--alg set --time" like every sibling command.
COMMANDS_500x500 = [
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 25 --alg linear --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 25 --alg roll --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 25 --alg conv --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 25 --alg set --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 50 --alg linear --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 50 --alg roll --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 50 --alg conv --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 50 --alg set --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 75 --alg linear --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 75 --alg roll --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 75 --alg conv --time"],
    ["python src/gof.py --rows 500 --cols 500 --world random --perc 75 --alg set --time"],
]
COMMANDS_1000x1000 = [
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 25 --alg linear --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 25 --alg roll --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 25 --alg conv --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 25 --alg set --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 50 --alg linear --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 50 --alg roll --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 50 --alg conv --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 50 --alg set --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 75 --alg linear --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 75 --alg roll --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 75 --alg conv --time"],
["python src/gof.py --rows 1000 --cols 1000 --world random --perc 75 --alg set --time"],
]
def main():
    # Run each grid-size batch; bigger grids get a longer wall-clock
    # budget per command (seconds of simulated run time before kill).
    test(COMMANDS_50x50, 5)
    test(COMMANDS_100x100, 10)
    test(COMMANDS_500x500, 20)
    test(COMMANDS_1000x1000, 25)

if __name__ == "__main__":
    main()
"johnbuccieri@gmail.com"
] | johnbuccieri@gmail.com |
5ffbe998cc682b453dc2485906ca1758e0499efa | ec3bce1dfe435e6271f7be9b773e1f417cd4746e | /models/models.py | 1df480db84f05192bdd9f74732d52478dae94d90 | [] | no_license | alien010101/murr | 74f415928bcd2f28a40717271b639ba2b4c15086 | f32caca96fe66aa35c77aef6b7d1016e5273c14c | refs/heads/master | 2022-02-15T19:40:20.529038 | 2019-07-19T16:26:44 | 2019-07-19T16:26:44 | 197,807,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class claves_sat(models.Model):
    # Catalog of SAT (Mexican tax authority) product/service keys.
    _name = 'inventario_expandido.clave_sat'
    _rec_name = 'clave'  # records display by their numeric key

    clave = fields.Integer('Clave SAT')


class inventario_expandido(models.Model):
    # Extends product.template with warehouse location, customs entry
    # (pedimento) and the SAT key required for Mexican invoicing.
    _inherit = 'product.template'

    ubicacion = fields.Char('Ubicación', size=16)
    pedimento = fields.Integer('Pedimento')
    clave_sat = fields.Many2one('inventario_expandido.clave_sat', string ='Clave SAT')


class orden_entrega_expandido(models.Model):
    # Extends stock.picking with the employee who picked the order.
    _inherit = 'stock.picking'

    empleado = fields.Many2one('hr.employee', string = 'Surtido por')
"noreply@github.com"
] | noreply@github.com |
e61ee9fe6455a99ff23ec3f7d31d68c0f3408062 | 1dd4ae2d974d65e86538e49f84179b3ec6b8476c | /build/robotiq/robotiq_modbus_tcp/catkin_generated/pkg.develspace.context.pc.py | c68a3cb9bf4efb70df639a4b81765748f3b0d9b8 | [] | no_license | tony23545/bulldog_ws | e115510d87980c90b308ae881c59d4e6145964c0 | d3e03aa230e9366023df383665cf6be928d68c8d | refs/heads/master | 2022-11-30T06:21:04.073397 | 2019-07-08T07:33:52 | 2019-07-08T07:33:52 | 176,073,396 | 5 | 0 | null | 2022-11-21T21:13:17 | 2019-03-17T08:11:32 | Makefile | UTF-8 | Python | false | false | 389 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robotiq_modbus_tcp"
PROJECT_SPACE_DIR = "/home/shengjian/bulldog_ws/devel"
PROJECT_VERSION = "1.0.0"
| [
"csj15thu@gmail.com"
] | csj15thu@gmail.com |
b81f580bfd884ff1bbcd428a82ed1131ae1d6e8d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_75/717.py | 36146101c64393b9b35cbf7d17c8eadde15d28f0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | def solvecase(L):
C = int(L[0])
D = int(L[C+1])
N = int(L[C+D+2])
F = L[1:C+1]
X = L[C+2:C+D+2]
S = L[-1]
Q = []
for s in S:
#get spell from list
Q.append(s)
#send recent spells to check combination
if len(Q) > 1:
comb = chkcombine(F,Q[-1],Q[-2])
if comb!=None:
Q.pop()
Q.pop()
Q.append(comb)
#check for opposing spells
for i in range(len(Q)-1):
if chkoppose(X,Q[i],Q[-1]):
#destroy everything
Q = []
break
return Q
def chkcombine(formulalist, s1, s2):
    """Return the element produced by combining s1 and s2 (in either
    order) according to *formulalist*, or None when no formula matches.

    Each formula is an indexable triple (a, b, result)."""
    for rule in formulalist:
        a, b, produced = rule[0], rule[1], rule[2]
        if (a == s1 and b == s2) or (a == s2 and b == s1):
            return produced
    return None
return None
def chkoppose(opposelist, s1, s2):
    """Return True when s1 and s2 form an opposed pair (in either order)
    according to *opposelist*; each entry is an indexable pair."""
    for pair in opposelist:
        first, second = pair[0], pair[1]
        if (first == s1 and second == s2) or (first == s2 and second == s1):
            return True
    return False
# Driver: first line is the number of cases; each following line is one
# whitespace-tokenized test case handed to solvecase.
N = int(input())
for n in range(N):
    r = solvecase(input().split(' '))
    # Code Jam output format: "Case #k: [a, b, c]"
    print("Case #",str(n+1),": [",sep='',end='')
    print(", ".join(r),sep='',end='')
    print(']')
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e93547bf14bd83cd87321778faba4ea91883f376 | 263aa24cb9b4489334b355c09e66f033e037f7d2 | /latihan_private_variable/venv_private/bin/easy_install | b0f60ec8097a7e79eb1be1f899bf7397c977f285 | [] | no_license | rexsidirtjump/python-oop | 596f3dd21eff9975698d03a779869c3ff1b187fa | 9867b4a12b03f8bc48c74ed0af1eadb5ba65308c | refs/heads/master | 2020-06-02T05:19:58.445980 | 2019-06-11T12:08:32 | 2019-06-11T12:08:32 | 191,039,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | #!/home/rexdev/python/oop_python/latihan_private_variable/venv_private/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"a9rexsi22@gmail.com"
] | a9rexsi22@gmail.com | |
c95761a71ce8c525e53f512f2e7d77c406ccb524 | 6531592cf7a73b103ff11ddb5bdba6ef9293963a | /app/app.py | c5fc36c0f22d8e5ca2c5ad491c1a42766ef86361 | [] | no_license | AidynUbingazhibov/CSLR-tool | f7c7aa0c9fffe0a3ca3889c069870f6f675b2f24 | 8313270c8a1bb55557c450a95070dbb779c4392a | refs/heads/main | 2023-04-19T11:38:09.751613 | 2021-04-30T14:58:02 | 2021-04-30T14:58:02 | 363,168,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | import streamlit as st
import pandas as pd
import streamlit as st
import numpy
import sys
import os
import tempfile
sys.path.append(os.getcwd())
from app_model import main as m
from app_model import trigger_rerun
import cv2
import time
import utils.SessionState as SessionState
from random import randint
from streamlit import caching
import streamlit.report_thread as ReportThread
from streamlit.server.server import Server
import copy
from components.custom_slider import custom_slider
from decord import VideoReader
from decord import cpu, gpu
import pandas as pd
from torchvision import transforms
import numpy as np
from PIL import Image
sys.path.append("CSLR/stochastic-cslr")
import stochastic_cslr
import torch
import glob
import time
# Security
#passlib,hashlib,bcrypt,scrypt
import hashlib
def make_hashes(password):
    """Return the hex-encoded SHA-256 digest of *password* (a str)."""
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
def check_hashes(password, hashed_text):
    """Return *hashed_text* when *password* hashes to it, else False."""
    return hashed_text if make_hashes(password) == hashed_text else False
# DB Management
import sqlite3
conn = sqlite3.connect('data.db')
c = conn.cursor()
# DB Functions
def create_usertable():
    # Uses the module-level cursor `c`; idempotent thanks to IF NOT EXISTS,
    # so it is safe to call on every page interaction.
    c.execute('CREATE TABLE IF NOT EXISTS userstable(username TEXT,password TEXT)')

def add_userdata(username,password):
    # `password` is expected to already be a SHA-256 hex digest
    # (callers pass make_hashes(...) output, never the raw password).
    c.execute('INSERT INTO userstable(username,password) VALUES (?,?)',(username,password))
    conn.commit()

def login_user(username,password):
    # Parameterized lookup; returns the matching rows (empty list = failure).
    c.execute('SELECT * FROM userstable WHERE username =? AND password = ?',(username,password))
    data = c.fetchall()
    return data

def view_all_users():
    # Return every (username, password-hash) row for admin display.
    c.execute('SELECT * FROM userstable')
    data = c.fetchall()
    return data
def main():
    """Streamlit entry point: sidebar menu with Home / Login / SignUp pages.

    Login success writes a "1" flag to /app/app/upload.txt, which the Home
    page reads back to decide whether to launch the CSLR app (app_model.main).
    """
    #st.title("")
    menu = ["Home", "Login","SignUp"]
    choice = st.sidebar.selectbox("Menu",menu)
    result = 0
    if choice == "Login":
        username = st.sidebar.text_input("User Name")
        password = st.sidebar.text_input("Password",type='password')
        if st.sidebar.checkbox("Login"):
            create_usertable()
            hashed_pswd = make_hashes(password)
            result = login_user(username,check_hashes(password,hashed_pswd))
            if result:
                st.success("Logged In as {}".format(username))
                # persist the "logged in" flag for the Home page
                with open("/app/app/upload.txt", "w") as f:
                    f.write("1")
            else:
                st.warning("Incorrect Username/Password")
    elif choice == "SignUp":
        st.subheader("Create New Account")
        new_user = st.text_input("Username")
        new_password = st.text_input("Password",type='password')
        if st.button("Signup"):
            create_usertable()
            # only the hash of the password is stored
            add_userdata(new_user,make_hashes(new_password))
            st.success("You have successfully created a valid Account")
            st.info("Go to Login Menu to login")
    elif choice == "Home":
        # gate the app on the login flag written above
        with open("/app/app/upload.txt", "r") as f:
            res = int(f.readline())
        if res:
            st.sidebar.markdown("Signer id: **51**")
            m()  # launch the sign-language recognition UI (app_model.main)
main()
| [
"noreply@github.com"
] | noreply@github.com |
228dbf7f727ca331a3bc4e74ff4fc8b813cec90c | 3970c0ed6b4b62699126cb82390ecedc78de2ded | /bin/pip3 | d8ccc96ba661ed114ef2da4a853fa238f81a2d37 | [] | no_license | nexusme/data_process_tools | 6a130b7e1054b7026814c229405f427becf0305b | 12f7ac9c7d9ba0f32a5feb35777760e929af900a | refs/heads/main | 2023-03-02T06:46:35.832300 | 2021-01-27T11:11:23 | 2021-01-27T11:11:23 | 320,497,191 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | #!/Users/nexus/data_pre_process/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"you@nexus_me@hotmail.com"
] | you@nexus_me@hotmail.com | |
5cc1d06f45ada335efd2de95fba28d718ea96957 | b14ef01ccadbe31ce633d39e14dea91fd75b6f10 | /mlops/ml_workflow/encode_target.py | 554d8b7cf6e30ffcd53c6e022a41665f89050d6a | [] | no_license | yogii786/MLOps-1 | 38dfaf048bae5f776ea2dfd472bbb62aa2618b83 | eeba37f96b1a24a4f7bbffa5facb96b91fb4be38 | refs/heads/master | 2023-08-25T00:56:08.179906 | 2021-10-30T16:55:43 | 2021-10-30T16:55:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from typing import List, Tuple
import numpy as np
from sklearn.preprocessing import LabelEncoder
def get_targetencoder_and_encoded_targets(
    target: List[str],
) -> Tuple[LabelEncoder, np.array]:
    """Fit a LabelEncoder on *target* and return both the fitted encoder
    and the integer-encoded labels."""
    encoder = LabelEncoder()
    return encoder, encoder.fit_transform(target)
| [
"kumar.utsav@freshworks.com"
] | kumar.utsav@freshworks.com |
7d35d804f51a5c1a2722cae661d7ea21d7033210 | 4426f894af42a7c29ddf20a976f2205834fe3123 | /006_course_documents/computer_vision/week3_4_5/week3/第3周优秀作业/线性回归和逻辑回归/15290877632/week3 linear regression assignment.py | d82cc509c7402ba1115da3f1fdd4d6cd09f8d91b | [] | no_license | yuchen-he/kaikeba_report | f2e90daedfe1699ff9891d266a0d7b70dc75e526 | e0c7b21b574ab1981fe37b1c6b604499ea7a4ef5 | refs/heads/master | 2021-07-07T00:01:34.052718 | 2020-03-24T01:05:18 | 2020-03-24T01:05:18 | 226,459,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | #!/usr/bin/env python
# coding: utf-8
# In[473]:
import numpy as np
import random
import matplotlib.pyplot as plt
# # linear regression model estimation
# In[457]:
def predict(w, b, gt_x):
    """Linear model prediction w*x + b; works elementwise on arrays."""
    return gt_x * w + b
# # loss function define
# In[13]:
def loss_function(gt_x, gt_y, w, b):
    """Mean squared error of the linear model, scaled by 1/(2n):
    sum((w*x + b - y)^2) / (2 * len(gt_y))."""
    residual = predict(w, b, gt_x) - gt_y
    return np.dot(residual, residual) / (2 * len(gt_y))
# # gradient calculation
# In[14]:
def cal_step_gradient(batch_gt_x, batch_gt_y, w, b, lr):
    """One mini-batch gradient-descent step for the linear model y = w*x + b.

    Fixes over the original:
      * the batch arguments were ignored — the body read module-level
        globals ``gt_x``/``gt_y`` instead of ``batch_gt_x``/``batch_gt_y``;
      * the bias update subtracted the whole residual *vector* from the
        scalar ``b``; the bias gradient is the mean residual;
      * gradients are averaged over the batch, matching loss_function's
        1/(2n) scaling.

    Returns the updated (w, b) as scalars.
    """
    x = np.asarray(batch_gt_x, dtype=float)
    y = np.asarray(batch_gt_y, dtype=float)
    diff = (w * x + b) - y          # residuals on this batch
    n = len(y)
    dw = np.dot(diff, x) / n        # d(loss)/dw
    db = np.sum(diff) / n           # d(loss)/db
    w = w - lr * dw
    b = b - lr * db
    return w, b
# # generate data
# In[421]:
def gen_sample_data():
    """Generate 100 noisy samples of a random line y = w*x + noise.

    Fix: the noise was built with shape (1, 100), which made ``y`` a 2-D
    array — ``plt.scatter`` tolerated it, but ``train``'s ``y[j]`` indexing
    raised IndexError for j > 0.  The noise is now 1-D so x and y are both
    shape (100,).

    Returns:
        (x, y): two 1-D numpy arrays of length 100.
    """
    w = random.randint(0, 10) + random.random()   # hidden slope
    a = random.randint(0, 10)                     # noise offset mean
    noise = 50 * np.random.randn(100) + np.random.normal(a, 1, 100)
    x = np.random.uniform(0, 100, size=100)
    y = w * x + noise
    return x, y
# In[463]:
x,y = gen_sample_data()
plt.scatter(x,y)
plt.show()
# In[464]:
def train(x, y, batch_size, lr, max_iterations):
    # Mini-batch SGD driver: each iteration samples `batch_size` indices
    # (with replacement) and delegates the parameter update to
    # cal_step_gradient.  Returns the final (w, b).
    w, b = 0, 0
    num_sample = len(x)  # NOTE(review): unused — len(x) is re-queried below
    for i in range(max_iterations):
        batch_idxs = np.random.choice(len(x),batch_size)
        batch_x = [x[j] for j in batch_idxs]
        batch_y = [y[j] for j in batch_idxs]
        w,b = cal_step_gradient(batch_x, batch_y, w, b, lr)
    return w,b
# In[474]:
w,b = train(x,y,batch_size=100,lr=0.0005,max_iterations=100)
# In[472]:
plt.title("linear regression")
plt.xlim(0,100)
plt.ylim(0,1000)
plt.plot(w,b,color='r')
plt.scatter(x,y)
plt.show
| [
"Yuchen_He@n.t.rd.honda.co.jp"
] | Yuchen_He@n.t.rd.honda.co.jp |
e9b8fb31c75cc3d0bbc3f933899d16e361744c67 | f1a6fd7ff5dcc950c9b15143bb7d965a51135dc1 | /ngram.py | d5b06a570c07a35c75a2e18d5bdfca460ef4d63d | [] | no_license | huanghao/learning-nlp | 6608ae185324b349c73669c0b63a0a2048029a17 | 936838d62fc7651e7b359fa07e30982a06f2cedd | refs/heads/master | 2021-03-12T22:57:15.518912 | 2014-12-01T23:17:25 | 2014-12-01T23:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | def ngram(text, n):
grams = []
for word in text:
grams.append(word)
if len(grams) == n:
yield tuple(grams)
grams.pop(0)
bigram = lambda text: ngram(text, 2)
trigram = lambda text: ngram(text, 3)
def test(text):
print '-' * 40
print 'text:', text
print
print 'bi-grams:'
for item in bigram(text):
print item
print
print 'tri-grams:'
for item in trigram(text):
print item
print
print '5-grams:'
for item in ngram(text, 5):
print item
if __name__ == '__main__':
from book import sent1, text1
test(sent1)
test(text1[:30])
| [
"hao.h.huang@intel.com"
] | hao.h.huang@intel.com |
c91fc9202fd322db7ceccfcb54177dbf207bd437 | 209971c33b13b3641be55d9e8f880055ea712b0e | /factcore/works/smtworks.py | cee10ed28daf4758c663ba3eb5f915555a5035b3 | [] | no_license | FriedrichWang/factorytool | 675436997d284e4e8329cfa3b11f8e310599621b | 293cd07060c5923ea7bb4e2c0083a8f5aa6e6c3a | refs/heads/master | 2021-01-10T17:22:33.735005 | 2015-12-15T06:18:36 | 2015-12-15T06:18:36 | 47,337,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | #encoding=utf8
from factcore.works.workflow import BaseWork
class SnWriteWork(BaseWork):
    # Serial-number flashing step.
    # NOTE(review): receives `ctx` where every sibling receives `uicb`;
    # both are simply forwarded to BaseWork — confirm the naming is intended.
    def __init__(self, ctx):
        super(SnWriteWork, self).__init__(u'Sn烧写', ctx)

class ScreenWork(BaseWork):
    # Display/screen test step.
    def __init__(self, uicb):
        super(ScreenWork, self).__init__(u'屏幕测试', uicb)

class BacklightWork(BaseWork):
    # Backlight test step.
    def __init__(self, uicb):
        super(BacklightWork, self).__init__(u'背光测试', uicb)

class SensorWork(BaseWork):
    # Sensor test step.
    def __init__(self, uicb):
        super(SensorWork, self).__init__(u'传感器测试', uicb)

class AudioWork(BaseWork):
    # Audio-playback test step.
    def __init__(self, uicb):
        super(AudioWork, self).__init__(u'音频测试', uicb)

class RecordWork(BaseWork):
    # Audio-recording test step.
    def __init__(self, uicb):
        super(RecordWork, self).__init__(u'录音测试', uicb)
| [
"zhangyang@yuewen.com"
] | zhangyang@yuewen.com |
97f962ce6c17f6babfe9ca499eb8d54b7c02b803 | ba1066b0860a73020eb5c4ee0021f68e3639327c | /Sujet 1/evaluation.py | 2a1b30bdb920408611258d3a5c7a66af323e27fe | [] | no_license | Hiestaa/TARP-ODNL | cf51678ce4940d2d84a167317eb70298863cc9b1 | 3a09054558ddc188f80abfd13ea51e1e99d64d68 | refs/heads/master | 2021-01-25T07:27:54.313545 | 2014-01-13T01:14:33 | 2014-01-13T01:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | from machine import Machine
from task import Task
import Log
import time
import os.path
class Evaluation:
    """Evaluate one permutation of tasks on a chain of machines (flow shop).

    ``fast()`` computes the makespan analytically with the classic dynamic
    program; ``simulation()`` replays the schedule tick by tick through
    linked Machine objects and writes an HTML log.
    (Python 2 code: built-in ``reduce`` and ``print`` statements elsewhere
    in the file.)
    """
    def __init__(self, tasks, sequence, id):
        # Reorder the task list according to the permutation `sequence`.
        self.tasks = []
        for ti in sequence:
            self.tasks.append(tasks[ti])
        self.nbtasks = len(self.tasks)
        self.taskcomplete = 0     # tasks fully processed so far
        self.machinelist = None   # head of the linked machine chain
        self.time = 0             # simulated clock, in ticks
        self.log = None
        #self.log = Log.Log('log/last.log.html')
        self.id = id              # base name used for the log file

    def fast(self) :
        # Flow-shop DP: completion(j,i) = max(completion(j-1,i),
        # completion(j,i-1)) + duration(j,i).  Returns the makespan
        # without running the tick simulation.
        tab = []
        for t in self.tasks:
            copytask = []
            for op in t.oplist:
                copytask.append(op)
            tab.append(copytask)
        nbLines = len(tab[0])      # operations per task (= machines)
        nbColonnes = len(tab)      # number of tasks
        i = 1
        while i < nbLines :
            # first task: its operations run back to back
            tab[0][i] = tab[0][i - 1] + tab[0][i]
            i += 1
        j = 1
        while j < nbColonnes :
            # first operation of each task waits for the previous task
            tab[j][0] = tab[j - 1][0] + tab[j][0]
            i = 1
            while i < nbLines :
                if tab[j - 1][i] > tab[j][i - 1] :
                    tmp = tab[j - 1][i]
                else :
                    tmp = tab[j][i - 1]
                tab[j][i] = tab[j][i] + tmp
                i += 1
            j += 1
        return tab[nbColonnes - 1][nbLines - 1]
        # c = np.zeros((n + 1, m + 1))
        # for i in range(1, n + 1):
        #     for j in range(1, m + 1):
        #         c[i, j] = max(c[i - 1, j], c[i, j - 1]) + tasks[order[i - 1], j - 1]
        # return c[n, m]

    def ontaskdone(self, task):
        # Callback fired by the last machine when a task leaves the line.
        self.taskcomplete += 1
        self.log.log_event_success(self.time, 'TaskEvent',"A task has been finished: " +str(task.id))

    def onopdone(self):
        # Callback fired when the first machine frees up: feed it the next task.
        self.log.log_event(self.time, 'TaskEvent', "An operation has been finished on first machine !")
        if len(self.tasks):
            task = self.tasks.pop(0)
            task.reinit()
            self.machinelist.assignTask(task, self.onopdone, self.ontaskdone)

    def findUniqueName(self, name):
        # Derive a log path that does not collide with an existing file by
        # trying progressively longer '-'-joined prefixes of `name`.
        # NOTE(review): `x is not 0` relies on CPython small-int identity
        # caching; it should be `x != 0`.
        lst = name.split('-')
        for x in range(len(lst)):
            if x is not 0:
                test = reduce(lambda a, b: a + '-' + b,lst[:x])
                if not os.path.isfile('log/' + test + '.log.html'):
                    return 'log/' + test + '.log.html'
        return 'log/' + name + '.log.html'

    def simulation(self):
        # Tick-driven replay of the schedule; returns the total elapsed ticks.
        self.log = Log.Log(self.findUniqueName(self.id))
        self.log.log_init_tasklist(self.tasks)
        self.log.log_event_info(self.time, 'Execution', "Execution started !")
        task = self.tasks.pop(0)
        task.reinit()
        k = 0
        # Build one machine per operation of the first task, linked in a chain.
        for op in task.oplist:
            m = Machine(k, self.log)
            k += 1
            if not self.machinelist:
                self.machinelist = m
            else:
                tmp = self.machinelist
                while tmp.next:
                    tmp = tmp.next
                tmp.next = m
        self.log.log_event(self.time, 'Execution', str(self.machinelist.getNbMachines()) + " machines added to process operations.")
        self.machinelist.assignTask(task, self.onopdone, self.ontaskdone)
        # Advance the clock one tick at a time until everything completes.
        # NOTE(review): `is not` compares identity; `!=` would be safer here.
        while self.taskcomplete is not self.nbtasks:
            #print self.time,
            self.time += 1
            self.machinelist.update(self.time)
        self.log.log_event_success(self.time, 'Execution', "All tasks done, execution successfully done !")
        self.log.log_init_machines()
        m = self.machinelist
        while m:
            self.log.log_machine_state(m.id, m.total_working_time, m.total_waiting_time, m.work_history)
            m = m.next
        self.log.log_close()
        return self.time
if __name__ == '__main__':
tasks = [
Task(1, [10, 40, 30]),
Task(2, [20, 50, 10]),
Task(3, [1, 5, 10]),
Task(4, [5, 20, 10]),
Task(5, [10, 15, 5])
]
seq = [4, 3, 1, 2, 0]
t = time.time()
itern = Evaluation(tasks, seq).run()
print ""
print "Evaluation time: ", time.time() - t, "s"
print "Evaluation result: ", itern, 'iterations' | [
"rom1guyot@gmail.com"
] | rom1guyot@gmail.com |
c4200302ee5ace6550a8f4c3f8c735bb3e348ae9 | 34d1e18fbf3d20a999171643e19d3255864860be | /posts/migrations/0006_auto_20201218_1652.py | 1b08f0eba75c6f8fabad8c54b03761718e90fb7d | [] | no_license | volkankaraali/django-blog | 8d533a26fe07c20ae00dad478f486049260d7f89 | b1f755c415883139dbc7cc66c837133e339e7231 | refs/heads/master | 2023-03-11T04:56:34.882168 | 2021-03-01T17:17:23 | 2021-03-01T17:17:23 | 343,489,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 3.1.3 on 2020-12-18 13:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0005_auto_20201218_1555'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='category',
),
migrations.AddField(
model_name='post',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.category', verbose_name='Kategori'),
),
]
| [
"volkankaraali1@gmail.com"
] | volkankaraali1@gmail.com |
b88552cc9ae0b9bb052893202d024dbb19ac9230 | 5657da1fed36707473b41a5b2072804f6065ff37 | /pycmdiaps/settings.py | b880583dbfa4cdf55245549e43f6815c8879e967 | [] | no_license | iscLuisPalomares/pycmdiaps | 5036a0483f630a05661baf98080eb76324e0720e | 3badacbff71a683272d7469bc1068bb82dd10769 | refs/heads/master | 2020-04-05T02:44:49.603604 | 2018-11-07T04:09:36 | 2018-11-07T04:09:36 | 156,488,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for pycmdiaps project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '61snbp^7wxp&&irjd+35+a4q*e*$kk-jxr()a0_f8zje^e+8c@'  # NOTE(review): committed to version control — rotate it and load from an environment variable before any real deployment

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # NOTE(review): must be False in production; debug pages expose settings and tracebacks

ALLOWED_HOSTS = []  # empty is only accepted while DEBUG is True; list real hostnames for production
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pycmdiaps.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pycmdiaps.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"iscluispalomares@gmail.com"
] | iscluispalomares@gmail.com |
d0cfeb20763fd372a5412739055d3c3ec5e13de1 | 439e9408dc380d24c08bf4a97f15134a171cac64 | /todolist/models.py | 42a1e6a11abaff0233a722290cc2d8088c1d46cf | [] | no_license | raonismaneoto/treinamento_eciis | 90a17bc43bd0579b25dfbf507259de6191386260 | 4976152c97bff70f28214a58d6262ed221189cd6 | refs/heads/master | 2021-01-21T22:35:35.290607 | 2017-05-16T20:15:20 | 2017-05-16T20:15:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from google.appengine.ext import ndb
class Counter(ndb.Model):
updates = ndb.IntegerProperty(default=0)
minutes = ndb.IntegerProperty(default=0)
class ToDo(ndb.Model):
title = ndb.StringProperty(required=True)
author = ndb.StringProperty()
text = ndb.TextProperty()
deadline = ndb.DateTimeProperty()
keyword = ndb.StringProperty(repeated=True)
updates = ndb.IntegerProperty(default=0)
| [
"daltonserey@gmail.com"
] | daltonserey@gmail.com |
8ada032bc3aab3862967caeb538c220f59726c0d | 90dfea4e88ca24c5bdf14db7624fa1825304518b | /coffesploit/core/pluginmanage/plugin.py | cf68799ef66b3515923dfa7e5757df23f92b3766 | [] | no_license | aisxyz/coffeesploit | a61ee74380d09b89be144cc5cf724d9a376a2f54 | cbbd0a888ec27a1733d6fae6ba55d44b03eacefe | refs/heads/master | 2021-11-13T23:37:16.854266 | 2016-07-06T13:17:15 | 2016-07-06T13:17:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | # -*- coding: utf-8 -*-
#from coffesploit.core.logmanager import logmanager
class Plugin(object):
"""
This is the base class for aLL plugins, all plugins should inherit from it
with '__init__()' parameter-free, and implement the following methods at least:
status() --> Dict-like, used to do some setting.
result() --> Dict-like, used to return executive outcomes.
run() --> None, used to run the current plugin.
help() --> String, used to show help information.
"""
def run(self, status):
pass
def status(self):
return {}
def help(self):
return("Use help command to get help information.\n")
def result(self):
""" reslut should return a dict-like """
return {}
| [
"leipeng1211@foxmail.com"
] | leipeng1211@foxmail.com |
966d74d56d048ce98e54842ab9549589742118e9 | 2a839c9f5ad608cbc6cbb7d03a8af482dcbd2956 | /cgi-bin/download.py | 49a2220a84d8090abe8d27c4ea01117f334c80cc | [] | no_license | scraperdragon/google-docs | 0a3653a10a8f4db6c419745e87c45564706405f8 | 56a6955bfbfa1acc56732356f9d828690985fce3 | refs/heads/master | 2021-01-19T15:32:38.868177 | 2015-04-17T10:55:17 | 2015-04-17T10:55:17 | 22,986,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | #!/usr/bin/python
import sys
import os
import requests
import json
import urlparse
def request_with_key(url):
return requests.get(url, headers={'Authorization': 'Bearer {key}'.format(key=key)})
def output(msg):
print json.dumps(msg)
exit(0)
DRIVE_FILES_URL = "https://www.googleapis.com/drive/v2/files/{id}"
DOCUMENT_EXPORT_URL = "https://docs.google.com/feeds/download/documents/export/Export?id={id}&exportFormat={format}"
print "Content-type: application/json\n\n";
# acquire environment
if len(sys.argv) == 4:
doc_id, key, filename = sys.argv[1:]
else:
params = urlparse.parse_qs(os.environ.get("QUERY_STRING"))
doc_id, = params.get('id')
key, = params.get('key')
filename, = params.get('filename')
if not(doc_id):
output({"error": "no id"})
if not(key):
output({"error": "no key"})
if not(filename):
output({"error": "no filename"})
r = request_with_key(DRIVE_FILES_URL.format(id=doc_id))
try:
j = r.json()
except Exception:
output({"error": "response wasn't json", "error_detail":r.content, "params": params})
if 'downloadUrl' in j:
xlsx_url = j['downloadUrl']
else:
xlsx_url = j['exportLinks']['application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']
xlsx_content = request_with_key(xlsx_url).content
with open(filename, 'w') as f:
f.write(xlsx_content)
output({"filename": filename})
| [
"dragon@scraperwiki.com"
] | dragon@scraperwiki.com |
6912f2477ca42c2d02095a157bee916ef68c2c49 | 55f67b4252ae9331b691e62e14cc055a78d23d74 | /__init__.py | a686b9d92d3e69235eae229dca1fcc8cd624f1d0 | [] | no_license | BlueSCar/ctfd-reddit-oauth | 417c0fb67425269e6bae31d4198818d7ab87442c | b4c74fdb2497387c64d481694d3b3cf59e93cbc0 | refs/heads/master | 2020-12-27T07:23:47.637438 | 2020-02-03T04:07:17 | 2020-02-03T04:07:17 | 237,813,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,309 | py | from flask import (
current_app as app,
render_template,
request,
redirect,
url_for,
session,
Blueprint,
)
from itsdangerous.exc import BadTimeSignature, SignatureExpired, BadSignature
from CTFd.models import db, Users, Teams
from CTFd.utils import get_config, get_app_config
from CTFd.utils.decorators import ratelimit
from CTFd.utils import user as current_user
from CTFd.utils import config, validators
from CTFd.utils import email
from CTFd.utils.security.auth import login_user, logout_user
from CTFd.utils.crypto import verify_password
from CTFd.utils.logging import log
from CTFd.utils.decorators.visibility import check_registration_visibility
from CTFd.utils.config import is_teams_mode
from CTFd.utils.config.visibility import registration_visible
from CTFd.utils.modes import TEAMS_MODE
from CTFd.utils.plugins import override_template
from CTFd.utils.security.signing import unserialize
from CTFd.utils.helpers import error_for, get_errors
import os
import base64
import requests
def load(app):
dir_path = os.path.dirname(os.path.realpath(__file__))
template_path = os.path.join(dir_path, 'reddit-signin.html')
override_template('login.html', open(template_path).read())
template_path = os.path.join(dir_path, 'reddit-register.html')
override_template('register.html', open(template_path).read())
template_path = os.path.join(dir_path, 'reddit-scoreboard.html')
override_template('scoreboard.html', open(template_path).read())
template_path = os.path.join(dir_path, 'reddit-scoreboard.html')
override_template('scoreboard.html', open(template_path).read())
template_path = os.path.join(dir_path, 'reddit-users.html')
override_template('users.html', open(template_path).read())
template_path = os.path.join(dir_path, 'reddit-public.html')
override_template('public.html', open(template_path).read())
@app.route("/reddit")
def reddit_login():
endpoint = (
get_app_config("REDDIT_AUTHORIZATION_ENDPOINT")
or get_config("reddit_authorization_endpoint")
or "https://ssl.reddit.com/api/v1/authorize"
)
client_id = get_app_config("REDDIT_CLIENT_ID") or get_config("reddit_client_id")
callback_url = get_app_config("REDDIT_CALLBACK_URL") or get_config("reddit_callback_url")
if client_id is None:
error_for(
endpoint="reddit.login",
message="Reddit OAuth Settings not configured. "
"Ask your CTF administrator to configure Reddit integration.",
)
return redirect(url_for("auth.login"))
redirect_url= "{endpoint}?client_id={client_id}&response_type=code&state={state}&redirect_uri={callback_url}&duration=temporary&scope=identity".format(
endpoint=endpoint, client_id=client_id, state=session["nonce"], callback_url=callback_url
)
return redirect(redirect_url)
@app.route("/reddit/callback", methods=["GET"])
@ratelimit(method="GET", limit=10, interval=60)
def oauth_redirect():
oauth_code = request.args.get("code")
state = request.args.get("state")
if session["nonce"] != state:
log("logins", "[{date}] {ip} - OAuth State validation mismatch")
error_for(endpoint="auth.login", message="OAuth State validation mismatch.")
return redirect(url_for("auth.login"))
if oauth_code:
url = (
get_app_config("REDDIT_TOKEN_ENDPOINT")
or get_config("reddit_token_endpoint")
or "https://ssl.reddit.com/api/v1/access_token"
)
client_id = get_app_config("REDDIT_CLIENT_ID") or get_config("reddit_client_id")
client_secret = get_app_config("REDDIT_CLIENT_SECRET") or get_config(
"reddit_client_secret"
)
reddit_user_agent = get_app_config("REDDIT_USER_AGENT") or get_config("reddit_user_agent")
callback_url = get_app_config("REDDIT_CALLBACK_URL") or get_config("reddit_callback_url")
client_auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
headers = {"content-type": "application/x-www-form-urlencoded", "User-Agent": reddit_user_agent}
token_request = requests.post(url, auth=client_auth, data={"grant_type": "authorization_code", "code": oauth_code, "redirect_uri": callback_url}, headers=headers)
if token_request.status_code == requests.codes.ok:
token = token_request.json()["access_token"]
user_url = (
get_app_config("REDDIT_API_ENDPOINT")
or get_config("reddit_api_endpoint")
or "https://oauth.reddit.com/api/v1/me"
)
headers = {
"Authorization": "Bearer " + str(token),
"User-Agent": reddit_user_agent
}
api_response = requests.get(url=user_url, headers=headers)
log("logins", str(api_response))
api_data = api_response.json()
user_id = api_data["id"]
user_name = api_data["name"]
user_email = api_data["name"] + "@reddit.com"
user = Users.query.filter_by(name=user_name).first()
if user is None:
# Check if we are allowing registration before creating users
if registration_visible():
user = Users(
name=user_name,
email=user_email,
oauth_id=user_id,
verified=True,
)
db.session.add(user)
db.session.commit()
else:
log("logins", "[{date}] {ip} - Public registration via Reddit blocked")
error_for(
endpoint="auth.login",
message="Public registration is disabled. Please try again later.",
)
return redirect(url_for("auth.login"))
if get_config("user_mode") == TEAMS_MODE:
team_id = api_data["team"]["id"]
team_name = api_data["team"]["name"]
team = Teams.query.filter_by(oauth_id=team_id).first()
if team is None:
team = Teams(name=team_name, oauth_id=team_id, captain_id=user.id)
db.session.add(team)
db.session.commit()
team_size_limit = get_config("team_size", default=0)
if team_size_limit and len(team.members) >= team_size_limit:
plural = "" if team_size_limit == 1 else "s"
size_error = "Teams are limited to {limit} member{plural}.".format(
limit=team_size_limit, plural=plural
)
error_for(endpoint="auth.login", message=size_error)
return redirect(url_for("auth.login"))
team.members.append(user)
db.session.commit()
if user.oauth_id is None:
user.oauth_id = user_id
user.verified = True
db.session.commit()
login_user(user)
return redirect(url_for("challenges.listing"))
else:
log("logins", "[{date}] {ip} - OAuth token retrieval failure")
log("logins", str(token_request))
log("logins", str(token_request.status_code))
log("logins", token_request.json()["access_token"])
error_for(endpoint="auth.login", message="OAuth token retrieval failure.")
return redirect(url_for("auth.login"))
else:
log("logins", "[{date}] {ip} - Received redirect without OAuth code")
error_for(
endpoint="auth.login", message="Received redirect without OAuth code."
)
return redirect(url_for("auth.login"))
| [
"radjewwj@gmail.com"
] | radjewwj@gmail.com |
ae88ce193bec133a7ab6e60483cbce6181dafdcf | ba373bc39d90f36f396d40eb93adb2f0dbcbeafe | /3_canvas.py | 859d6579541e0640547ad8f37663b5d87f929975 | [] | no_license | durveshvpatil01/Python-GUI | 5a41412cc364de33fa1742afae519b8c9a15d97a | 5589a8d007ba1d0bbca16390a912ac227995b5c1 | refs/heads/main | 2023-04-11T02:52:15.373385 | 2021-04-14T11:58:05 | 2021-04-14T11:58:05 | 357,888,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | import tkinter
top = tkinter.Tk()
C = tkinter.Canvas(top, bg="blue", height=250, width=300)
coord = 10, 50, 240, 210
arc = C.create_arc(coord, start=0, extent=90, fill="skyblue")
C.pack()
top.mainloop()
| [
"durveshvpatil01@gmail.com"
] | durveshvpatil01@gmail.com |
248fc2138c8eed4fa4fb235c3584ea31d3447f36 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations_async/_azure_firewalls_operations_async.py | 550653dc96d84f6afaa60225d81bea644ed59841 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 21,582 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations:
"""AzureFirewallsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> None:
"""Deletes the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def get(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> "models.AzureFirewall":
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.AzureFirewall
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewall"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "models.AzureFirewall",
**kwargs
) -> "models.AzureFirewall":
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewall"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureFirewall')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "models.AzureFirewall",
**kwargs
) -> "models.AzureFirewall":
"""Creates or updates the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:param parameters: Parameters supplied to the create or update Azure Firewall operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.AzureFirewall
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.AzureFirewall
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewall"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.AzureFirewallListResult"]:
"""Lists all Azure Firewalls in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewallListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.AzureFirewallListResult"]:
"""Gets all the Azure Firewalls in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewallListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
33ccf5839b5b9020d97afd2856a21d22e416e841 | a66028aeb13edde4f10fb2e5664f7beb408fb7c1 | /app.py | e3068f0043dbdb96cf7b7b7470009dfc0809cd7e | [] | no_license | Pablitinho/OpenVino_First_App | d7af77f0cdb393ae8e4eac72ab9f62be8f4f92bf | 336490c6f06a4027dd750ea07783721f68e34909 | refs/heads/master | 2020-11-27T11:40:05.485525 | 2019-12-21T12:34:44 | 2019-12-21T12:34:44 | 229,424,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,976 | py | import argparse
import cv2
import numpy as np
import time
from handle_models import handle_output, preprocessing
from inference import Network
# Label lookups for the CAR_META model: its two class-index outputs are
# used as indices into these lists (vehicle color, then vehicle type) —
# see the CAR_META branch of create_output_image.
CAR_COLORS = ["white", "gray", "yellow", "red", "green", "blue", "black"]
CAR_TYPES = ["car", "bus", "truck", "van"]
def get_args():
    """Parse command-line arguments for the edge inference app.

    Returns:
        argparse.Namespace with attributes:
            i -- path to the input image (required)
            m -- path to the model XML file (required)
            t -- model type: POSE, TEXT or CAR_META (required)
            c -- CPU extension file location, or None (optional)
            d -- target device, defaults to "CPU" (optional)
    """
    parser = argparse.ArgumentParser("Basic Edge App with Inference Engine")

    # Remove argparse's default positional/optional groups so --help
    # shows our own "required arguments" / "optional arguments" sections.
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')

    required.add_argument("-i", help="The location of the input image",
                          required=True)
    required.add_argument("-m", help="The location of the model XML file",
                          required=True)
    required.add_argument("-t", help="The type of model: POSE, TEXT or CAR_META",
                          required=True)
    optional.add_argument("-c", help="CPU extension file location, if applicable",
                          default=None)
    optional.add_argument("-d", help="Device, if not CPU (GPU, FPGA, MYRIAD)",
                          default="CPU")

    return parser.parse_args()
def get_mask(processed_output):
    """Build a 3-channel overlay mask from a single-channel semantic map.

    The detection map goes into the middle (green, in BGR) channel while
    the other two channels are zeroed, so the result can be added directly
    onto the original image to highlight detections.
    """
    zeros = np.zeros(processed_output.shape)
    # Stack as (blue, green, red): detections land in the green channel.
    return np.dstack((zeros, processed_output, zeros))
def create_output_image(model_type, image, output):
    """Render the processed inference *output* onto *image*.

    model_type selects the post-processing: "POSE" and "TEXT" overlay a
    green semantic mask; "CAR_META" writes the detected color/type as text.
    Unknown types return the image unchanged.
    """
    if model_type == "POSE":
        # Drop the final channel, which is not used for keypoint heatmaps.
        heatmaps = output[:-1]
        # Binarize each heatmap at 0.5 confidence (255 where detected).
        for idx in range(len(heatmaps)):
            heatmaps[idx] = np.where(heatmaps[idx] > 0.5, 255, 0)
        # Collapse the per-keypoint maps into a single detection map.
        combined = np.sum(heatmaps, axis=0)
        return image + get_mask(combined)
    if model_type == "TEXT":
        # Channel 1 holds the text-detection confidences.
        detected = np.where(output[1] > 0.5, 255, 0)
        return image + get_mask(detected)
    if model_type == "CAR_META":
        color = CAR_COLORS[output[0]]
        car_type = CAR_TYPES[output[1]]
        # Scale the annotation position/size with the image height.
        scaler = max(int(image.shape[0] / 1000), 1)
        return cv2.putText(image,
                           "Color: {}, Type: {}".format(color, car_type),
                           (50 * scaler, 100 * scaler), cv2.FONT_HERSHEY_SIMPLEX,
                           2 * scaler, (255, 255, 255), 3 * scaler)
    print("Unknown model type, unable to create output image.")
    return image
def perform_inference(args):
    """
    Performs inference on an input image, given a model.

    args comes from get_args(): .i input image path, .m model XML path,
    .t model type (POSE, TEXT or CAR_META), .c CPU extension path or None,
    .d target device string.
    Writes the annotated result to "<model_type>.png" in the working dir.
    """
    # Create a Network for using the Inference Engine
    inference_network = Network()
    # Load the model in the network, and obtain its input shape
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    print("Height: ", h, "Width: ", w)
    # Read the input image
    image = cv2.imread(args.i)
    # Preprocess the input image to the model's expected size/layout.
    # (Fixed: the original had a duplicated `preprocessed_image =` assignment.)
    preprocessed_image = preprocessing(image, h, w)
    start = time.time()
    # Perform synchronous inference on the image
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request
    output = inference_network.extract_output()
    # Select the type-specific post-processing function and apply it.
    output_func = handle_output(args.t)
    processed_output = output_func(output, image.shape)
    end = time.time()
    print("ms: ", (end - start) * 1000.0)
    # Create an output image based on network
    output_image = create_output_image(args.t, image, processed_output)
    # Save down the resulting image
    outputFile = format(args.t) + ".png"
    cv2.imwrite(outputFile, output_image)
def main():
    """Entry point: parse CLI arguments and run inference once."""
    cli_args = get_args()
    perform_inference(cli_args)


if __name__ == "__main__":
    main()
| [
"p.guzman-sanchez@denso-adas.de"
] | p.guzman-sanchez@denso-adas.de |
e1fa4afc8678a3a63cb20ebd6b7b2912c59085e6 | cf9b2e7ad3f988957ad2b46413b80ff3a9bbb3bc | /scripts/clean_data_base.py | 72e609172fede71ad88f3c4a0135e17b9d9222e7 | [
"MIT",
"Python-2.0"
] | permissive | dlegor/textmining_pnud | 2942c401f42d13e5ae601ec318dc018b9ddad9a5 | a9f37e439f6b02940743d2e361f817cb49da6e04 | refs/heads/master | 2023-04-11T15:00:24.594907 | 2021-04-06T22:36:46 | 2021-04-06T22:36:46 | 289,972,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,928 | py | #!/usr/bin/env python
# coding: utf-8
# # Limpieza de la Base de Datos
#
# Este script contiene todas las funciones necesarias para limpiar y transformar la base en la versión más adecuada para el proyecto.
#
# Los objetivos son los siguientes:
#
# * Depurar los campos que contengan texto con algún error de escritura o caracteres inadecuados para el procesamiento.
# * Fijar un tipo de dato adecuado para el manejo con otros archivos
# * Compactar u organizar las columnas que en un formato que permita una mejor visualización y organización de la información referente a los reportes mensuales.
#
#Required packages
import pandas as pd
import numpy as np
import warnings
import typer
from wasabi import msg
from pathlib import Path
from typing import Union
#warning messages are ignored
warnings.filterwarnings("ignore")
# Constants for the cleaning process
# Column -> pandas dtype mapping used when loading the raw CSV.
# Every listed column is loaded as 'category', which keeps memory usage
# low for these mostly-repetitive catalog/identifier fields.
SCHEMA={'CICLO':'category',
        'ID_RAMO':'category',
        'DESC_RAMO':'category',
        'ID_UR':'category',
        'DESC_UR':'category',
        'ID_ENTIDAD_FEDERATIVA':'category',
        'ENTIDAD_FEDERATIVA':'category',
        'ID_MUNICIPIO':'category',
        'MUNICIPIO':'category',
        'GPO_FUNCIONAL':'category',
        'DESC_GPO_FUNCIONAL':'category',
        'ID_FUNCION':'category',
        'DESC_FUNCION':'category',
        'ID_SUBFUNCION':'category',
        'DESC_SUBFUNCION':'category',
        'ID_AI':'category',
        'DESC_AI':'category',
        'ID_MODALIDAD':'category',
        'DESC_MODALIDAD':'category',
        'ID_PP':'category',
        'DESC_PP':'category',
        'MODALIDAD_PP':'category',
        'ID_PND':'category',
        'DESC_PND':'category',
        'OBJETIVO_PND':'category',
        'PROGRAMA_PND':'category',
        'DESC_PROGRAMA_PND':'category',
        'OBJETIVO_PROGRAMA_PND':'category',
        'DESC_OBJETIVO_PROGRAMA_PND':'category',
        'OBJETIVO_ESTRATEGICO':'category',
        'ID_NIVEL':'category',
        'DESC_NIVEL':'category',
        'INDICADOR_PND':'category',
        'TIPO_RELATIVO':'category',
        'FRECUENCIA':'category',
        'TIPO_INDICADOR':'category',
        'DIMENSION':'category',
        'UNIDAD_MEDIDA':'category',
        'SENTIDO':'category'
        }
# Normalization map for the TIPO_INDICADOR column: collapses spelling,
# casing and accent variants onto a canonical label.
DTIPO_INDICADOR={'Estratégico':'Estratégico',
                 'Gestión':'Gestión',
                 'Sesiones de Comité Técnico':'Sesiones de Comité Técnico',
                 'Gestion':'Gestión',
                 'SOLICITUDES DE SERVICIO':'Solicitudes de Servicio',
                 'ECONOMIA':'Economía',
                 'Estrategico':'Estratégico',
                 'gestión':'Gestión',
                 'Absoluto':'Absoluto',
                 'Sectorial':'Sectorial',
                 'Desempeño Operativo':'Desempeño Operativo',
                 'GESTION':'Gestión',
                 'ESTRATÉGICO':'Estratégico',
                 'De Gestión':'Gestión',
                 'Estratgico':'Estratégico'}
# Normalization map for the DIMENSION column; some source values are whole
# sentences that are mapped onto the dimension they describe.
# NOTE(review): 'Eficiencia' maps to 'Eficacia' here, while the misspelling
# 'Eficiciencia' maps to 'Eficiencia' — verify the first mapping is intended.
DDIMENSION={'Eficacia':'Eficacia',
            'Eficiencia':'Eficacia',
            'Economía':'Economía',
            'Calidad':'Calidad',
            'eficacia':'Eficacia',
            'ECONOMIA':'Economía',
            '0':'Sin Dato',
            'Servicios Personales':'Servicios Personales',
            'Económica':'Economía',
            'Eificacia':'Eficacia',
            'EFICACIA':'Eficacia',
            'Eficiciencia':'Eficiencia',
            'Es la suma ponderada de la proporción de las observaciones de alto impacto respecto del total de observaciones determinadas en las auditorías directas de alto impacto realizadas por el área de Auditoría Interna del OIC; la calidad de dichas observaciones, y la calidad de las recomendaciones que de éstas se derivan. (Eficacia)':'Eficacia',
            'Es un promedio ponderado que evalúa al OIC en la atención de quejas y denuncias. (Eficacia)':'Eficacia',
            'Mide las acciones de las Áreas de Responsabilidades en algunas de sus funciones primordiales: 1) el tiempo en la atención de los expedientes, 2) la resolución de expedientes y 3) la firmeza de las sanciones impuestas. (Eficacia)':'Eficacia',
            'PORCENTAJE DE SOLICITUDES DE PRÉSTAMO AUTORIZADAS':'Porcentaje de Solicitudes de Préstamo Autorizadas',
            'El Indicador de Mejora de la Gestión (IMG) evalúa las acciones realizadas por los OIC en sus instituciones de adscripción y en aquellas bajo su atención, así como los resultados alcanzados en las mismas. Específicamente, el indicador se orienta a evaluar la manera en que los OIC:\r\n\r\n- Promueven acciones orientadas al logro de resultados respecto a las vertientes comprometidas en sus Programas Anuales de Trabajo (PAT)2015, en materia de auditoría para el desarrollo y mejora de la gestión pública.':'Sin Datos'}
# Auxiliary functions
def cln_txt(str_inp:str)->str:
    """Normalize curly quotes, map whitespace-like characters to spaces,
    drop special punctuation, remove spaces and strip the result."""
    normalized = (
        str_inp.replace(u'\xa0', u' ')
               .replace(u'\n', u' ')
               .replace(u'\r', u' ')
    )
    kept = [ch for ch in normalized
            if ch not in '!"#$%&\'()*+-;<=>?@[\\]^_`{|}~']
    return ''.join(kept).replace(' ', '').strip()
def main(path_file:str=typer.Argument(...,help='Path to input file')):
    """
    Database cleaning.

    This script contains every function needed to clean and transform the
    database into the version most suitable for the project.

    Goals:

    * Clean text fields containing spelling mistakes or characters that are
      unsuitable for processing.
    * Set appropriate data types for interoperability with other files.
    * Compact/organise the monthly-report columns into a format that allows
      better visualisation and organisation of that information.
    """
    # Load Data (latin1-encoded CSV, dtypes forced via the SCHEMA map)
    msg.info("Load data...")
    data=pd.read_csv(path_file,encoding='latin1',low_memory=False,dtype=SCHEMA)
    msg.info("General Information:\n")
    msg.info(data.info())
    # Remove rows with NIVEL == FID
    msg.good("Remove FID...")
    data=data[data.DESC_NIVEL!='FID'].copy()
    # Clean the free-text description fields (DESC_*) with cln_txt
    msg.info("Cleaning DESC_ ...")
    data.DESC_RAMO=data.DESC_RAMO.apply(lambda x: cln_txt(str(x)))
    data.DESC_UR=data.DESC_UR.apply(lambda x: cln_txt(str(x)))
    data.DESC_AI=data.DESC_AI.apply(lambda x: cln_txt(str(x)))
    data.DESC_PP=data.DESC_PP.apply(lambda x: cln_txt(str(x)))
    data.OBJETIVO_PND=data.OBJETIVO_PND.apply(lambda x: cln_txt(str(x)))
    data.DESC_OBJETIVO_PROGRAMA_PND=data.DESC_OBJETIVO_PROGRAMA_PND.apply(lambda x: cln_txt(str(x)))
    data.OBJETIVO_ESTRATEGICO=data.OBJETIVO_ESTRATEGICO.apply(lambda x: cln_txt(str(x)))
    data.DESC_MATRIZ=data.DESC_MATRIZ.apply(lambda x: cln_txt(str(x)))
    data.DESC_OBJETIVO=data.DESC_OBJETIVO.apply(lambda x: cln_txt(str(x)))
    # Normalize misspelled/variant category labels via the mapping dicts
    msg.info("Changes names...")
    data.TIPO_INDICADOR=data.TIPO_INDICADOR.map(DTIPO_INDICADOR)
    data.DIMENSION=data.DIMENSION.map(DDIMENSION)
    # Cast id columns to int; -1 marks previously-missing values
    msg.info("Change data type...")
    data.ID_OBJETIVO=data.ID_OBJETIVO.astype('int')
    data.ID_OBJETIVO_PADRE=data.ID_OBJETIVO_PADRE.fillna(-1).astype('int')
    data.ID_INDICADOR_CICLO_ANTERIOR=data.ID_INDICADOR_CICLO_ANTERIOR.fillna(-1).astype('int')
    data.CICLO_LINEA_BASE=data.CICLO_LINEA_BASE.fillna(-1).astype('int')
    # Collect the per-month column groups that will be compacted below
    msg.info("Create List of Columns...")
    META_MES_COL=data.columns[data.columns.str.startswith('META_MES')].tolist()
    META_AJUSTADA_MES_COL=data.columns[data.columns.str.startswith('META_AJUSTADA_MES')].tolist()
    AVANCE_MES_COL=data.columns[data.columns.str.startswith('AVANCE_MES')].tolist()
    JUSTIFICACION_AJUSTE_MES_COL=data.columns[data.columns.str.startswith('JUSTIFICACION_AJUSTE_MES')].tolist()
    AVANCE_CAUSA_MES_COL=data.columns[data.columns.str.startswith('AVANCE_CAUSA_MES')].tolist()
    AVANCE_EFECTO_MES_COL=data.columns[data.columns.str.startswith('AVANCE_EFECTO_MES')].tolist()
    AVANCE_OTROS_MOTIVOS_MES_COL=data.columns[data.columns.str.startswith('AVANCE_OTROS_MOTIVOS_MES')].tolist()
    # META by months: "value:numerator:denominator" per month
    msg.info("Meta by months...")
    for i in range(12):
        data[f'RECORDS_META_MES{i+1}']=(data[f'META_MES{i+1}'].astype('string')+':'\
            +data[f'META_MES{i+1}_NUM'].astype('string')+':'+data[f'META_MES{i+1}_DEN']\
            .astype('string'))
    # META AJUSTADA by months
    # NOTE(review): this loop reads META_MES{i} columns, not
    # META_AJUSTADA_MES{i} — looks like a copy-paste slip; confirm intent.
    msg.info("Meta Ajustada by months...")
    for i in range(12):
        data[f'RECORDS_META_AJUSTADA_MES{i+1}']=(data[f'META_MES{i+1}'].astype('string')\
            +':'+data[f'META_MES{i+1}_NUM'].astype('string')+':'+data[f'META_MES{i+1}_DEN']\
            .astype('string'))
    # AVANCE by months
    # NOTE(review): same concern — reads META_MES{i} instead of AVANCE_MES{i}.
    msg.info("AVANCE by months...")
    for i in range(12):
        data[f'RECORDS_AVANCE_MES{i+1}']=(data[f'META_MES{i+1}'].astype('string')+':'+\
            data[f'META_MES{i+1}_NUM'].astype('string')+':'+data[f'META_MES{i+1}_DEN']\
            .astype('string'))
    # JUSTIFICACION by months: join the 12 monthly columns with '|'
    # ('#' marks a missing month)
    msg.info("JUSTIFICACION by months...")
    func='|'.join
    data['JUSTIFICACIONES_AJUSTE_POR_MES']=data[JUSTIFICACION_AJUSTE_MES_COL]\
        .fillna('#').astype('str').apply(lambda x:func(x),axis=1)
    # AVANCE CAUSA by months
    msg.info("AVANCE CAUSA by months...")
    data['AVANCE_CAUSA_POR_MES']=data[AVANCE_CAUSA_MES_COL].fillna('#').astype('str')\
        .apply(lambda x:func(x),axis=1)
    # AVANCE EFECTO by months
    msg.info("AVANCE EFECTO by months...")
    data['AVANCE_EFECTO_POR_MES']=data[AVANCE_EFECTO_MES_COL].fillna('#').astype('str')\
        .apply(lambda x:func(x),axis=1)
    # AVANCE OTROS MOTIVOS by months
    msg.info("AVANCE OTROS MOTIVOs by months...")
    data['AVANCE_OTROS_MOTIVOS_POR_MES']=data[AVANCE_OTROS_MOTIVOS_MES_COL].fillna('#')\
        .astype('str').apply(lambda x:func(x),axis=1)
    # Drop the original per-month columns now that they are compacted
    msg.info("delete columns")
    data.drop(labels=META_MES_COL+META_AJUSTADA_MES_COL+AVANCE_MES_COL,inplace=True,axis=1)
    data.drop(labels=JUSTIFICACION_AJUSTE_MES_COL+AVANCE_CAUSA_MES_COL+
        AVANCE_EFECTO_MES_COL+AVANCE_OTROS_MOTIVOS_MES_COL,inplace=True,axis=1)
    msg.info("General Information:\n")
    data.info()
    # Persist the cleaned dataset
    msg.info("Save the Files...")
    data.reset_index().to_feather('base')# feather version
    #data.to_csv('base.csv.zip',encoding='latin1', index=False,compression='zip')# to save a csv version instead
    msg.good("OK!!!")


if __name__=='__main__':
    typer.run(main)
"noreply@github.com"
] | noreply@github.com |
640c834ff5726e6163a46c34dd51923636679b86 | dfbb23eb4e42ceb184f03e3669130a425f06a077 | /busanItClass/ex9.py | 560cd4c6ba651b34407b164cc7fece1bb389ebf8 | [] | no_license | gi-web/Python | af6c3d09cdbd6cdca78b7ec603a19bb566a520b0 | 74bcaace335026ff56fc050607775ebb92d0713a | refs/heads/main | 2023-05-04T21:59:51.866177 | 2021-05-22T17:40:12 | 2021-05-22T17:40:12 | 353,245,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import random
lotto=set()
i=0
while True:
lotto.add(random.randint(1,45))
i=i+1
if len(lotto)==6:
break
print("로또 넘버 :",sorted(lotto))
print("중복된 난수 발생 횟수 :",i-6)
| [
"phok75@naver.com"
] | phok75@naver.com |
cecfbc03e21c8156c775e484b68292ec8c14c60f | 4dc4345cca9c5f452bf4b87263505ee6b4e960af | /data_types_and_variables/contact_names.py | 2e5c1b630d63078c6ee3b04f570a368583e63c4b | [] | no_license | ivan-yosifov88/python_fundamentals | 88c7eb5167bbe6692b95051d1551496a84893524 | 1cfe6d18453362fc26be984f6cb871b9d7dec63d | refs/heads/master | 2023-03-29T16:46:55.363035 | 2021-04-07T10:39:44 | 2021-04-07T10:39:44 | 341,604,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | first_name = input()
last_name = input()
delimiter = input()
print(f"{first_name}{delimiter}{last_name}") | [
"ivan.yosifov88gmail.com"
] | ivan.yosifov88gmail.com |
afbfe84f132d7b99ee8ff3f35f0021c173ec83ba | 38f666272afc68b19e9fc612b98dcfa0cdff6755 | /src/backend/csv_statistics/user/migrations/0001_initial.py | 4c111b12b7c06f932a6f4c754c0f2918eeeea55e | [] | no_license | sunnytroo/trail-assignment | a63444826436bb492a18a255b74d2f41dac64fc0 | 08aa56d6edecbb38e7bf22cde600e2030023aa8a | refs/heads/main | 2023-04-07T13:34:53.518546 | 2021-04-09T10:25:54 | 2021-04-09T10:25:54 | 356,217,734 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,027 | py | # Generated by Django 3.2 on 2021-04-09 06:19
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the user app.

    Creates the Country and City lookup tables and the custom User model
    (AbstractUser-style fields plus gender, age, country and city).
    """
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cities', to='user.country')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('gender', models.CharField(choices=[('male', 'Male'), ('female', 'Female')], default='male', max_length=8)),
                ('age', models.IntegerField()),
                # NOTE(review): 'city' points at user.country, not user.city —
                # this mirrors the model as generated; confirm before release.
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='city_users', to='user.country')),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='country_users', to='user.country')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"sunnyp.ttdev@gmail.com"
] | sunnyp.ttdev@gmail.com |
904eb1e12d168271540f59a8839324bb0445b326 | 01db0f0f124b76943a404ebd2ec30bd672799fec | /MAP/lemmatization/no stopwords filtered - no lemmed/Method 1/indexing.py | 68c5d81a665951cfd3c342e6e82ca444085edcf3 | [] | no_license | vytran0710/simple-vector-retrieval | 745353a63ebf7e091906c1c7769beeddfc6253a7 | 0da96190f9c87c8084fdeadff91a19a27bce07fa | refs/heads/main | 2023-02-22T17:15:53.676574 | 2021-01-23T10:39:13 | 2021-01-23T10:39:13 | 325,326,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | import nltk
import re
import os
import pickle
from nltk.corpus import stopwords
import math
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def preprocess_text(text):
    """Lowercase *text*, normalize curly quotes to ASCII, and collapse every
    run of characters other than letters/apostrophes into a single space."""
    lowered = text.lower()
    lowered = lowered.replace("’", "'").replace("“", '"').replace("”", '"')
    return re.sub(r"[^A-Za-z']+", ' ', lowered)
def get_words_from_text(text):
    """Tokenize *text*: lowercase, normalize curly quotes, then split on runs
    of characters other than letters/apostrophes."""
    lowered = text.lower().replace("’", "'").replace("“", '"').replace("”", '"')
    cleaned = re.sub(r"[^A-Za-z']+", ' ', lowered)
    return cleaned.split()
def get_text_from_file(filename):
    """Read and return the whole contents of *filename* (cp1252-encoded)."""
    with open(filename, encoding='cp1252') as handle:
        return handle.read()
def indexing(docs_path):
    """Build an inverted index over the documents in *docs_path*.

    Documents are read in numeric filename order and assigned 1-based ids.
    Returns (terms, index): terms[i] is a vocabulary word (in order of first
    appearance) and index[i] is its postings list of
    (doc_id, normalized_weight) tuples.  A posting's raw weight is
    tf * (1 / document frequency); weights are then L2-normalized per doc.

    Improvement over the original: term/posting lookups use dicts instead of
    list.index() and linear scans, turning an accidental O(n^2) pass into
    O(n) while producing identical output ordering.
    """
    # term -> {doc_id: term frequency}; dicts preserve first-seen order.
    postings_by_term = {}
    doc_count = 0
    for doc_file in sorted(os.listdir(docs_path),
                           key=lambda x: int(os.path.splitext(x)[0])):
        doc_count += 1
        text = get_text_from_file(os.path.join(docs_path, doc_file))
        for word in get_words_from_text(text):
            doc_freqs = postings_by_term.setdefault(word, {})
            doc_freqs[doc_count] = doc_freqs.get(doc_count, 0) + 1
    # Weight each posting: tf * (1 / number of docs containing the term).
    weights_by_term = {
        term: {doc: tf / len(doc_freqs) for doc, tf in doc_freqs.items()}
        for term, doc_freqs in postings_by_term.items()
    }
    # Accumulate squared weights per document, then take the L2 norms.
    squared = [0.0] * doc_count
    for doc_weights in weights_by_term.values():
        for doc, weight in doc_weights.items():
            squared[doc - 1] += weight ** 2
    norms = [math.sqrt(total) for total in squared]
    # Normalize each posting by its document's norm.
    terms = list(weights_by_term)
    index = [
        [(doc, weight / norms[doc - 1])
         for doc, weight in weights_by_term[term].items()]
        for term in terms
    ]
    return terms, index
# Build the index over the local Cranfield corpus and persist the vocabulary
# and postings lists with pickle for later retrieval runs.
# NOTE(review): these are machine-specific absolute paths.
terms, index = indexing(r"D:\Github\simple-vector-retrieval\Cranfield\Cranfield")
file = open(r'D:\Github\simple-vector-retrieval\index\terms.sav', 'wb')
pickle.dump(terms, file)
file.close()
file2 = open(r'D:\Github\simple-vector-retrieval\index\index.sav', 'wb')
pickle.dump(index, file2)
file2.close()
"56116908+vytran0710@users.noreply.github.com"
] | 56116908+vytran0710@users.noreply.github.com |
6c36a4054982502406c051de7604521cbe7d6185 | b09b78fd776a5899e79dc04ed395f35d54ca5f8e | /confidant/scripts/migrate_bool.py | cefaf2ba85f01bbc978555e09a104cc96e3a6348 | [
"Apache-2.0"
] | permissive | jrosco/confidant | 37536b8454e8cbbddcb5b87080040a1d448c6fbd | c8cbc2ba64f8093e6b47c2f2eff59f5d291d54fd | refs/heads/master | 2020-07-30T13:05:22.309726 | 2019-10-16T18:06:23 | 2019-10-16T18:06:23 | 181,585,820 | 0 | 0 | Apache-2.0 | 2019-09-22T09:12:03 | 2019-04-16T00:34:46 | Python | UTF-8 | Python | false | false | 11,346 | py | from confidant.app import app
from flask_script import Command, Option
import time
from botocore.exceptions import ClientError
from pynamodb.exceptions import UpdateError
from pynamodb.expressions.operand import Path
from pynamodb.attributes import (
UnicodeAttribute,
BooleanAttribute,
)
from pynamodb.models import Model
from confidant.models.session_cls import DDBSession
from confidant.models.connection_cls import DDBConnection
class GenericCredential(Model):
    """Minimal PynamoDB model over the credentials table: only the hash key
    and the boolean ``enabled`` flag take part in the migration."""
    class Meta:
        # Table/endpoint/region come from the Flask app config at import time.
        table_name = app.config.get('DYNAMODB_TABLE')
        if app.config.get('DYNAMODB_URL'):
            host = app.config.get('DYNAMODB_URL')
        region = app.config.get('AWS_DEFAULT_REGION')
        connection_cls = DDBConnection
        session_cls = DDBSession
    id = UnicodeAttribute(hash_key=True)
    enabled = BooleanAttribute(default=True)
def _build_lba_filter_condition(attribute_names):
    """
    Build a filter condition suitable for passing to scan/rate_limited_scan,
    which will filter out any items for which none of the given attributes have
    native DynamoDB type of 'N'.
    """
    condition = None
    for name in attribute_names:
        clause = Path(name).is_type('N')
        # OR the per-attribute clauses together.
        condition = clause if condition is None else condition | clause
    return condition
def _build_actions(model_class, item, attribute_names):
    """
    Build a list of actions required to update an item.

    Returns (actions, condition): one SET action per boolean attribute that
    currently holds a value, plus a condition asserting each such attribute
    still stores its legacy integer encoding (1/0), so concurrent writers
    are detected by the conditional update.

    Raises ValueError if an attribute is missing from the model or does not
    hold a boolean value.
    """
    actions = []
    condition = None
    for attr_name in attribute_names:
        if not hasattr(item, attr_name):
            raise ValueError(
                'attribute {0} does not exist on model'.format(attr_name)
            )
        old_value = getattr(item, attr_name)
        if old_value is None:
            # Attribute unset on this item; nothing to migrate.
            continue
        if not isinstance(old_value, bool):
            raise ValueError(
                'attribute {0} does not appear to be a boolean '
                'attribute'.format(attr_name)
            )
        actions.append(getattr(model_class, attr_name).set(old_value))
        # Fixed precedence bug: the original wrote
        #   condition = condition & Path(attr_name) == (1 if old_value else 0)
        # where `&` binds tighter than `==`, so it compared
        # (condition & Path(attr_name)) against the value instead of AND-ing
        # the individual comparisons. Parenthesize the comparison first.
        clause = Path(attr_name) == (1 if old_value else 0)
        condition = clause if condition is None else condition & clause
    return actions, condition
def _handle_update_exception(e, item):
    """Classify an UpdateError raised during migration.

    Logs a warning and returns True for expected, retryable failures
    (conditional check failed, provisioned throughput exceeded); re-raises
    anything else, including non-ClientError causes.
    """
    if not isinstance(e.cause, ClientError):
        raise e
    retryable_messages = {
        'ConditionalCheckFailedException':
            'conditional update failed (concurrent writes?) for object:'
            ' (you will need to re-run migration)',
        'ProvisionedThroughputExceededException':
            'provisioned write capacity exceeded at object:'
            ' backing off (you will need to re-run migration)',
    }
    code = e.cause.response['Error'].get('Code')
    message = retryable_messages.get(code)
    if message is None:
        raise e
    app.logger.warn(message)
    return True
def migrate_boolean_attributes(model_class,
                               attribute_names,
                               read_capacity_to_consume_per_second=10,
                               allow_scan_without_rcu=False,
                               mock_conditional_update_failure=False,
                               page_size=None,
                               limit=None,
                               number_of_secs_to_back_off=1,
                               max_items_updated_per_second=1.0):
    """
    Migrates boolean attributes per GitHub
    `issue 404 <https://github.com/pynamodb/PynamoDB/issues/404>`_.

    Scans the whole table and, for every item that still stores any of the
    given attributes as a native DynamoDB number, performs a conditional
    update rewriting it as a boolean. Reads are rate-limited via
    ``read_capacity_to_consume_per_second``; writes via
    ``max_items_updated_per_second``. Keep read and write capacity balanced —
    a conservative estimate assumes every visited item results in an update.

    :param model_class: the up-to-date Model class (BooleanAttribute fields).
    :param attribute_names: names of attributes potentially needing migration.
    :param read_capacity_to_consume_per_second: read-side rate limit passed to
        ``rate_limited_scan``.
    :param allow_scan_without_rcu: passed to ``rate_limited_scan``; lets unit
        tests run against DynamoDB Local.
    :param mock_conditional_update_failure: testing only — makes the
        conditional expression guaranteed to fail so the failure path is
        exercised.
    :param page_size: scan page size.
    :param limit: maximum number of scan results.
    :param number_of_secs_to_back_off: sleep after exceeding write capacity.
    :param max_items_updated_per_second: upper bound on update rate.
    :return: (number_of_items_in_need_of_update,
              number_of_them_that_failed_due_to_conditional_update).
        Re-run the migration until this returns ``(0, 0)``.
    """
    app.logger.info('migrating items; no progress will be reported until '
                    'completed; this may take a while')
    num_items_with_actions = 0
    num_update_failures = 0
    items_processed = 0
    time_of_last_update = 0.0
    if max_items_updated_per_second <= 0.0:
        raise ValueError(
            'max_items_updated_per_second must be greater than zero'
        )
    # Scan only items where at least one target attribute is stored as 'N'.
    for item in model_class.rate_limited_scan(
            _build_lba_filter_condition(attribute_names),
            read_capacity_to_consume_per_second=(
                read_capacity_to_consume_per_second
            ),
            page_size=page_size,
            limit=limit,
            allow_rate_limited_scan_without_consumed_capacity=(
                allow_scan_without_rcu
            )):
        items_processed += 1
        if items_processed % 1000 == 0:
            app.logger.info(
                'processed items: {} Thousand'.format(items_processed/1000)
            )
        actions, condition = _build_actions(model_class, item, attribute_names)
        if not actions:
            continue
        if mock_conditional_update_failure:
            condition = condition & (Path('__bogus_mock_attribute') == 5)
        try:
            num_items_with_actions += 1
            # Sleep amount of time to satisfy the maximum items updated per sec
            # requirement
            time.sleep(
                max(0, 1 / max_items_updated_per_second - (
                    time.time() - time_of_last_update
                ))
            )
            time_of_last_update = time.time()
            item.update(actions=actions, condition=condition)
        except UpdateError as e:
            # Retryable failures are counted and reported for a re-run.
            if _handle_update_exception(e, item):
                num_update_failures += 1
                # In case of throttling, back off amount of seconds before
                # continuing
                time.sleep(number_of_secs_to_back_off)
    app.logger.info(
        'finished migrating; {} items required updates'.format(
            num_items_with_actions
        )
    )
    app.logger.info(
        '{} items failed due to racing writes and/or exceeding capacity and '
        'require re-running migration'.format(num_update_failures)
    )
    return num_items_with_actions, num_update_failures
class MigrateBooleanAttribute(Command):
    """flask_script command wrapping :func:`migrate_boolean_attributes` for
    the credential table's ``enabled`` attribute."""
    option_list = (
        Option(
            '--RCU',
            action="store",
            dest="RCU",
            type=int,
            default=10,
            help='Read Capacity Units to be used for scan method.'
        ),
        Option(
            '--page-size',
            action="store",
            dest="page_size",
            type=int,
            default=None,
            help='Page size used in the scan.'
        ),
        Option(
            '--limit',
            action="store",
            dest="limit",
            type=int,
            default=None,
            help='Limit the number of results returned in the scan.'
        ),
        Option(
            '--back-off',
            action="store",
            dest="back_off",
            type=int,
            default=1,
            help='Number of seconds to sleep when exceeding capacity.'
        ),
        Option(
            '--update-rate',
            action="store",
            dest="update_rate",
            type=float,
            default=1.0,
            help='An upper limit on the rate of items update per second.'
        ),
        Option(
            '--scan-without-rcu',
            action="store_true",
            dest="scan_without_rcu",
            default=False,
            help='For development purposes, allow scanning without read '
                 'capacity units'
        )
    )

    def run(self, RCU, page_size, limit, back_off, update_rate,
            scan_without_rcu):
        # Log the effective settings, then run the migration and report the
        # (items_needing_update, failed_updates) result tuple.
        attributes = ['enabled']
        app.logger.info('RCU: {}, Page Size: {}, Limit: {}, Back off: {}, '
                        'Max update rate: {}, Attributes: {}'.format(
                            RCU, page_size, limit, back_off, update_rate,
                            attributes
                        ))
        model = GenericCredential
        res = migrate_boolean_attributes(
            model,
            attributes,
            read_capacity_to_consume_per_second=RCU,
            page_size=page_size,
            limit=limit,
            number_of_secs_to_back_off=back_off,
            max_items_updated_per_second=update_rate,
            allow_scan_without_rcu=scan_without_rcu
        )
        app.logger.info(res)
| [
"noreply@github.com"
] | noreply@github.com |
1698e2d4ad1cb9f4b1613553d57826a2f86f0855 | 990484e040430f184bbc53ab81ac00c5f6a0c8eb | /app/admin/model.py | 643b184677a5f9219b7f48d9f33def5640bd370d | [] | no_license | Jamesltesim/TFT | 344c6ebb5134639daad1bf63ea02efe918d93fd1 | f5cb66b69168922f01e581f9ff2626b247cb3928 | refs/heads/master | 2022-12-14T08:41:48.251533 | 2018-09-05T14:57:29 | 2018-09-05T14:57:29 | 142,824,710 | 0 | 0 | null | 2022-12-08T02:19:40 | 2018-07-30T04:27:13 | Python | UTF-8 | Python | false | false | 4,210 | py | # coding:utf-8
from datetime import datetime
from sqlalchemy import ForeignKey, Column, Integer, String, VARCHAR, DateTime, BOOLEAN, FLOAT
from sqlalchemy.orm import relationship
from .. import db
# engine = create_engine(Conf.MYSQL_INFO, pool_recycle=7200)
#
#
# Base = declarative_base()
#
# db_session = scoped_session(sessionmaker(autocommit=False,
# autoflush=False,
# bind=engine))
# Base.query = db_session.query_property()
class Commodity(db.Model):
    """A sellable product, linked to one Commodity_category."""
    __tablename__ = 'commodity'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(VARCHAR(60))
    price = Column(FLOAT)
    product_id = Column(VARCHAR(30))
    stock = Column(Integer)
    # FK to commodity_category.id; exposed via the relationship below.
    category= Column(Integer,ForeignKey('commodity_category.id'))
    commodity_category = relationship('Commodity_category',backref=db.backref('commodity', lazy='dynamic'))
    status = Column(Integer)
class Commodity_category(db.Model):
    """Lookup table of commodity categories."""
    __tablename__ = 'commodity_category'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(VARCHAR(60))
class Commodity_recommend(db.Model):
    """A recommended (featured) commodity with a short description."""
    __tablename__ = 'commodity_recommend'
    id = Column(Integer, primary_key=True, autoincrement=True)
    commodity_id = Column(Integer,ForeignKey('commodity.id'))
    commodity = relationship('Commodity', backref=db.backref('commodity_recommend', lazy='dynamic'))
    desc = Column(VARCHAR(60))
class Commodity_repertory(db.Model):
__tablename__ = 'commodity_repertory'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(VARCHAR(45))
# 进货价格
purchase_price = Column(FLOAT)
time = Column(DateTime)
operator = Column(Integer)
detail = Column(VARCHAR(80))
# 供货商
supplier = Column(VARCHAR(45))
expiration_date = Column(Integer)
product_id = Column(VARCHAR(45))
weight = Column(VARCHAR(20))
def __init__(self):
# self.name = name
# self.purchase_price = purchase_price
# self.operator = operator
# self.weight = weight
self.supplier = ''
# def __init__(self,name,purchase_price,operator,weight):
# self.name = name
# self.purchase_price = purchase_price
# self.operator = operator
# self.weight = weight
# self.supplier = ''
class Order_reservation(db.Model):
__tablename__ = 'order_reservation'
id = Column(Integer, primary_key=True)
order_id = Column(VARCHAR(45))
arrival_time = Column(DateTime)
create_time = Column(DateTime,default=datetime.now())
user_id = Column(Integer,ForeignKey('user.user_id'))
user = relationship('User', backref=db.backref('order_reservation', lazy='dynamic'))
contact_number = Column(VARCHAR(45))
receive_name = Column(VARCHAR(45))
address_id = Column(Integer)
# user_address = relationship('User_address', backref=db.backref('order_reservation', lazy='dynamic'))
class User(db.Model):
__tablename__ = 'user'
# id = Column( Integer, primary_key=True)
user_id = Column(Integer, primary_key=True,autoincrement=True)
nickname = Column(VARCHAR(45))
phone_number = Column(VARCHAR(45))
username = Column(VARCHAR(80))
passwd = Column(VARCHAR(80))
register_time = Column(DateTime)
header_icon_url = Column(VARCHAR(60))
sex = Column(Integer)
login_status = Column(BOOLEAN)
def __init__(self,phone_number,passwd,login_status):
self.nickname = 'abc'
self.username = 'abc'
self.phone_number = phone_number
self.passwd = passwd
self.login_status = login_status
# id = Column('id', Integer, primary_key=True)
# phone_number = Column('phone_number', String(11), index=True)
# password = Column('password', VARCHAR(40))
# nickname = Column('nickname', VARCHAR(30), index=True, nullable=True)
# register_time = Column('register_time',DateTime, default=datetime.now())
# nickn = Column('nickname1', VARCHAR(30), index=True, nullable=True)
# head_picture = Column('head_picture', String(100), default='')
# if __name__ == '__main__':
# Base.metadata.create_all(engine) | [
"james.ltesim@gmail.com"
] | james.ltesim@gmail.com |
94699502bdcc20fd14f36e258fae8f7cc66a0bcd | 6564f0f7521ea93dd39d84231af3a4480dda3515 | /src/Program/Python/Testing/compareMatlabTest.py | d38f100b3d0a56c2d9e49a88e43648997a57f432 | [
"BSD-2-Clause"
] | permissive | smiths/swhs | 7cea899c565a92e16bf8345d00b550041f8579fd | 94e2aa9a53b46f4b874fc8812f4be37d9e909c11 | refs/heads/master | 2023-03-17T22:55:41.640819 | 2023-03-09T19:51:23 | 2023-03-09T19:51:23 | 54,129,717 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,336 | py | import sys
sys.path.insert(0, '.')
import unittest
import PCM_Error
class TestCompareMatlab(unittest.TestCase):
def setUp(self):
self.delta = 0.000005
def test_CM1(self):
errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'TWat')
errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'TPCM')
errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'EWat')
errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M01.out', 'P01.out', 'EPCM')
self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
def test_CM2(self):
errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'TWat')
errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'TPCM')
errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'EWat')
errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M02.out', 'P02.out', 'EPCM')
self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
def test_CM3(self):
errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'TWat')
errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'TPCM')
errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'EWat')
errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M03.out', 'P03.out', 'EPCM')
self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
def test_CM4(self):
errTw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'TWat')
errTp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'TPCM')
errEw = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'EWat')
errEp = PCM_Error.PCM_ErrorM('Testing/compareMatlab/M04.out', 'P04.out', 'EPCM')
self.assertAlmostEqual(errTw, 0, places=None, msg='Water temperature', delta=self.delta)
self.assertAlmostEqual(errTp, 0, places=None, msg='PCM temperature', delta=self.delta)
self.assertAlmostEqual(errEw, 0, places=None, msg='Water energy', delta=self.delta)
self.assertAlmostEqual(errEp, 0, places=None, msg='PCM energy', delta=self.delta)
class CompareMatlabSuite:
def suite(self):
suite = unittest.TestLoader().loadTestsFromTestCase(TestCompareMatlab)
return suite | [
"elwazana@mcmaster.ca"
] | elwazana@mcmaster.ca |
e84a46ac2e2f85508523f34849cab34348a212d3 | 25eea9b1560947f64f32f1bd6d7e044faec16112 | /code/lotka-volterra.py | 27a1e99c567b2118051e3f011900000041c78e02 | [] | no_license | retrosnob/Jupyter | 7a99b7903080ee2b30e7366b9ff3725c96976e68 | c06650b68a88764209c3a7cf6619cc615cddd5ad | refs/heads/master | 2020-04-25T19:00:50.854785 | 2019-03-01T05:52:20 | 2019-03-01T05:52:20 | 173,004,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | import matplotlib.pylab as pylab
# This is LV that is spelt out in 4.34 and 4.35
def initialize():
global r, d, b, c, K, x, y, resultx, resulty, t, timesteps
r = d = c = b = 1
K = 5
x = y = 1
resultx = [x]
resulty = [y]
t = 0
timesteps = [t]
def update():
global r, d, b, c, x, y, K, resultx, resulty, t, timesteps
nextx = x + r * x * (1 - x/K) - (1 - 1/(b * y + 1)) * x
nexty = y - d * y + c * x * y
x, y = nextx, nexty
t = t + 0.1
def observe():
# This function essentially records the current state of x, y and t so that
# they can be plotted over time.
global x, y, resultx, resulty, t, timesteps
resultx.append(x)
resulty.append(y)
timesteps.append(t)
initialize()
while t < 10:
update()
observe()
fig1 = pylab.figure()
fig2 = pylab.figure()
ax1 = fig1.add_subplot(111)
ax2 = fig2.add_subplot(111)
ax1.plot(timesteps, resultx, 'r-')
ax1.plot(timesteps, resulty, 'b--')
ax2.plot(resultx, resulty)
pylab.show() | [
"jusrobertson@yahoo.co.uk"
] | jusrobertson@yahoo.co.uk |
8e6cd6a6e9fc6d7afc2a6f70833a1a18db3e3359 | f6a83a9a5278d5af2dfc7d2f49cc2ca5e3f0ec6e | /scripts/gristle_validator | 495a88f4e298004b3471ccc037cbd592b6c667b0 | [
"BSD-3-Clause"
] | permissive | leroyjmcclure/DataGristle | 93c8418d8ecbc2935394c5052463b3b4cc560eae | a5bc150e308791abca6d8f38e86938905292f763 | refs/heads/master | 2020-12-30T18:30:28.857391 | 2013-01-06T22:54:24 | 2013-01-06T22:54:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,695 | #!/usr/bin/env python
""" Extracts subsets of input file based on user-specified columns and rows.
The input csv file can be piped into the program through stdin or identified
via a command line option. The output will default to stdout, or redirected
to a filename via a command line option.
The columns and rows are specified using python list slicing syntax -
so individual columns or rows can be listed as can ranges. Inclusion
or exclusion logic can be used - and even combined.
To do:
- work with analyze_file to produce a special exception for empty files.
- improve msg if user provides no args - and tell about -h
See the file "LICENSE" for the full license governing this code.
Copyright 2011 Ken Farmer
"""
#--- standard modules ------------------
from __future__ import division
import sys
import optparse
import csv
import fileinput
import collections
#from pprint import pprint as pp
#--- gristle modules -------------------
sys.path.append('../') # allows running from project structure
sys.path.append('../../') # allows running from project structure
import gristle.file_type as file_type
SMALL_SIDE = 0
LARGE_SIDE = 1
#------------------------------------------------------------------------------
# Command-line section
#------------------------------------------------------------------------------
def main():
""" runs all processes:
- gets opts & args
- analyzes file to determine csv characteristics unless data is
provided via stdin
- runs each input record through process_cols to get output
- writes records
"""
bad_field_cnt = collections.defaultdict(int)
(opts, files) = get_opts_and_args()
if len(files) == 1:
my_file = file_type.FileTyper(files[0],
opts.delimiter ,
opts.recdelimiter,
opts.hasheader)
try:
my_file.analyze_file()
except file_type.IOErrorEmptyFile:
return 1
dialect = my_file.dialect
else:
# dialect parameters needed for stdin - since the normal code can't
# analyze this data.
dialect = csv.Dialect
dialect.delimiter = opts.delimiter
dialect.quoting = opts.quoting
dialect.quotechar = opts.quotechar
dialect.lineterminator = '\n' # naive assumption
rec_cnt = -1
if opts.output == '-':
outfile = sys.stdout
else:
outfile = open(opts.output, "w")
for rec in csv.reader(fileinput.input(files), dialect):
rec_cnt += 1
if not rec:
break
if len(rec) != opts.fieldcnt:
bad_field_cnt[len(rec)] += 1
write_fields('field_cnt-%d' % len(rec), rec_cnt, outfile, rec, dialect.delimiter)
fileinput.close()
if opts.output != '-':
outfile.close()
return 0
def write_fields(label, rec_cnt, outfile, fields, delimiter):
""" Writes output to output destination.
Input:
- list of fields to write
- output object
Output:
- delimited output record written to stdout
To Do:
- write to output file
"""
rec = label + delimiter + str(rec_cnt) + delimiter + delimiter.join(fields)
outfile.write(rec + '\n')
def get_opts_and_args():
""" gets opts & args and returns them
Input:
- command line args & options
Output:
- opts dictionary
- args dictionary
"""
use = ("%prog is used to extract column and row subsets out of files "
"and write them out to stdout or a given filename: \n"
" \n"
" %prog [file] [misc options]")
parser = optparse.OptionParser(usage = use)
parser.add_option('-o', '--output',
default='-',
help='Specifies the output file. The default is stdout. Note that'
'if a filename is provided the program will override any '
'file of that name.')
parser.add_option('-f', '--fieldcnt',
type=int,
help=('Specify the number of fields in the record'))
parser.add_option('-d', '--delimiter',
help=('Specify a quoted single-column field delimiter. This may be'
'determined automatically by the program.'))
parser.add_option('--quoting',
default=False,
help='Specify field quoting - generally only used for stdin data.'
' The default is False.')
parser.add_option('--quotechar',
default='"',
help='Specify field quoting character - generally only used for '
'stdin data. Default is double-quote')
parser.add_option('--recdelimiter',
help='Specify a quoted end-of-record delimiter. ')
parser.add_option('--hasheader',
default=False,
action='store_true',
help='Indicate that there is a header in the file.')
(opts, files) = parser.parse_args()
if files:
if len(files) > 1 and not opts.delimiter:
parser.error('Please provide delimiter when piping data into program via stdin or reading multiple input files')
else: # stdin
if not opts.delimiter:
parser.error('Please provide delimiter when piping data into program via stdin or reading multiple input files')
return opts, files
if __name__ == '__main__':
sys.exit(main())
| [
"kenfar@watson.(none)"
] | kenfar@watson.(none) | |
e3edbf531c16d053c4fd00549d908b307ef863c1 | 9479ecfcf7647e87097a63574364396e16ad9ad9 | /Lab6/Tests/espytester.py | 748346508b9edce6e503e5a3d4177b984feb6315 | [] | no_license | AlexTelon/Javascript-interpreter-in-python-course | 710c2d34c1f620dd86ba7c74b5b5b8104fbda8c6 | d78fdb8e6d997cc24fd9058adbbe9af2771bd637 | refs/heads/master | 2021-03-27T20:29:15.187606 | 2015-09-25T10:09:53 | 2015-09-25T10:09:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | #!/usr/bin/env python3
import sys
import os
import subprocess
class TestFailedExecption(Exception):
def __init__(self, testname, line, jspy_line, test_line, jspy_output):
super().__init__("Test '{}' has failed at line {}:\n=================\n{}=================\n\nes.py: {}\ntest : {}".format(testname, line, jspy_output, jspy_line, test_line))
class Options:
def __init__(self, executable, test_dir):
self.executable = executable
self.test_dir = test_dir
self.verbose = True
self.abort = True
def runTest(test, options):
print(test)
proc = subprocess.Popen([options.executable, options.test_dir + "/" + test + ".js"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
f = open(options.test_dir + "/" + test + ".txt")
i = 1
success = True
jspy_output = ""
for line in proc.stdout:
jso = line.decode("utf-8")
jspy_output += jso
jso = jso.strip('\n')
tso = f.readline().strip('\n')
if(tso != jso):
success = False
if(options.verbose):
print("Difference in ouptput at line: {}".format(i))
print("es.py: {}".format(jso))
print("test : {}".format(tso))
if(options.abort):
for line in proc.stdout:
o = line.decode("utf-8")
jspy_output += o
raise TestFailedExecption(test, i, jso, tso, jspy_output)
i += 1
next_test_line = f.readline()
if(len(next_test_line) != 0):
success = False
if(options.verbose):
print("Test data has more than the script output!")
if(options.abort):
raise TestFailedExecption(self, test, i, "", next_test_line, jspy_output)
for line in f:
print(line[0])
f.close()
return success
def main(argv):
exec = argv[1]
lab = argv[2]
options = Options(argv[1], argv[2] + "/Tests")
if(len(argv) == 4):
runTest(argv[3], options)
else:
tests = []
for root, dirs, files in os.walk( options.test_dir ):
for f in files:
full = (root + '/' + f)
full = full[len(options.test_dir) + 1:]
if(full.endswith(".txt")):
testname = full[0:len(full)-4]
elif(full.endswith(".js")):
testname = full[0:len(full)-3]
#no ~ files
elif(full.endswith("~")):
continue
# let these two files be there
elif(full[0:len(full)] == "run_tests" or full[0:len(full)] == "espytester.py"):
continue
else:
raise Exception("Invalid file: " + full)
pass
if(not testname in tests):
tests.append(testname)
if(len(tests) == 0):
raise Exception("Empty test suite")
tests.sort()
for test in tests:
runTest(test, options)
if __name__ == '__main__':
main(sys.argv)
| [
"alete471@student.liu.se"
] | alete471@student.liu.se |
a7cb946df369671616daab17c41faa79828876d7 | d973bb3b56f330dcb76c050780cfd4bf58fb6ba4 | /Tkinter/Adding Padding.py | 5f422f24d0e5beee5863563d9ade14132af9df19 | [] | no_license | rahulj1601/Python | 543328d72e3d1e70fdaf1453511de6cf83b7e887 | c6a0c02cfd8092bc896a5ca3ae88ed4889d95ab9 | refs/heads/master | 2022-06-29T16:21:07.449078 | 2020-05-13T15:26:36 | 2020-05-13T15:26:36 | 263,668,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # Adding Padding
from tkinter import *
window = Tk()
window.title("Complete Python Course")
window.geometry('400x200')
lbl1 = Label(window, text="label1", padx=20, pady=20)
lbl1.grid(column=0,row=0)
lbl2 = Label(window, text="label2", padx=20, pady=20)
lbl2.grid(column=1,row=0)
window.mainloop()
| [
"rahul1601@hotmail.co.uk"
] | rahul1601@hotmail.co.uk |
d0ec0a41b10c508f07c1ac2e6b2c38ba42f77c1e | 6e58f95a931db523a3957134ff8cac670d4c20be | /Hunter level/given 2 string checking whether they are same without using built in function.py | 0b85adde15d8c4f9f377c2739745e69844479191 | [] | no_license | ramyasutraye/python-programming-13 | 36235f324152d793ca1b2bf087d2a49a62d47787 | ea58462208bb4da826b9f1917bdad17c80d055dc | refs/heads/master | 2020-04-23T19:30:53.189933 | 2018-05-02T17:13:30 | 2018-05-02T17:13:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | a=str(input())
b=str(input())
l1=len(a)
l2=len(b)
if(l1==l2):
for i in range (l1):
if a[i]==b[i]:
flag=1
else:
flag=0
if (flag==1):
print ("yes")
else:
print ("no")
else:
print ("no")
| [
"noreply@github.com"
] | noreply@github.com |
043812cfa4b055a90db139595b11b7c0489f56d4 | 8b40529458e096d24c292f2a089e0caa652897c6 | /scripts/predict.py | ab1a0f7e6be4c5a1bfa98aa36971a72a2e49e37e | [] | no_license | Samykolon/Master | d0df63a4146b520b27914f7d5975e8716f00766e | c0686567eacc4c00fdb715d3927ca30e442d54a9 | refs/heads/master | 2022-12-30T05:07:45.748985 | 2020-10-07T10:21:04 | 2020-10-07T10:21:04 | 278,046,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,698 | py | # (C) Samuel Dressel 2020
# When TX-Server is running, handle request and response
import pyaudio
import wave
import time
import os
import os.path
from os import path
import numpy as np
import matplotlib.pyplot as plt
from python_speech_features import mfcc
from python_speech_features import logfbank
from pyAudioAnalysis import audioBasicIO
import scipy.io.wavfile as wav
import pyaudio
import audioop
import glob, shutil, sys, random
from pydub import AudioSegment
from pydub.utils import make_chunks
import soundfile as sf
import json
import requests
from ctypes import *
from contextlib import contextmanager
class DynamicUpdate():
# X RANGE
min_x = 0.0
max_x = 1.0
# Samplerate of the input
SAMPLERATE = 48000
# Format of the audio (bitdepth)
FORMAT = pyaudio.paInt16
# Number of channels
CHANNELS = 2
# Chunksize for recording
CHUNK = 1000
# File for temporary audio saving
WAVE_OUTPUT_FILENAME = "/home/smu/Desktop/RNN/TEST.wav"
# NFFT - This is the frequency resolution
# By default, the FFT size is the first equal or superior power of 2 of the window size.
# If we have a samplerate of 48000 Hz and a window size of 800 ms, we get 38400 samples in each window.
# The next superior power would be 65536 so we choose that
NFFT = 65536
# Size of the Window
WINDOW_SIZE = 0.8
# Window step Size = Window-Duration/8 - Overlapping Parameter
WINDOW_STEP = 0.1
# Units for Training
UNITS = 512
# Number of MFCCs
NUMCEP = 40
# Number of Melfilters
NUMFILT = 40
# Preemph-Filter to reduce noise
PREEMPH = 0.0
# Record Seconds
RECORD_SECONDS = 6
# Data for Plot
xdata = ['Wut','Langeweile','Ekel','Angst','Freude','Trauer','Neutral']
ydata = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# Input Device
INPUT_DEVICE = "AT2020 USB: Audio"
def paintplot(self, xdata, ydata):
y_pos = np.arange(len(xdata))
plt.ylim(0.0,1.0)
plt.bar(y_pos, ydata)
plt.xticks(y_pos, xdata)
plt.tight_layout()
plt.show()
def record_and_calculate(self):
print("Looking for audio input device ... ")
pa = pyaudio.PyAudio()
chosen_device_index = -1
for x in range(0,pa.get_device_count()):
info = pa.get_device_info_by_index(x)
if self.INPUT_DEVICE in info["name"]:
chosen_device_index = info["index"]
print(chosen_device_index)
pa.terminate()
print("Start recording - speak for 6 seconds ...")
audio = pyaudio.PyAudio()
stream = audio.open(format=self.FORMAT, channels=self.CHANNELS,
rate=self.SAMPLERATE, input=True,
frames_per_buffer=self.CHUNK, input_device_index=chosen_device_index)
frames = []
for i in range(0, int(self.SAMPLERATE / self.CHUNK * self.RECORD_SECONDS)):
data = stream.read(self.CHUNK, exception_on_overflow = False)
frames.append(data)
print("Done Recording!")
print("Calculating features ...")
waveFile = wave.open(self.WAVE_OUTPUT_FILENAME, 'wb')
waveFile.setnchannels(self.CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(self.FORMAT))
waveFile.setframerate(self.SAMPLERATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
sound = AudioSegment.from_file(self.WAVE_OUTPUT_FILENAME)
sound = sound.set_channels(1)
change_in_dBFS = -40.0 - sound.dBFS
sound = sound.apply_gain(change_in_dBFS)
sound.export(self.WAVE_OUTPUT_FILENAME, format="wav")
stream.stop_stream()
stream.close()
audio.terminate()
(rate,sig) = wav.read(self.WAVE_OUTPUT_FILENAME)
mfcc_feat = mfcc(sig, rate, numcep=self.NUMCEP, nfilt=self.NUMFILT, winlen=self.WINDOW_SIZE, winstep=self.WINDOW_STEP, nfft=self.NFFT, preemph=self.PREEMPH)
mfcc_feat = np.expand_dims(mfcc_feat, axis=0)
data = json.dumps({"signature_name": "serving_default",
"instances": mfcc_feat.tolist()})
headers = {"content-type": "application/json"}
json_response = requests.post('http://localhost:9000/v1/models/emotiondetection/versions/1:predict', data=data, headers=headers)
result = json.loads(json_response.text)
result = result["predictions"]
self.ydata = [result[0][0], result[0][1], result[0][2], result[0][3], result[0][4], result[0][5], result[0][6]]
def __call__(self):
self.record_and_calculate()
self.paintplot(self.xdata, self.ydata)
d = DynamicUpdate()
d()
| [
"dressels@mailserver.tu-freiberg.de"
] | dressels@mailserver.tu-freiberg.de |
c2dd1be41120c2c01716a3f9db7a579ff46b99ce | 8f5e4e2aa50a629c93ad7be317d7139f7394e699 | /행렬의덧셈.py | de18616f213d7beb321d6caf468a217abc79197e | [] | no_license | noh-hyeonseong/python3-algorithm-level1 | d0c9b76d539e6cab5c68bb6c5a7ba12e87073640 | 5aec6c24fb3c3fb2833bdc80e4af7c0bd9e8fddd | refs/heads/master | 2023-06-06T02:34:02.882296 | 2021-06-29T06:16:05 | 2021-06-29T06:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import numpy as np
def solution(arr1, arr2):
"""
행렬은 numpy 모듈을 활용하면 더 쉽게 풀 수 있음
import numpy as np
"""
#numpy 사용 코드
A = np.array(arr1)
B = np.array(arr2)
answer = (A + B).tolist()
print(answer)
return answer
#기존 코드
# answer = []
# for i, j in zip(arr1, arr2):
# tempArr = []
# for k, p in zip(i, j):
# tempArr.append(k+p)
# answer.append(tempArr)
# return answer | [
"shgustjd6262@naver.com"
] | shgustjd6262@naver.com |
462a37efb98d640ba9b874b7b10f047c674f5333 | b2fc9e6bc140a77c730ca0b13a738e139f214f05 | /mbl/util/string.py | 1807b15da38b029b448b1436418e51fe3600941e | [] | no_license | lachtan/mblib | 31febb60855413aeeb70c15a28f7d4f8204a9c03 | 3378818e74f010c14ffe554cdf79ead3af2dfff7 | refs/heads/master | 2020-05-17T23:17:41.953208 | 2011-01-27T20:06:51 | 2011-01-27T20:06:51 | 753,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import re
def split(pattern, text, itemsCount, emptyItem = ''):
""" split text into itemCount items
missing items fills with emtyItem """
if itemsCount < 1:
raise AttributeError('itemsCount must be greater or equal 1 (%d)' % itemsCount)
array = re.split(pattern, text, itemsCount - 1)
if len(array) < itemsCount:
array += [emptyItem] * (itemsCount - len(array))
return array
| [
"lachtan@fnet.cz"
] | lachtan@fnet.cz |
416bd9a60261c8fd6ab915b6fa7b0f273d413d0f | ae2c287af9ce5c74a7361a79f5532aef44a5f1c0 | /Util.py | 99dead0362c86a76bfec16949292068d893cdc57 | [] | no_license | richars1111/juego-dados | 9eba87fc0df736a5ba29f0577a18c475a9d256e2 | e4e67f808299152f58d31ca4f50c5edf5ed5d753 | refs/heads/main | 2023-08-15T11:56:31.342781 | 2021-09-18T00:21:04 | 2021-09-18T00:21:04 | 406,808,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | from tkinter import *
def agregarImagen(ventana, archivo, fila, columna):
img = PhotoImage(file=archivo)### carga la imagen en la memoria y recibe como parametro el nombre del archivo
lbl= Label(ventana)### se instancio
lbl.config(image=img) ### ala etiqueta se la asigno la imagen
lbl.image=img
lbl.grid(row=fila, column=columna)### se ubica en la fila y columna que se quiere
return lbl
def agregarCajaTextoSalida(ventana, ancho, fila, columna, fuente):
txt = Entry(ventana, width=ancho, font=fuente)
txt.grid(row=fila, column=columna)
txt.configure(state=DISABLED)
return txt
def mostrarCajaTexto(txt, valor):
txt.configure(state=NORMAL)
txt.delete(0, END)
txt.insert(0, valor)
txt.configure(state=DISABLED)
| [
"90426768+richars1111@users.noreply.github.com"
] | 90426768+richars1111@users.noreply.github.com |
9cd8a6e55a4e5085df6657d0a04781d0dee9ed7b | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/training/saver_large_variable_test.py | f19600a79e7b85bc841cc500c0681bd62a3cd3a6 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 2,386 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargeVariableTest(test.TestCase):
# NOTE: This is in a separate file from saver_test.py because the
# large allocations do not play well with TSAN, and cause flaky
# failures.
def testLargeVariable(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
with session.Session("", graph=ops.Graph()) as sess:
# Declare a variable that is exactly 2GB. This should fail,
# because a serialized checkpoint includes other header
# metadata.
with ops.device("/cpu:0"):
var = variables.Variable(
constant_op.constant(
False, shape=[2, 1024, 1024, 1024], dtype=dtypes.bool))
save = saver.Saver(
{
var.op.name: var
}, write_version=saver_pb2.SaverDef.V1)
var.initializer.run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Tensor slice is too large to serialize"):
save.save(sess, save_path)
if __name__ == "__main__":
test.main()
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
0dffad33f2dccbceaee1cb127899c1c8b0e10fa0 | 17090fa1f7849f82a35d1d53526c57889875386d | /IPProxy/spider.py | 5eea61ac808da8c298ab4e013e2c08d98454fcbf | [] | no_license | apple0523/Py-Lawrence | a2dc9943ef36fe48eb276ea6a8e9eaf15f4041e3 | e1460ca205e58abef0ff9263b7dddb944c3bbfec | refs/heads/master | 2021-04-09T16:29:44.292847 | 2018-03-19T08:26:13 | 2018-03-19T08:26:13 | 125,806,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | #!/bin/env python
# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import random
class GetProxyIP:
def __init__(self,page=10):
self._page = page
self.url_head = 'http://www.xicidaili.com/wt/'
def get_ip(self):
"""
get resouce proxy ip pool
:return: res_pool list
"""
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"}
res_pool = []
for pagenum in range(1,self._page):
url = self.url_head + str(pagenum)
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, "html.parser")
soup_tr = soup.find_all('tr')
for item in soup_tr:
try:
soup_td = item.find_all('td')
res_pool.append(soup_td[5].text.lower() + '://' + soup_td[1].text + ':' + soup_td[2].text)
except IndexError:
pass
return res_pool
def right_proxies(self,res_pool):
"""
check available ip
:param res_pool:
:return:right_pool list
"""
right_pool = []
for ip in res_pool:
if 'https' in ip:
proxies = {'http': ip}
else:
proxies = {"http": ip}
check_urllist = ['http://www.baidu.com', 'http://www.taobao.com', 'https://cloud.tencent.com/']
try:
response = requests.get(random.choice(check_urllist), proxies=proxies, timeout = 1)
if response.status_code:
right_pool.append(proxies)
print('add ip %s' % proxies)
except Exception as e:
continue
return right_pool
if __name__ == '__main__':
proxyhelper = GetProxyIP(2)
res_pool = proxyhelper.get_ip()
proxy_ip =proxyhelper.right_proxies(res_pool)
print(proxy_ip)
| [
"tc.first@163.com"
] | tc.first@163.com |
56e6115affba47ecce6d8012f1beea9bd3f675f6 | 348121e654a4a626b7eef745319a7168f205ebe9 | /project/simpleapp/admin.py | 5a79ea278a4ffcee0a0845a3ed521d58efaf0b72 | [] | no_license | Vladxex/news_2 | 3d6aada6f4a32ab9a2ceb049d80667b1b2891074 | 4944c2fc55fb7c1384fdcfa77dcdc8da696ca9e9 | refs/heads/main | 2023-02-02T16:56:45.528881 | 2020-12-17T12:26:55 | 2020-12-17T12:26:55 | 322,286,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from django.contrib import admin
from .models import Category, New
admin.site.register(Category)
admin.site.register(New)
# Register your models here.
| [
"vlad_19_96@mail.ru"
] | vlad_19_96@mail.ru |
3d6963aee849bab68187c59aa775a10ae4a266f5 | 84b5ac79cb471cad1d54ed1d2c842dc5581a03f0 | /branches/pylint/config/scripts/paella-export-profile | 777b1b8eccf1dd0bbd7a397eb1c75fd8f43c9ebf | [] | no_license | BackupTheBerlios/paella-svn | c8fb5ea3ae2a5e4ca6325a0b3623d80368b767f3 | d737a5ea4b40f279a1b2742c62bc34bd7df68348 | refs/heads/master | 2021-01-18T14:07:40.881696 | 2012-11-13T20:33:08 | 2012-11-13T20:33:08 | 40,747,253 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 377 | #!/usr/bin/env python
import os, sys
from paella.db import PaellaConnection
from paella.db.profile.xmlgen import PaellaProfiles
conn = PaellaConnection()
args = sys.argv[1:]
profile = args[0]
profiles = PaellaProfiles(conn)
tfile = file(profile + '.xml', 'w')
xml = profiles.export_profile(profile)
xml.writexml(tfile, indent='\t', newl='\n', addindent='\t')
tfile.close()
| [
"umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f"
] | umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f | |
e97597a8c8c4e1f494270f4447ac1ac51ab282c5 | 37dcae73b748cc8eef0cb8e1a2dc0b2bf728b359 | /week4/swapi.py | 8a864920496b78ab9e259ff0c6a3e7f557073794 | [] | no_license | Jagestah/homegrown | e40613a89727acad17bd9915a9e6521e74b21568 | 475ce89fe7d8316465b94d10f27a50742557e34a | refs/heads/master | 2021-01-04T00:25:08.647359 | 2020-09-03T01:15:49 | 2020-09-03T01:15:49 | 240,300,802 | 2 | 1 | null | 2020-08-05T20:24:48 | 2020-02-13T15:58:50 | null | UTF-8 | Python | false | false | 1,608 | py | #! /bin/python3
###
# To use this code run `pip3 install -r requirements.txt`
###
import requests
# https://requests.readthedocs.io/en/master/
import pprint
# https://docs.python.org/3/library/pprint.html
import time
import sys
STARSHIP_ID = sys.argv[1]
# PrettyPrint changes ugly single-line json in to a human readable format
pp = pprint.PrettyPrinter(indent=2)
# Pull the information about some starships from the API
response = requests.get('https://swapi.dev/api/starships/')
# pp.pprint(response)
# Convert to response to JSON
response = response.json()
# pp.pprint(response)
# Give us just the list of starships from the response
starships = response["results"]
# pp.pprint(starships)
#### PUT ALL NEW CODE BELOW THIS LINE ####
# print(len(starships))
# pp.pprint(starships[0])
while True:
success = False
for starship in starships:
starship_url = starship["url"].rsplit("/")
if STARSHIP_ID in starship_url:
print("Name: "+starship["name"])
print(" Class: "+starship["starship_class"])
# print(type(starship["pilots"]))
if len(starship["pilots"]) > 0:
print(" Pilots: ")
for pilot in starship["pilots"]:
# print("Pilot URL: "+pilot)
pilot_data = requests.get(pilot)
pilot_data_json = pilot_data.json()
print(" "+pilot_data_json["name"])
# print("Pilots: "+starship["pilots"])
success = True
if success == False:
print("Unable to find a ship with ID "+STARSHIP_ID)
time.sleep(10)
| [
"mckeeverjohnj@gmail.com"
] | mckeeverjohnj@gmail.com |
fb9df43b05592866ffc871b29938d26eeff422d1 | f52df32b5a052d290992151e907603147afb53b0 | /sem1/maths/P2CSN17003/3Third.py | ba3b73a20a2a059bfb4ab861de446f898a840e3b | [] | no_license | afterlunch/cybersec | 5773907b34e3c1dae8f48ad068fef11f23ac1e7d | 5a56a224c6446cbd71659b5909568efb0c6e1055 | refs/heads/master | 2022-03-25T02:55:30.481066 | 2018-03-10T13:36:10 | 2018-03-10T13:36:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #gcd
m=int(input("Enter first number :"))
n=input("Enter Second number :")
x=m
y=n
r=1
gcd=1
if m>n:
a=n
b=m
else :
b=n
a=m
if a==0:
gcd=b
print "gcd=",gcd
exit(0)
while r!=0:
q=b/a
r=b%a
gcd=a
b=a
a=r
print "\ngcd(%d,%d)= %d"%(x,y,gcd)
| [
"c.amritanand@gmail.com"
] | c.amritanand@gmail.com |
847995f75e55b5833fb20f9af6021ed941d2660a | c8d507477bbd856bf4f41954186368474835a066 | /venv/Scripts/easy_install-3.7-script.py | 8cc65eea49728ce362b27be80678a2680b6473c3 | [] | no_license | jmb3471/AlgoHw | ffc4526c59c0cc1b6f77b18e9c1501020bd4d23b | a2da48ddce0b692e012028593ad7db59c80f2956 | refs/heads/master | 2023-02-03T02:19:05.904255 | 2020-12-28T00:39:58 | 2020-12-28T00:39:58 | 324,876,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #!"C:\Users\Jonathan Baxley\Desktop\AlgoHw\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"47160881+jmb3471@users.noreply.github.com"
] | 47160881+jmb3471@users.noreply.github.com |
bbbfb496488a02ad49a1820a1d8e385052809eb7 | 3950cb348a4a3ff6627d502dbdf4e576575df2fb | /.venv/Lib/site-packages/apptools/persistence/versioned_unpickler.py | 25338c9278da68d60cad7b6d117da78e73aaacdc | [] | no_license | Bdye15/Sample_Programs | a90d288c8f5434f46e1d266f005d01159d8f7927 | 08218b697db91e55e8e0c49664a0b0cb44b4ab93 | refs/heads/main | 2023-03-02T04:40:57.737097 | 2021-01-31T03:03:59 | 2021-01-31T03:03:59 | 328,053,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,883 | py | # (C) Copyright 2005-2020 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
# Standard library imports
from pickle import _Unpickler as Unpickler
from pickle import UnpicklingError, BUILD
import logging
from types import GeneratorType
# Enthought library imports
from apptools.persistence.updater import __replacement_setstate__
logger = logging.getLogger(__name__)
##############################################################################
# class 'NewUnpickler'
##############################################################################
class NewUnpickler(Unpickler):
"""An unpickler that implements a two-stage pickling process to make it
possible to unpickle complicated Python object hierarchies where the
unserialized state of an object depends on the state of other objects in
the same pickle.
"""
def load(self, max_pass=-1):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
# List of objects to be unpickled.
self.objects = []
# We overload the load_build method.
dispatch = self.dispatch
dispatch[BUILD[0]] = NewUnpickler.load_build
# call the super class' method.
ret = Unpickler.load(self)
self.initialize(max_pass)
self.objects = []
# Reset the Unpickler's dispatch table.
dispatch[BUILD[0]] = Unpickler.load_build
return ret
def initialize(self, max_pass):
# List of (object, generator) tuples that initialize objects.
generators = []
# Execute object's initialize to setup the generators.
for obj in self.objects:
if hasattr(obj, "__initialize__") and callable(obj.__initialize__):
ret = obj.__initialize__()
if isinstance(ret, GeneratorType):
generators.append((obj, ret))
elif ret is not None:
raise UnpicklingError(
"Unexpected return value from "
"__initialize__. %s returned %s" % (obj, ret)
)
# Ensure a maximum number of passes
if max_pass < 0:
max_pass = len(generators)
# Now run the generators.
count = 0
while len(generators) > 0:
count += 1
if count > max_pass:
not_done = [x[0] for x in generators]
msg = """Reached maximum pass count %s. You may have
a deadlock! The following objects are
uninitialized: %s""" % (
max_pass,
not_done,
)
raise UnpicklingError(msg)
for o, g in generators[:]:
try:
next(g)
except StopIteration:
generators.remove((o, g))
# Make this a class method since dispatch is a class variable.
# Otherwise, supposing the initial VersionedUnpickler.load call (which
# would have overloaded the load_build method) makes a pickle.load call at
# some point, we would have the dispatch still pointing to
# NewPickler.load_build whereas the object being passed in will be an
# Unpickler instance, causing a TypeError.
def load_build(cls, obj):
# Just save the instance in the list of objects.
if isinstance(obj, NewUnpickler):
obj.objects.append(obj.stack[-2])
Unpickler.load_build(obj)
load_build = classmethod(load_build)
class VersionedUnpickler(NewUnpickler):
"""This class reads in a pickled file created at revision version 'n'
and then applies the transforms specified in the updater class to
generate a new set of objects which are at revision version 'n+1'.
I decided to keep the loading of the updater out of this generic class
because we will want updaters to be generated for each plugin's type
of project.
This ensures that the VersionedUnpickler can remain ignorant about the
actual version numbers - all it needs to do is upgrade one release.
"""
def __init__(self, file, updater=None):
Unpickler.__init__(self, file)
self.updater = updater
def find_class(self, module, name):
"""Overridden method from Unpickler.
NB __setstate__ is not called until later.
"""
if self.updater:
# check to see if this class needs to be mapped to a new class
# or module name
original_module, original_name = module, name
module, name = self.updater.get_latest(module, name)
# load the class...
klass = self.import_name(module, name)
# add the updater.... TODO - why the old name?
self.add_updater(original_module, original_name, klass)
else:
# there is no updater so we will be reading in an up to date
# version of the file...
try:
klass = Unpickler.find_class(self, module, name)
except Exception:
logger.error("Looking for [%s] [%s]" % (module, name))
logger.exception(
"Problem using default unpickle functionality"
)
# restore the original __setstate__ if necessary
fn = getattr(klass, "__setstate_original__", False)
if fn:
setattr(klass, "__setstate__", fn)
return klass
def add_updater(self, module, name, klass):
"""If there is an updater defined for this class we will add it to the
class as the __setstate__ method.
"""
fn = self.updater.setstates.get((module, name), False)
if fn:
# move the existing __setstate__ out of the way
self.backup_setstate(module, klass)
# add the updater into the class
setattr(klass, "__updater__", fn)
# hook up our __setstate__ which updates self.__dict__
setattr(klass, "__setstate__", __replacement_setstate__)
else:
pass
def backup_setstate(self, module, klass):
"""If the class has a user defined __setstate__ we back it up."""
if getattr(klass, "__setstate__", False):
if getattr(klass, "__setstate_original__", False):
# don't overwrite the original __setstate__
name = "__setstate__%s" % self.updater.__class__
else:
# backup the original __setstate__ which we will restore
# and run later when we have finished updating the class
name = "__setstate_original__"
method = getattr(klass, "__setstate__")
setattr(klass, name, method)
else:
# the class has no __setstate__ method so do nothing
pass
def import_name(self, module, name):
"""
If the class is needed for the latest version of the application then
it should presumably exist.
If the class no longer exists then we should perhaps return
a proxy of the class.
If the persisted file is at v1 say and the application is at v3 then
objects that are required for v1 and v2 do not have to exist they only
need to be placeholders for the state during an upgrade.
"""
module = __import__(module, globals(), locals(), [name])
return vars(module)[name]
| [
"brady.dye@bison.howard.edu"
] | brady.dye@bison.howard.edu |
012bdc029e1fff6ec79f8cfc06baae7b9eb69a44 | ffa8b19913d891a655ff78384847ea9fdc5b0bc9 | /test/test_group_id_for_group_user_inclusion.py | 9c9b9f0104942ca2565e0a4e5b474fdf91deaf59 | [] | no_license | ccalipSR/python_sdk2 | b76124f409e26128ff291d2c33612883929c1b5f | d8979ed7434f4ffbc62fc30c90d40d93a327b7d1 | refs/heads/master | 2020-04-09T17:13:43.581633 | 2018-12-05T06:53:50 | 2018-12-05T06:53:50 | 160,473,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | # coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) # noqa: E501
OpenAPI spec version: 3.0.0
Contact: support@looker.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.group_id_for_group_user_inclusion import GroupIdForGroupUserInclusion # noqa: E501
from swagger_client.rest import ApiException
class TestGroupIdForGroupUserInclusion(unittest.TestCase):
"""GroupIdForGroupUserInclusion unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGroupIdForGroupUserInclusion(self):
"""Test GroupIdForGroupUserInclusion"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.group_id_for_group_user_inclusion.GroupIdForGroupUserInclusion() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"ccalip@shoprunner.com"
] | ccalip@shoprunner.com |
420c5b8ebde4703aad8b3b2b5263f14ff5f2fff0 | 35640d1446d03c35a56cbb7d408841833f36d99b | /Lab3/Phase1/keyParser.py | f66030f74f35844774d63f424cff9c7347a42a65 | [] | no_license | ceneri/Computer-Security | 17d0b6be81535744b062dd58abf002935958a3e8 | a086127dd57ed4fd94b7cba18a26b0acdf21a4fb | refs/heads/master | 2021-03-19T10:54:31.614969 | 2018-03-29T18:57:44 | 2018-03-29T18:57:44 | 117,271,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | #!/usr/bin/env python3
"""
kyParser takes as input file containing an obfuscated key (Long string of characters)
and outputs all possible substrings of size KEY_SIZE to a line in the specified output file
To call script:
python keyParser.py <inputFileName> <outputFileName>
"""
import sys
#Const Values
KEY_SIZE = 32
def getObfuscatedKey(key_file):
file = open(key_file, "r")
for line in file:
key = line[:-1]
file.close()
return key
def keyParsing(input_file, output_file):
#Get obfuscated key
obfuscated =getObfuscatedKey(input_file)
obfLength = len(obfuscated)
#Open File for writing
file = open(output_file, "w")
for i in range(obfLength - KEY_SIZE + 1):
file.write(obfuscated[i:i+32] + '\n')
file.close()
def main():
#Get file arguments
INPUT_FILE = sys.argv[1]
OUTPUT_FILE = sys.argv[2]
keyParsing(INPUT_FILE, OUTPUT_FILE)
if __name__ == '__main__':
main() | [
"ceneri@ucsc.edu"
] | ceneri@ucsc.edu |
e08b029efc67736d02c527c4555e24f9cf75f702 | 7d7cbc1dae1252a30797a3c6a10fa109086b3cd5 | /producto/producto_views.py | d237388f080e87838b4d5d9b50d3e10c8f3e72f9 | [] | no_license | ShinichiroMike/consignacion-dojo | 6ad27db404f790976f33c7a63285c64c8c3476d2 | 548832d449c5f1437dd87f327eb5c95bade209ea | refs/heads/master | 2021-09-06T23:54:36.024369 | 2018-02-13T17:53:27 | 2018-02-13T17:53:27 | 113,883,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
# from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from .models import Producto
from filters.views import FilterMixin
from django.views.generic import DetailView
import django_filters
# Vista de producto con soft delete, vista basada en funcion
# vista para listar todos los productos que no han sido eliminados
# @login_required
# @user_passes_test(lambda u: u.profile.is_admin == True)
# def ProductoList(request):
# if request.method == 'POST':
# pk = request.POST.get('id')
# if pk is not None:
# producto = Producto.objects.filter(pk=pk).update(deleted=True)
# productos_list = Producto.objects.filter(deleted=False).order_by('id')
# page = request.GET.get('page', 1)
# paginator = Paginator(productos_list, 10)
# try:
# productos = paginator.page(page)
# except PageNotAnInteger:
# productos = paginator.page(1)
# except EmptyPage:
# productos = paginator.page(paginator.num_pages)
# context = {'productos': productos}
# return render(request, 'producto/list_producto.html', context)
from .filters import ProductoFilter
class ProductoList(LoginRequiredMixin, FilterMixin, django_filters.views.FilterView):
model = Producto
paginate_by = 10
filterset_class = ProductoFilter
success_url = reverse_lazy('producto_list')
def post(self, request, *args, **kwargs):
id_producto = request.POST.get('id')
if id_producto is not None:
self.model.objects.filter(pk=id_producto).update(deleted=True)
return HttpResponseRedirect(self.success_url)
def get_queryset(self):
return self.model.objects.filter(deleted=False).order_by('id')
class ProductoDetail(LoginRequiredMixin, DetailView):
model = Producto
template_name = 'producto/producto_detail.html' | [
"shirowatla@gmail.com"
] | shirowatla@gmail.com |
66ec008d30b4c1f2d4f26989dac24fc236c62540 | 4a117dc0df46efd84b7a8860f65e66b9c25c85ab | /case3_2.py | 717b3a50822f2e8c9e9ff9e5cd590e56d1b2a399 | [] | no_license | XushengLuo/LTL_MRTA_optimal | d8ee1797f9730f5a418245eb2db659316d018eec | ac6e463a6b6162f188c835f298edf3934eeec341 | refs/heads/master | 2022-12-09T05:35:27.193483 | 2020-09-05T02:09:14 | 2020-09-05T02:09:14 | 278,577,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,284 | py | from task import Task
from restricted_buchi_parse import Buchi
from datetime import datetime
import restricted_poset
from workspace_case3 import Workspace
import matplotlib.pyplot as plt
import restricted_weighted_ts
import restricted_weighted_ts_suffix
import restricted_milp
import restricted_milp_suf
import pickle
from vis import plot_workspace
import numpy
from post_processing import run
# from MAPP_heuristic import mapp
from restricted_GMAPP import mapp, compute_path_cost, return_to_initial
from vis import vis
import sys
from termcolor import colored, cprint
import networkx as nx
from sympy.logic.boolalg import to_dnf
import numpy as np
print_red_on_cyan = lambda x: cprint(x, 'blue', 'on_red')
def ltl_mrta(formula):
start = datetime.now()
workspace = Workspace(formula)
with open('data/workspace', 'wb') as filehandle:
pickle.dump(workspace, filehandle)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# plot_workspace(workspace, ax)
# plt.show()
# with open('data/workspace', 'rb') as filehandle:
# workspace = pickle.load(filehandle)
type_robot_location = workspace.type_robot_location.copy()
# return to initial locations or not
loop = True
one_time = True
draw = False
show = False
best_cost = np.inf
cost = []
time_record = []
horizon_record = []
number_of_paths = 0
best_path = dict()
# --------------- constructing the Buchi automaton ----------------
task = Task(formula)
buchi = Buchi(task, workspace)
buchi.construct_buchi_graph()
if show:
print('partial time to build buchi: {0}'.format((datetime.now() - start).total_seconds()))
init_acpt = buchi.get_init_accept()
for pair, _ in init_acpt:
# workspace.type_robot_location = type_robot_location.copy()
# workspace.update_after_prefix()
# buchi.atomic_prop = workspace.atomic_prop
# buchi.regions = workspace.regions
init_state, accept_state = pair[0], pair[1]
# ======================================= prefix part =================================================#
# #
# #
# #
# ======================================= prefix part =================================================#
# ----------------- infer the poset -----------------------
pruned_subgraph, unpruned_subgraph, paths = buchi.get_subgraph(init_state, accept_state, 'prefix')
edge2element, element2edge = buchi.get_element(pruned_subgraph)
if not edge2element:
continue
element_component2label = buchi.element2label2eccl(element2edge, pruned_subgraph)
hasse_graphs = buchi.map_path_to_element_sequence(edge2element, paths)
if sys.argv[1] == 'f':
with open('data/poset', 'wb') as filehandle:
pickle.dump(pruned_subgraph, filehandle)
pickle.dump(edge2element, filehandle)
pickle.dump(element2edge, filehandle)
pickle.dump(element_component2label, filehandle)
pickle.dump(hasse_graphs, filehandle)
if sys.argv[1] == 'p':
with open('data/poset', 'rb') as filehandle:
pruned_subgraph = pickle.load(filehandle)
edge2element = pickle.load(filehandle)
element2edge = pickle.load(filehandle)
element_component2label = pickle.load(filehandle)
hasse_graphs = pickle.load(filehandle)
# loop over all posets
for _, poset_relation, pos, hasse_diagram in hasse_graphs:
if show:
print_red_on_cyan('================ prefix part ================')
# ----------------- restore after the suffix if not succeed -----------------
workspace.type_robot_location = type_robot_location.copy()
workspace.update_after_prefix()
buchi.atomic_prop = workspace.atomic_prop
buchi.regions = workspace.regions
robot2eccl = restricted_poset.element2robot2eccl(pos, element2edge, pruned_subgraph)
if show:
print('partial time to poset: {0}'.format((datetime.now() - start).total_seconds()))
if show:
for order in poset_relation:
print(pruned_subgraph.edges[element2edge[order[0]]]['formula'], ' -> ',
pruned_subgraph.edges[element2edge[order[1]]]['formula'])
print('----------------------------------------------')
incomparable_element, larger_element, smaller_element, strict_larger_element = \
restricted_poset.incomparable_larger(pos, poset_relation, hasse_diagram)
# --------------- construct the routing graph ---------------
init_type_robot_node, element_component_clause_literal_node, node_location_type_component_element, \
num_nodes = restricted_weighted_ts.construct_node_set(pos, element2edge, pruned_subgraph,
workspace.type_robot_label)
edge_set = restricted_weighted_ts.construct_edge_set(pos, element_component_clause_literal_node,
element2edge, pruned_subgraph,
element_component2label,
init_type_robot_node, incomparable_element,
strict_larger_element,
larger_element, buchi.imply)
ts = restricted_weighted_ts.construct_graph(num_nodes, node_location_type_component_element, edge_set,
workspace.p2p)
if show:
print('partial time before milp: {0}'.format((datetime.now() - start).total_seconds()))
# --------------------- MILP -------------------------
maximal_element = [node for node in hasse_diagram.nodes() if hasse_diagram.in_degree(node) == 0]
robot_waypoint_pre, robot_time_pre, id2robots, robot_label_pre, \
robot_waypoint_axis, robot_time_axis, time_axis, acpt_run, \
= restricted_milp.construct_milp_constraint(ts, workspace.type_num, pos,
pruned_subgraph,
element2edge,
element_component_clause_literal_node,
poset_relation,
init_type_robot_node,
strict_larger_element,
incomparable_element,
larger_element,
robot2eccl, init_state,
buchi, maximal_element, show)
if not robot_waypoint_pre:
continue
for robot, time in list(robot_time_pre.items()):
# delete such robots that did not participate (the initial location of robots may just satisfies)
if time[-1] == 0 and len(time) == 1:
del robot_time_pre[robot]
del robot_waypoint_pre[robot]
if show:
print('----------------------------------------------')
for type_robot, waypoint in robot_waypoint_pre.items():
print(type_robot, " : ", waypoint)
print(type_robot, " : ", robot_time_pre[type_robot])
print(type_robot, " : ", robot_label_pre[type_robot])
print('----------------------------------------------')
print('time axis: ', time_axis)
for robot, time in list(robot_time_axis.items()):
# delete such robots that did not participate (the initial location of robots may just satisfies)
if not time:
del robot_time_axis[robot]
del robot_waypoint_axis[robot]
if show:
for type_robot, waypoint in robot_waypoint_axis.items():
print(type_robot, " : ", waypoint)
print(type_robot, " : ", robot_time_axis[type_robot])
print('----------------------------------------------')
for stage in acpt_run:
print(stage)
print('----------------------------------------------')
# --------------------- GMRPP -------------------------
robot_path_pre = mapp(workspace, buchi, acpt_run, robot_waypoint_axis, robot_time_axis,
'simultaneous', show)
# vis(workspace, robot_path_pre, {robot: [len(path)] * 2 for robot, path in robot_path_pre.items()},
# [])
# ----------------- check whether the final locations of the prefix part satisfy the accept state ---------
workspace.type_robot_location = {robot: path[-1] for robot, path in robot_path_pre.items()}
workspace.update_after_prefix(loop)
buchi.atomic_prop = workspace.atomic_prop
buchi.regions = workspace.regions
last_subtask = acpt_run[-1]
# add the removed self-loop of initial state
if buchi.remove_init_attr:
nx.set_node_attributes(pruned_subgraph, {init_state: buchi.remove_init_attr})
# check whether final locations satisfy the self-loop of the accept state
if buchi.ap_sat_label(pruned_subgraph.nodes[accept_state]['label'],
pruned_subgraph.nodes[accept_state]['neg_label']):
end = datetime.now()
if show:
print('total time for the prefix parts: {0}'.format((end - start).total_seconds()))
cost_pre = compute_path_cost(robot_path_pre)
cost.append(cost_pre)
time_record.append((end - start).total_seconds())
horizon_record.append(len(robot_path_pre[(1, 0)]))
if best_cost >= cost_pre:
best_path = robot_path_pre
best_cost = cost_pre
print('the total cost of the found path is: ', best_cost, cost, time_record, horizon_record)
if show:
print_red_on_cyan(task.formula)
print_red_on_cyan([init_state, accept_state, buchi.size,
[pruned_subgraph.number_of_nodes(), pruned_subgraph.number_of_edges()],
'A path is found for the case where the accepting state has a self-loop'])
if draw:
vis(workspace, robot_path_pre, {robot: [len(path)] * 2 for robot, path in robot_path_pre.items()},
[])
if one_time:
return
elif len(cost) > number_of_paths:
return
else:
continue
# ======================================= suffix part =================================================#
# #
# #
# #
# ======================================= suffix part =================================================#
# ----------------- infer the poset -----------------------
pruned_subgraph_suf, unpruned_subgraph_suf, paths_suf = buchi.get_subgraph(accept_state, accept_state,
'suffix', last_subtask)
# no suffix graph due to that final locations of prefix part do not satisfy the outgoing edges
# of the accepting vertex
if not pruned_subgraph_suf:
continue
# no paths due to the implication does not hold
if not paths_suf:
continue
edge2element_suf, element2edge_suf = buchi.get_element(pruned_subgraph_suf)
element_component2label_suf = buchi.element2label2eccl(element2edge_suf, pruned_subgraph_suf)
hasse_graphs_suf = buchi.map_path_to_element_sequence(edge2element_suf, paths_suf)
if sys.argv[1] == 'f':
with open('data/poset_suf', 'wb') as filehandle:
pickle.dump(pruned_subgraph_suf, filehandle)
pickle.dump(edge2element_suf, filehandle)
pickle.dump(element2edge_suf, filehandle)
pickle.dump(element_component2label_suf, filehandle)
pickle.dump(hasse_graphs_suf, filehandle)
if sys.argv[1] == 'p':
with open('data/poset_suf', 'rb') as filehandle:
pruned_subgraph_suf = pickle.load(filehandle)
edge2element_suf = pickle.load(filehandle)
element2edge_suf = pickle.load(filehandle)
element_component2label_suf = pickle.load(filehandle)
hasse_graphs_suf = pickle.load(filehandle)
for _, poset_relation_suf, pos_suf, hasse_diagram_suf in hasse_graphs_suf:
if show:
print_red_on_cyan('================ suffix part ================')
for order_suf in poset_relation_suf:
print(pruned_subgraph_suf.edges[element2edge_suf[order_suf[0]]]['formula'], ' -> ',
pruned_subgraph_suf.edges[element2edge_suf[order_suf[1]]]['formula'])
print('----------------------------------------------')
robot2eccl_suf = restricted_poset.element2robot2eccl(pos_suf, element2edge_suf, pruned_subgraph_suf)
incomparable_element_suf, larger_element_suf, smaller_element_suf, strict_larger_element_suf = \
restricted_poset.incomparable_larger(pos_suf, poset_relation_suf, hasse_diagram_suf)
# --------------- construct the routing graph ---------------
minimal_element_suf = [node for node in hasse_diagram_suf.nodes()
if hasse_diagram_suf.out_degree(node) == 0]
init_type_robot_node_suf, element_component_clause_literal_node_suf, \
node_location_type_component_element_suf, \
num_nodes_suf, final_element_type_robot_node \
= restricted_weighted_ts_suffix.construct_node_set(pos_suf, element2edge_suf, pruned_subgraph_suf,
workspace.type_robot_label,
minimal_element_suf, last_subtask, loop)
edge_set_suf = restricted_weighted_ts_suffix.construct_edge_set(pos_suf,
element_component_clause_literal_node_suf,
element2edge_suf, pruned_subgraph_suf,
element_component2label_suf,
init_type_robot_node_suf,
incomparable_element_suf,
strict_larger_element_suf,
larger_element_suf,
buchi.imply,
minimal_element_suf,
final_element_type_robot_node)
ts_suf = restricted_weighted_ts_suffix.construct_graph(num_nodes_suf,
node_location_type_component_element_suf,
edge_set_suf,
workspace.p2p)
# --------------------- MILP -------------------------
maximal_element_suf = [node for node in hasse_diagram_suf.nodes()
if hasse_diagram_suf.in_degree(node) == 0]
robot_waypoint_suf, robot_time_suf, _, robot_label_suf, robot_waypoint_axis_suf, robot_time_axis_suf, \
time_axis_suf, acpt_run_suf \
= restricted_milp_suf.construct_milp_constraint(ts_suf, workspace.type_num, pos_suf,
pruned_subgraph_suf,
element2edge_suf,
element_component_clause_literal_node_suf,
poset_relation_suf, init_type_robot_node_suf,
strict_larger_element_suf, incomparable_element_suf,
larger_element_suf,
robot2eccl_suf, id2robots, accept_state, buchi,
minimal_element_suf, final_element_type_robot_node,
workspace.type_robot_label,
maximal_element_suf, last_subtask, show, loop)
if not robot_waypoint_suf:
continue
for robot, time in list(robot_time_suf.items()):
# delete such robots that did not participate (the initial location of robots may just satisfies)
if time[-1] == 0 and len(time) == 1:
del robot_time_suf[robot]
del robot_waypoint_suf[robot]
if show:
print('----------------------------------------------')
for type_robot, waypoint in robot_waypoint_suf.items():
print(type_robot, " : ", waypoint)
print(type_robot, " : ", robot_time_suf[type_robot])
print(type_robot, " : ", robot_label_suf[type_robot])
print('----------------------------------------------')
print('time axis: ', time_axis_suf)
for robot, time in list(robot_time_axis_suf.items()):
# delete such robots that did not participate (the initial location of robots may just satisfies)
if not time:
del robot_time_axis_suf[robot]
del robot_waypoint_axis_suf[robot]
if show:
for type_robot, waypoint in robot_waypoint_axis_suf.items():
print(type_robot, " : ", waypoint)
print(type_robot, " : ", robot_time_axis_suf[type_robot])
print('----------------------------------------------')
for stage in acpt_run_suf:
print(stage)
print('----------------------------------------------')
robot_path_suf = mapp(workspace, buchi, acpt_run_suf, robot_waypoint_axis_suf,
robot_time_axis_suf, 'simultaneous', show)
# return to initial locations
if not loop:
horizon = workspace.longest_time({robot: path[-1] for robot, path in robot_path_suf.items()},
workspace.type_robot_location)
acpt_run_suf = {'subtask': 'return',
'time_element': [horizon, -1],
'essential_robot_edge': {label: [type_robot]
for type_robot, label in
workspace.type_robot_label.items()},
'essential_clause_edge': last_subtask['essential_clause_edge'],
'neg_edge': last_subtask['neg_edge'],
'essential_robot_vertex': last_subtask['essential_robot_edge'],
'neg_vertex': last_subtask['neg_edge']
}
robot_path_return = return_to_initial(workspace, acpt_run_suf, {robot: path[-1]
for robot, path in
robot_path_suf.items()}
)
for robot, path in robot_path_suf.items():
path += robot_path_return[robot][1:]
robot_path = {robot: path + robot_path_suf[robot][1:] + robot_path_suf[robot][1:] for
robot, path in robot_path_pre.items()}
cost_pre = compute_path_cost(robot_path_pre)
cos_suf = compute_path_cost(robot_path_suf)
end = datetime.now()
if show:
print('total time for the prefix + suffix parts: {0}'.format((end - start).total_seconds()))
cost.append((cost_pre, cos_suf))
time_record.append((end-start).total_seconds())
horizon_record.append((len(robot_path_pre[(1, 0)]), len(robot_path_suf[(1, 0)])))
if best_cost >= cost_pre + cos_suf:
best_path = robot_path
best_cost = cost_pre + cos_suf
print('the total cost of the found path is: ', best_cost, cost, time_record, horizon_record)
if show:
print_red_on_cyan(task.formula)
print_red_on_cyan([init_state, accept_state, buchi.size,
[buchi.buchi_graph.number_of_nodes(), buchi.buchi_graph.number_of_edges()],
([pruned_subgraph.number_of_nodes(), pruned_subgraph.number_of_edges()],
[pruned_subgraph_suf.number_of_nodes(), pruned_subgraph_suf.number_of_edges()]),
'A path is found for the case where the accepting state does not have a self-loop'])
if draw:
vis(workspace, robot_path, {robot: [len(path)] * 2 for robot, path in robot_path.items()}, [])
if one_time:
return
if len(cost) > number_of_paths:
return
return cost
if __name__ == '__main__':
ltl_mrta(int(sys.argv[2]))
# ltl_mrta(4)
| [
"xl214@duke.edu"
] | xl214@duke.edu |
c3d059c6a856c09a0127d8793a81b5c97ef00863 | a3ff8c37e8079412477e203faa2f9526ffb66b7a | /realworld_expt/expt.py | 9d07e0d72ad6f148d9a7608d719a22062f2252cf | [] | no_license | greentfrapp/temp | 07c83aaf08dd236f6305af877280698612129681 | 406864f3c7c2f78c23df2c29b640ba9ea622eb27 | refs/heads/master | 2020-03-29T19:24:48.466126 | 2019-01-30T15:14:10 | 2019-01-30T15:14:10 | 150,261,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,493 | py | from __future__ import print_function
try:
raw_input
except:
raw_input = input
import numpy as np
from keras.models import load_model
import json
import tensorflow as tf
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
from scipy.stats import chi
from absl import flags
from absl import app
from utils import MammoData as Data
FLAGS = flags.FLAGS
flags.DEFINE_bool("plot", False, "Plot")
flags.DEFINE_bool("train", False, "Train")
flags.DEFINE_integer("std", 10, "std")
flags.DEFINE_integer("iter", 5, "No. of iForest runs per sample set")
flags.DEFINE_integer("samples", 5, "No. of sample sets generated")
def cal_auc(x, y):
return np.trapz(y, x)
def get_dist(values):
center = np.mean(values, axis=0)
std = np.std(values, axis=0)
chi_std = chi.std(2, 0, np.linalg.norm(std))
dist = np.linalg.norm(values - center, axis=1)
for i, el in enumerate(dist):
if el > 2.7 * chi_std:
dist[i] = 0.
elif el < 2.3 * chi_std:
dist[i] = 0.
# dist = np.exp(dist)
dist /= np.sum(dist)
return dist
def roc_val(classifier, x_test, y_test):
predictions = classifier.predict(x_test)
predicted_anomalies = (predictions == -1).astype(np.int32)
tp = np.sum(predicted_anomalies[np.where(y_test == predicted_anomalies)] == 1)
tn = np.sum(predicted_anomalies[np.where(y_test == predicted_anomalies)] == 0)
fp = np.sum(predicted_anomalies) - tp
fn = np.sum(predicted_anomalies == 0) - tn
if tp == 0:
recall = tp_rate = 0.
precision = 1.
else:
recall = tp_rate = tp / (tp + fn)
precision = tp / (tp + fp)
if recall + precision == 0:
f1 = 0.
else:
f1 = (2 * recall * precision) / (recall + precision)
fp_rate = fp / (fp + tn)
return {"TPR": tp_rate, "FPR": fp_rate, "F1": f1}
def generate(n_run):
(x_train, y_train), (x_test, y_test) = dataset.load_data()
x = x_train
y = y_train
latent = encoder.predict(x)
center = np.mean(latent, axis=0)
latent = np.random.randn(synth_size, 2)
for i, vector in enumerate(latent):
latent[i] = 10. * vector / np.linalg.norm(vector)
latent += center
samples = decoder.predict(latent.reshape(-1, 2))
with open(folder + "synthetic_samples_{}.json".format(FLAGS.std, n_run), 'w') as file:
json.dump(samples.tolist(), file)
return samples
def smote(n_run):
(x_train, y_train), (x_test, y_test) = dataset.load_data()
x = x_train
y = y_train
samples = []
for i in np.arange(synth_size):
choice = np.random.choice(np.arange(len(x)))
a = x[choice]
x_copy = np.concatenate((x[:choice], x[choice + 1:]))
x_copy -= a
x_copy = np.linalg.norm(x_copy, axis=1)
b = np.argmin(x_copy)
if b >= choice:
b += 1
b = x[b]
scale = np.random.rand()
c = scale * (a-b) + b
samples.append(list(c))
with open(folder + "smote_reg_data_samples_{}.json".format(FLAGS.std, n_run), 'w') as file:
json.dump(samples, file)
return samples
def expt(n_run):
(x_train, y_train), (x_test, y_test) = dataset.load_data()
x_synth = {
"doping": generate(n_run),
"smote": smote(n_run),
}
x = {
"original": x_train,
}
for synth_type in x_synth:
x[synth_type] = np.concatenate((x_train, x_synth[synth_type]))
stat_types = ["TPR", "FPR", "F1"]
stats = {}
for method in x:
stats[method] = dict(zip(stat_types, [[] for stat in stat_types]))
con_vals = np.arange(0.01, 0.3, 0.02)
con_vals = np.concatenate(([0.001, 0.003, 0.005, 0.007], con_vals))
for i, con_val in enumerate(con_vals):
print("Run #{}/{}".format(i + 1, len(con_vals)))
run_stats = {}
for method in x:
run_stats[method] = dict(zip(stat_types, [[] for stat in stat_types]))
for j in np.arange(FLAGS.iter):
classifiers = {}
for method in x:
classifiers[method] = IsolationForest(contamination=con_val)
classifiers[method].fit(x[method])
results = roc_val(classifiers[method], x_test, y_test)
for stat in results:
run_stats[method][stat].append(results[stat])
for method in stats:
for stat in stat_types:
stats[method][stat].append(np.mean(run_stats[method][stat]))
return stats
def train():
methods = ["original", "doping", "smote"]
stat_types = ["TPR", "FPR", "F1"]
all_stats = {}
for method in methods:
all_stats[method] = dict(zip(stat_types, [[] for stat in stat_types]))
for i in np.arange(FLAGS.samples):
expt_stats = expt(i)
for method in methods:
for stat in stat_types:
all_stats[method][stat].append(expt_stats[method][stat])
for method in methods:
for stat in stat_types:
all_stats[method][stat] = np.mean(all_stats[method][stat], axis=0).tolist()
with open(folder + "stats.json".format(FLAGS.std), 'w') as file:
json.dump(all_stats, file)
def plot(all_stats, methods=None):
f1_list = []
auc_list = []
g_list = []
if methods == None:
methods = all_stats.keys()
for method in methods:
# print("\n" + method)
f1 = np.max(all_stats[method]["F1"])
auc = cal_auc(np.concatenate(([0.0], all_stats[method]["FPR"], [1.0])), np.concatenate(([0.0], all_stats[method]["TPR"], [1.0])))
# print("F1[{}]\t{}".format(np.argmax(all_stats[method]["F1"]), np.max(all_stats[method]["F1"])))
# print("AUC\t{}".format(cal_auc(np.concatenate(([0.0], all_stats[method]["FPR"], [1.0])), np.concatenate(([0.0], all_stats[method]["TPR"], [1.0])))))
f1_list.append([f1, method])
auc_list.append([auc, method])
r = all_stats[method]["TPR"][np.argmax(all_stats[method]["F1"])]
p = f1 * r / (2 * r - f1)
g = (r * p) ** 0.5
# print(2 * p * r / (p + r))
# print(p, r, f1)
g_list.append([g, method])
f1_list.sort(reverse=True)
auc_list.sort(reverse=True)
g_list.sort(reverse=True)
print("\nF1:")
for [f1, method] in f1_list:
print("{}: {}".format(method, f1))
print("\nAUC:")
for [auc, method] in auc_list:
print("{}: {}".format(method, auc))
print("\nG:")
for [g, method] in g_list:
print("{}: {}".format(method, g))
def main(unused_argv):
global desc, folder, synth_size, encoder, decoder, dataset
desc = "aae"
folder = "./expt_std{}_temp2/".format(FLAGS.std)
folder = "./"
tf.gfile.MakeDirs(folder)
synth_size = 1100
encoder = load_model('{}_encoder_{}_test.h5'.format(desc, FLAGS.std))
decoder = load_model('{}_decoder_{}_test.h5'.format(desc, FLAGS.std))
dataset = Data()
if FLAGS.train:
train()
elif FLAGS.plot:
methods = ["original", "doping", "smote"]
stat_types = ["TPR", "FPR", "F1"]
with open(folder + "stats.json".format(FLAGS.std), 'r') as file:
all_stats = json.load(file)
plot(all_stats, methods)
if __name__ == "__main__":
app.run(main)
| [
"limsweekiat@gmail.com"
] | limsweekiat@gmail.com |
c3f87252231909bf4d058b819a5d6f1fa0c34340 | 5c67c6e9d69fd583d0a2c13063788b1a6df782cb | /online-version/defi.py | 5ad2e2567b1c9f7531c762310ae8b01f0b279c6c | [] | no_license | mathyomama/vocab-flash | aa0fbce34c330e27d43a6814b66c8b661a2bd4a8 | a808d54be5027eb9f53edb8805c2c9a246987acc | refs/heads/master | 2016-09-06T01:17:03.238908 | 2013-10-18T05:57:27 | 2013-10-18T05:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import parse
import urllib2
import error
class Definition(object):
"""This class takes a word and uses urllib2 to grab a file which is then parsed using Parse from parser."""
def __init__(self, word):
try:
self.data = urllib2.urlopen("http://en.wiktionary.org/w/index.php?action=raw&title=%s" % word)
self.definition = parse.Parser(self.data)
except urllib2.HTTPError:
self.http_error_message()
def check_word(self, data):
pass
def http_error_message(self):
print error.http_error
def print_entry(self):
print self.definition.get_entry()
| [
"mathyomama@gmail.com"
] | mathyomama@gmail.com |
6682865495c2e982113ba738dec842ce4ec2a54f | 9aa2141c98339bfb66714f00263fd701cec1965a | /main.py | 61e057c8367865d639efed5184efffed2988d7b4 | [] | no_license | mateuszkowalke/stock-predictor | 9b904bf99addd0c5acde6d84c03f6b3e203ce988 | cd767529a7e056ce2e6b32a8dee858d2a558bdb1 | refs/heads/master | 2023-04-08T12:03:14.884185 | 2021-04-17T16:04:02 | 2021-04-17T16:04:02 | 358,923,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,746 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader as web
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
# load data
company = 'FB'
start = dt.datetime(2012, 1, 1)
end = dt.datetime(2020, 1, 1)
data = web.DataReader(company, 'yahoo', start, end)
# prepare dataReader
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data['Close'].values.reshape(-1, 1))
prediction_days = 60
x_train = []
y_train = []
for x in range(prediction_days, len(scaled_data)):
x_train.append(scaled_data[x-prediction_days:x, 0])
y_train.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
print(x_train.shape[1])
# build model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True,
input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=25, batch_size=32)
''' test on existing data '''
test_start = dt.datetime(2020, 1, 1)
test_end = dt.datetime.now()
test_data = web.DataReader(company, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values
total_dataset = pd.concat((data['Close'], test_data['Close']), axis=0)
model_inputs = total_dataset[len(
total_dataset) - len(test_data) - prediction_days:].values
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scaler.transform(model_inputs)
# predictions on test data
x_test = []
for x in range(prediction_days, len(model_inputs)):
x_test.append(model_inputs[x-prediction_days:x, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predicted_prices = model.predict(x_test)
predicted_prices = scaler.inverse_transform(predicted_prices)
# plot test predictions
plt.plot(actual_prices, color="black", label="Actual price")
plt.plot(predicted_prices, color="green", label="Predicted price")
plt.title(f'{company} Share Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
# predict next day
real_data = [
model_inputs[len(model_inputs) + 1 - prediction_days:len(model_inputs+1), 0]]
real_data = np.array(real_data)
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1))
prediction = model.predict(real_data)
prediction = scaler.inverse_transform(prediction)
print(f'Prediction: {prediction}')
| [
"mateusz.kowalke@aurafutures.com"
] | mateusz.kowalke@aurafutures.com |
51488bc065ab15635618159a0ae686e9e2d730bf | e33b7ce46d1c10142712cb59399dbdf6a4dfe1d2 | /test_class/args_handler.py | 8f42be43d4807e3da40a028c17272f2e2c438bcf | [] | no_license | match8969/practice_python | a1d80232c17c373abc7b0aa514a9e17425592137 | fee9a9cef2558e49282b619e3d3ed1d41d679ecf | refs/heads/master | 2020-04-26T07:23:35.788289 | 2019-03-02T02:15:23 | 2019-03-02T02:15:23 | 173,392,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | """
Reference :: https://stackoverflow.com/questions/141545/how-to-overload-init-method-based-on-argument-type
lass MyData:
... def __init__(self, data):
... "Initialize MyData from a sequence"
... self.data = data
...
... @classmethod
... def fromfilename(cls, filename):
... "Initialize MyData from a file"
... data = open(filename).readlines()
... return cls(data)
...
... @classmethod
... def fromdict(cls, datadict):
... "Initialize MyData from a dict's items"
... return cls(datadict.items())
...
MyData([1, 2, 3]).data
[1, 2, 3]
MyData.fromfilename("/tmp/foobar").data
['foo\n', 'bar\n', 'baz\n']
MyData.fromdict({"spam": "ham"}).data
[('spam', 'ham')]
"""
class ArgsHandler(object):
list_args = []
def __init__(self, list_args):
self.list_args = list_args
def count_args(self):
return len(self.list_args)
def to_dict_parsedfile(self):
dict = {}
# TODO Check the args and return the detail
# IMAGE dict = {args1: .csv, args2: txtfile, args3: dir}
return
# test
def show_args(self):
print('list_args is......')
print(self.list_args) | [
"match8969@gmail.com"
] | match8969@gmail.com |
5590f5c97292bf43d4e6e738676096ae1f3baffe | 239d9f4c1929dbde1b86531939c4276e655cf5bd | /1_to_10.py | 5b6a9a6219d422c2d037de0ac93ab969a3d2ba9e | [] | no_license | trishacjames/MyFirstPython | 14c8f5f9387c1d33e89b1977a4c14138d418c4c9 | b4f0fc88394679bf17e0da7ad5be716f98ec6f07 | refs/heads/master | 2020-05-14T08:15:25.500159 | 2019-04-17T19:23:12 | 2019-04-17T19:23:12 | 181,715,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | count = 1
while count <= 10:
print(count)
count += 1
| [
"trishajames@Trishas-MacBook-Air.local"
] | trishajames@Trishas-MacBook-Air.local |
3327edc9b1fd79eac440d1860f7b72ec8890660d | 310599a12c65d9910347fbfe917ee82642d6b2b7 | /mask_rcnn/find_hsv_value.py | 4b4f21ec6f03298499fe75dc151a25846d9bac8b | [
"MIT"
] | permissive | OOIIps/Mask_RCNN | 3988f8f722780ddcc653330d874af569a7ca15ab | b607289d42e9b748e1ca65f76097e2816507ded7 | refs/heads/master | 2020-07-24T04:18:11.741885 | 2019-09-11T11:47:30 | 2019-09-11T11:47:30 | 207,799,027 | 0 | 0 | NOASSERTION | 2019-09-11T11:46:29 | 2019-09-11T11:46:29 | null | UTF-8 | Python | false | false | 1,365 | py | import cv2
import numpy as np
image_hsv = None
pixel = (20,60,80)
max_hsv, min_hsv = np.zeros(3), np.ones(3)*255
# mouse callback function
def pick_color(event,x,y,flags,param):
global max_hsv, min_hsv
if event == cv2.EVENT_LBUTTONDOWN:
pixel = image_hsv[y,x]
#you might want to adjust the ranges(+-10, etc):
upper = np.array([pixel[0] + 10, pixel[1] + 10, pixel[2] + 40])
lower = np.array([pixel[0] - 10, pixel[1] - 10, pixel[2] - 40])
for i in range(3):
if(max_hsv[i]<upper[i]):
max_hsv[i]=upper[i]
if(min_hsv[i]>lower[i]):
min_hsv[i]=lower[i]
image_mask = cv2.inRange(image_hsv,lower,upper)
cv2.imshow("mask",image_mask)
print (max_hsv, min_hsv)
def main():
import sys
global image_hsv, pixel # mouse callback
image_src = cv2.imread(sys.argv[1])
if image_src is None:
print ("File Read Error")
return
cv2.imshow("bgr",image_src)
## NEW ##
cv2.namedWindow('hsv')
cv2.setMouseCallback('hsv', pick_color)
# now click into the hsv img , and look at values:
image_hsv = cv2.cvtColor(image_src,cv2.COLOR_BGR2HSV)
cv2.imshow("hsv",image_hsv)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__=='__main__':
main()
| [
"hanskrupakar1994@gmail.com"
] | hanskrupakar1994@gmail.com |
1b3ed7927b1a3549562ce933a1aa1173925bbf3c | 567a4eb41d2e92c16d7b5c99913e075baf1d63e8 | /python/leetcode/exam4.3.py | 47896ed93e9581a91430631e804a2792aeefadb7 | [] | no_license | sunyikang/skilltraining | 299b342be8d4b720d7856efacfa4e3d823635760 | 3081294fdf8718b0f5b6fa4dbd0fd67fc60cdc24 | refs/heads/master | 2022-12-25T15:44:45.563570 | 2020-04-29T14:56:25 | 2020-04-29T14:56:25 | 153,577,372 | 0 | 0 | null | 2022-12-22T09:41:34 | 2018-10-18T06:55:18 | JavaScript | UTF-8 | Python | false | false | 316 | py | def syrac(n):
while n>1:
if n%2 == 0:
print "{} and {}".format("a1", n)
n=n//2
print "{} and {}".format("a2", n)
else:
print "{} and {}".format("b1", n)
n=3*n +1
print "{} and {}".format("b2", n)
return n
print syrac(9) | [
"sunyikang@gmail.com"
] | sunyikang@gmail.com |
dec281751603425b8397dc65a0ebbd7b8b50ff7f | a564b8277e33eb27009089ec2e216a4d266a8861 | /官方配套代码/15/15.3/Senior/server/CrazyitDict.py | 6fc2e469e9af559f3323a86b8dde9a2555759584 | [
"Unlicense"
] | permissive | yifengyou/crazy-python | 3cb50f462e4ddb921c365e2f0cb3e846e6539383 | 28099bd5011de6981a7c5412783952cc7601ae0c | refs/heads/main | 2023-06-18T18:10:52.691245 | 2021-07-18T14:21:03 | 2021-07-18T14:21:03 | 387,088,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
class CrazyitDict(dict):
# 根据value查找key
def key_from_value(self, val):
# 遍历所有key组成的集合
for key in self.keys():
# 如果指定key对应的value与被搜索的value相同,则返回对应的key
if self[key] == val:
return key
return None
# 根据value删除key
def remove_by_value(self, val):
# 遍历所有key组成的集合
for key in self.keys():
# 如果指定key对应的value与被搜索的value相同,则返回对应的key
if self[key] == val:
self.pop(key)
return
| [
"842056007@qq.com"
] | 842056007@qq.com |
a6740c96f8a8e3414354422c2e5bf6332fd76a4e | b1defdc230d5c06ea397b13a3f7e445914fb9761 | /Chapter_08/src/knock_72.py | 4702a257e10553e7fef2735e49911b4caeb1271b | [] | no_license | t-tagami/100_knock | cd885aed6ebd5f0cc7c544df4cf0ea3f2812ba85 | f79960b290608c05c263862c0da140f9754070e6 | refs/heads/master | 2020-03-20T18:36:47.475278 | 2019-02-27T19:31:46 | 2019-02-27T19:31:46 | 137,596,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | # 72. 素性抽出
# 極性分析に有用そうな素性を各自で設計し,学習データから素性を抽出せよ.素性としては,レビューからストップワードを除去し,
# 各単語をステミング処理したものが最低限のベースラインとなるであろう.
from knock_71 import is_stopword
from nltk.stem.porter import PorterStemmer as PS
import pickle, re
from tqdm import tqdm
def rm_symbol(sent):
return re.sub(symbol, '', sent)
def stemming(sent):
return ' '.join(ps.stem(word) for word in rm_symbol(sent).split() if not is_stopword(word))
def main():
X_words, Y_labels = [], []
with open('work/sentiment.txt', encoding='latin-1') as f:
for line in tqdm(f):
label, sent = line.split(' ', 1)
X_words.append(stemming(sent))
Y_labels.append(label)
with open('work/X_words.pickle', mode='wb') as f, open('work/Y_labels.pickle', mode='wb') as g:
pickle.dump(X_words, f)
pickle.dump(Y_labels, g)
if __name__ == '__main__':
symbol = re.compile(r"[#$%&'()*+-/:;<=>@[\]^_`{|}~”!?\"#$%&’()=~|‘{+*}<>_-^¥@「;:」、。・!?]")
ps = PS()
main() | [
"tagami@ecei.tohoku.ac.jp"
] | tagami@ecei.tohoku.ac.jp |
9e04759332a82f222f84a256886b4bd3e5300456 | e42478c0c501a11280a3b0b3266a931215fd5a34 | /fxdayu_data/handler/base.py | 3e03b7fc7e8eeb0830d6ff42ded200f68ffccb42 | [] | no_license | limingbei/fxdayu_data | d36af819ee32e32e541eaf205b0e1c9309ffc89a | 2d1541def42b31e839e1027a85cfd08665f731a3 | refs/heads/master | 2020-03-17T23:16:37.656128 | 2018-01-05T05:50:41 | 2018-01-05T05:50:41 | 134,038,018 | 1 | 0 | null | 2018-05-19T06:55:59 | 2018-05-19T06:55:59 | null | UTF-8 | Python | false | false | 475 | py | # encoding:utf-8
from datetime import datetime
from pymongo.mongo_client import database
import pandas as pd
import pymongo
class DataHandler(object):
def write(self, *args, **kwargs):
pass
def read(self, *args, **kwargs):
pass
def inplace(self, *args, **kwargs):
pass
def update(self, *args, **kwargs):
pass
def delete(self, *args, **kwargs):
pass
def table_names(self, *args, **kwargs):
pass
| [
"862786917@qq.com"
] | 862786917@qq.com |
fb464703e73da00cfc3695c6632d3fa3aaf7c9af | fa2e3a4392a218d3b818b313e59ffe5cf25ae328 | /constants.py | 4d27d757aa9b1bc096da2916c91a5b5434a19526 | [] | no_license | prashantramnani/RL_TermProject | 62df3477d81517989159bb2fb4c0381d2e8c2660 | 78216bfffc8cf8e228e879e740775b62baf89b06 | refs/heads/master | 2023-01-09T06:44:55.225285 | 2020-11-12T16:57:16 | 2020-11-12T16:57:16 | 311,939,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from collections import namedtuple
make_episode = namedtuple('Episode',
field_names=['states',
'actions',
'rewards',
'init_command',
'total_return',
'length',
])
replay_size = 1000
n_warm_up_episodes = 100
last_few = 150
max_reward = 350
hidden_size = 128
learning_rate = 0.0003
return_scale = 0.02
horizon_scale = 0.01
n_main_iter = 1200
n_updates_per_iter = 100
n_episodes_per_iter = 20
batch_size = 768
max_steps = 300
max_steps_reward = -50
evaluate_every = 10
n_evals = 1
stop_on_solved = False
target_return = 200 | [
"ramnani.prashant@gmail.com"
] | ramnani.prashant@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.