blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1ec4ea09252c17ded8ddff29b0cb46e738494e7 | a42dbf73822895c103883a773085a9d0d32b910b | /packages/bmp-sensor/read_barometer.py | 8d8045f9acb5a055b1064cbb241b84602d9bf2b2 | [] | no_license | Tchanu/home-control-back | 4e1724834319084880ae87c1c728eeefc4d7fd24 | 21944603040cd806adddefc8c159aa3a754cadaa | refs/heads/master | 2020-04-08T05:15:31.438259 | 2019-02-17T19:59:46 | 2019-02-17T19:59:46 | 159,053,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #!/usr/bin/python
import sys
# Third-party driver for the Bosch BMP085/BMP180 barometric sensor (I2C).
import Adafruit_BMP.BMP085 as BMP085
# High-resolution mode: more internal samples per reading, slower but more precise.
sensor = BMP085.BMP085(mode=BMP085.BMP085_HIGHRES)
# Emit one JSON object on stdout so a parent process (the Node backend)
# can parse the readings. Keys/format must stay stable for the consumer.
print('{')
print(' "temp": {0:0.2f},'.format(sensor.read_temperature()))
print(' "pressure": {0:0.0f},'.format(sensor.read_pressure()))
print(' "altitude": {0:0.0f},'.format(sensor.read_altitude()))
print(' "seaLevelPressure": {0:0.0f}'.format(sensor.read_sealevel_pressure()))
print('}')
# Flush explicitly: output may be piped, and the consumer reads it immediately.
sys.stdout.flush()
| [
"t.chanukvadze@gmail.com"
] | t.chanukvadze@gmail.com |
d1447815d97faff47b44f8a1895258fb69c4f969 | 2c8d3e341e813c1b1b88ae824edeaadb366aec0a | /Parser/SW4/SW4/bin/Debug/smo2-25-path-31.py | ebf93cad9a6ff9ab926373c9abce13ff101bc0cb | [] | no_license | kiriphorito/MoveAndTag-Manticore | 2e24a958f4941556b2d2714563718069cc5b208f | d07a3d8c0bacf34cf5f433384a6fd45170896b7a | refs/heads/master | 2021-01-20T11:40:49.232449 | 2017-02-26T14:08:48 | 2017-02-26T14:08:48 | 82,548,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,842 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
u"""
@brief: Path Planning Sample Code with Randamized Rapidly-Exploring Random Trees (RRT)
@author: AtsushiSakai
@license: MIT
"""
import shapely
from shapely.geometry import Polygon, LineString, Point, MultiPoint, GeometryCollection
import matplotlib.pyplot as plt
from ast import literal_eval
import datetime
import random
import math
import copy
def drawRobots(robots):
    u"""Plot each robot position in *robots* (iterable of (x, y)) as a point marker."""
    for coord in robots:
        plt.plot(coord[0], coord[1], "o")
def drawPolygonNoFill(points, color):
    u"""Outline a polygon (no fill) in *color* on the current axes.

    NOTE(review): this function is defined twice in the file; the later
    identical definition shadows this one at import time.
    """
    outline = plt.Polygon(points, color=color, fill=False)
    plt.gca().add_patch(outline)
def drawPolygon(points):
    u"""Draw a filled polygon patch through *points* on the current axes."""
    plt.gca().add_patch(plt.Polygon(points))
def drawPolygons(polygons):
    u"""Draw every polygon in *polygons* filled on the current axes.

    Prints a notice instead of crashing when *polygons* is None or not
    iterable.
    """
    try:
        for pts in polygons:
            drawPolygon(pts)
    except (TypeError, ValueError):
        # Bug fix: iterating None raises TypeError, not ValueError, so the
        # original except clause never caught the case it was written for.
        print ("no polygons specified")
def drawPolygonNoFill(points, color):
    u"""Outline a polygon (no fill) in *color* on the current axes.

    NOTE(review): duplicate of the earlier definition of the same name;
    this one wins at import time.
    """
    patch = plt.Polygon(points, color=color, fill=False)
    plt.gca().add_patch(patch)
def drawPolygonsNoFill(polygons):
    u"""Outline every polygon in *polygons* in red on the current axes.

    Prints a notice instead of crashing when *polygons* is None or not
    iterable.
    """
    try:
        for pts in polygons:
            drawPolygonNoFill(pts, 'red')
    except (TypeError, ValueError):
        # Bug fix: iterating None raises TypeError, not ValueError, so the
        # original except clause never caught the case it was written for.
        print ("no polygons specified")
class RRT():
    u"""
    Rapidly-exploring Random Tree (RRT) planner.

    Grows a tree from the start toward the goal by sampling random points,
    stepping the nearest tree node a fixed distance toward each sample, and
    keeping only collision-free extensions.
    """
    def __init__(self, start, goal, obstacleList, randArea, expandDis=1.0, goalSampleRate=5, maxIter=500):
        u"""
        start: start position [x, y]
        goal: goal position [x, y]
        obstacleList: obstacle polygons, each a list of (x, y) vertices
        randArea: random sampling bounds [min, max], applied to both axes
        expandDis: step length of each tree extension
        goalSampleRate: percent chance (0-100) of sampling the goal directly
        maxIter: kept for interface compatibility (Planning loops until the
                 goal is reached and does not consult it)
        """
        self.start = Node(start[0], start[1])
        self.end = Node(goal[0], goal[1])
        self.minrand = randArea[0]
        self.maxrand = randArea[1]
        self.expandDis = expandDis
        self.goalSampleRate = goalSampleRate
        self.maxIter = maxIter
        # Bug fix: the original discarded this argument and the methods read
        # the module-level global `obstacleList` instead. Store it so the
        # planner actually uses what the caller passed in.
        self.obstacleList = obstacleList
    def Planning(self, animation=True):
        u"""
        Run the RRT search until the goal is reached.

        animation: when True, redraw the tree after every sample.
        Returns the path as a list of [x, y] points ordered goal -> start.
        """
        self.nodeList = [self.start]
        while True:
            # Biased sampling: occasionally aim straight at the goal.
            if random.randint(0, 100) > self.goalSampleRate:
                rnd = [random.uniform(self.minrand, self.maxrand), random.uniform(self.minrand, self.maxrand)]
            else:
                rnd = [self.end.x, self.end.y]
            # Extend the nearest existing node one step toward the sample.
            nind = self.GetNearestListIndex(self.nodeList, rnd)
            nearestNode = self.nodeList[nind]
            theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
            newNode = copy.deepcopy(nearestNode)
            newNode.x += self.expandDis * math.cos(theta)
            newNode.y += self.expandDis * math.sin(theta)
            newNode.parent = nind
            # Discard extensions whose connecting segment hits an obstacle.
            if not self.__CollisionCheck(newNode, self.obstacleList, nearestNode):
                continue
            self.nodeList.append(newNode)
            # Goal test: within one step AND reachable without collision.
            dx = newNode.x - self.end.x
            dy = newNode.y - self.end.y
            d = math.sqrt(dx * dx + dy * dy)
            if d <= self.expandDis:
                if not self.__CollisionCheck(newNode, self.obstacleList, self.end):
                    continue
                else:
                    #print("Goal!!")
                    break
            if animation:
                self.DrawGraph(rnd)
        # Walk parent indices back from the last node to reconstruct the path.
        path = [[self.end.x, self.end.y]]
        lastIndex = len(self.nodeList) - 1
        while self.nodeList[lastIndex].parent is not None:
            node = self.nodeList[lastIndex]
            path.append([node.x, node.y])
            lastIndex = node.parent
        path.append([self.start.x, self.start.y])
        return path
    def DrawGraph(self, rnd=None):
        u"""
        Redraw the tree, obstacles, and endpoints (for animation).

        rnd: the latest random sample, shown as a black triangle.
        """
        import matplotlib.pyplot as plt
        plt.clf()
        if rnd is not None:
            plt.plot(rnd[0], rnd[1], "^k")
        for node in self.nodeList:
            if node.parent is not None:
                plt.plot([node.x, self.nodeList[node.parent].x], [node.y, self.nodeList[node.parent].y], "-g")
        drawPolygons(self.obstacleList)
        plt.plot(self.start.x, self.start.y, "xr")
        plt.plot(self.end.x, self.end.y, "xr")
        plt.axis()
        plt.grid(True)
        plt.pause(0.01)
    def GetNearestListIndex(self, nodeList, rnd):
        u"""Return the index of the node in nodeList closest to point rnd.

        Compares squared distances (no sqrt needed for argmin).
        """
        dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in nodeList]
        minind = dlist.index(min(dlist))
        return minind
    def __CollisionCheck(self, node, obstacleList, nearestNode):
        u"""Return True if the segment nearestNode -> node avoids all obstacles."""
        first = [nearestNode.x, nearestNode.y]
        second = [node.x, node.y]
        return LineCollisionCheck(first, second, obstacleList)
def LineCollisionCheck(first, second, obstacleList):
    u"""
    Return True if the segment first -> second touches no obstacle polygon.

    first, second: segment endpoints as [x, y]
    obstacleList: list of polygons, each a list of (x, y) vertices

    Uses the module-level shapely imports; the original re-imported
    `geometry` (and an unused `wkt`) locally and carried ~30 lines of
    dead commented-out code, removed here.
    """
    # Tolerance buffer so a segment grazing a boundary still counts as a
    # collision. May need adjusting for a different numeric precision.
    EPS = 1.2e-16
    segment = LineString([(first[0], first[1]), (second[0], second[1])])
    for poly in obstacleList:
        if Polygon(poly).buffer(EPS).intersects(segment):
            return False  # segment crosses (or grazes) this obstacle
    return True
#============ changed here =======
def supersmoothie(smoothie,obstacleList):
    u"""Shortcut-smooth a waypoint path in place.

    For each waypoint (scanning from the start of the path), find the
    farthest later waypoint reachable by a straight collision-free segment
    and delete every waypoint between them.

    smoothie: list of [x, y] waypoints; mutated in place and returned.
    obstacleList: obstacle polygons passed through to LineCollisionCheck.
    """
    path = smoothie
    state = True
    counter1 = 0
    counter2 = len(path)-1
    while state:
        # Re-read the last index: deletions below shorten the list.
        counter2 = len(path)-1
        if counter1 == counter2:
            # Reached the final waypoint; no further shortcuts possible.
            state = False
            break
        coord1 = path[counter1]
        # Try the farthest waypoint first, walking backwards until a
        # straight segment from coord1 is collision-free.
        for counter in range(counter2,0,-1):
            coord2 = path[counter]
            if LineCollisionCheck(coord1,coord2,obstacleList): #if no obstacle
                # Drop the intermediate waypoints the shortcut skips over.
                del path[(counter1+1):(counter)]
                break
        counter1 += 1
    return path
class Node():
    u"""A single vertex of the RRT search tree."""
    def __init__(self, x, y):
        # Planar position of this vertex.
        self.x, self.y = x, y
        # Index of the parent vertex in the planner's node list
        # (None until the tree links this node in; None for the root).
        self.parent = None
def rrtpath(obstacles, startcoord, goalcoord, randAreas):
    u"""
    Plan a collision-free path from startcoord to goalcoord with RRT,
    shortcut-smooth it, plot it in red, and return it ordered start -> goal.

    obstacles: obstacle polygons (lists of (x, y) vertices)
    startcoord, goalcoord: [x, y] endpoints
    randAreas: [min, max] sampling bounds for the planner
    """
    planner = RRT(start=startcoord, goal=goalcoord, randArea=randAreas, obstacleList=obstacles)
    rawPath = planner.Planning(animation=False)
    smoothed = supersmoothie(rawPath, obstacles)
    xs = [p[0] for p in smoothed]
    ys = [p[1] for p in smoothed]
    plt.plot(xs, ys, '-r')
    # Planning builds the path goal-first; flip it to run start -> goal.
    smoothed.reverse()
    return smoothed
obstacleList = [[(0.11019254188514943,-0.033252198573121027),(0.08691070491878392,2.9666574594111776),(-0.913059181075982,2.958896847089056),(-0.9208197933981044,3.9588667330838216),(1.0791199785914285,3.9743879577280654),(1.1567261018126462,-6.025310902219596),(3.1566658738021784,-6.009789677575353),(3.07905975058096,3.9899091823723096),(4.079029636575727,3.9976697946944317),(4.257523719984532,-19.00163758318519),(2.2575839479949953,-19.017158807829432),(2.179977824773777,-9.017459947881774),(0.1800380527842449,-9.032981172526018),(0.2576441760054621,-19.032680032473678),(-0.7423257099892999,-19.040440644795797),(-0.7888893839220328,-13.040621328827205),(-2.7888291559115723,-13.056142553471448),(-2.773307931267319,-15.056082325460983),(-4.773247703256854,-15.071603550105223),(-4.765487090934737,-16.071573436099985),(-2.7655473189452007,-16.056052211455746),(-2.7577867066230746,-17.056022097450512),(-5.757696364607374,-17.079303934416878),(-5.780978201573743,-14.07939427643258),(-6.78094808756851,-14.0871548887547),(-6.757666250602139,-17.087064546738997),(-7.757636136596918,-17.094825159061116),(-7.749875524274789,-18.094795045055882),(-2.75002609430096,-18.055991983445278),(-2.742265481978834,-19.055961869440043),(-25.741572859858454,-19.234455952848847),(-25.695009185925723,-25.23427526881744),(-18.695219983962367,-25.179950982562588),(-18.291668143212032,-77.17838505429044),(-5.292059625280101,-77.07749709410284),(-5.695611466030389,-25.079063022375006),(4.304087393917248,-25.00145689915379),(4.350651067849983,-31.001276215122388),(9.35050049782383,-30.962473153511773),(9.078879066549558,4.036472856305042),(11.078818838539092,4.051994080949282),(11.063297613894846,6.051933852938816),(5.063478297926248,6.0053701790060865),(5.016914623993522,12.005189494974681),(4.0169447379987515,11.99742888265256),(4.063508411931484,5.997609566683963),(2.06356863994195,5.98208834203972),(2.02476557833134,10.981937772013552),(1.0247956923365782,10.974177159691429),(1.063598753947
185,5.974327729717598),(-0.9363410180423481,5.958806505073355),(-0.9441016303644705,6.958776391068121),(-1.9440715163592361,6.951015778745999),(-1.928550291714992,4.951076006756467),(-3.9284900637045257,4.935554782112222),(-4.006096186925744,14.935253642059884),(-6.006035958915275,14.91973241741564),(-5.990514734271034,12.919792645426108),(-8.990424392255333,12.896510808459741),(-8.982663779933207,11.896540922464977),(-5.98275412194891,11.919822759431343),(-5.9284298356940575,4.920033557467978),(-8.928339493678356,4.896751720501613),(-8.920578881356233,3.896781834506848),(-1.9207896793928703,3.9511061207617),(-1.9130290670707477,2.9511362347669343),(-2.9129989530655145,2.9433756224448118),(-2.9052383407433933,1.9434057364500452),(-6.905117884722457,1.9123632871615595),(-6.897357272400336,0.9123934011667933),(-4.897417500410803,0.9279146258110347),(-4.881896275766559,-1.0720251461784969),(-7.881805933750857,-1.0953069831448634),(-7.912848383039345,2.904572560834203),(-8.912818269034112,2.896811948512081),(-8.89729704438987,0.8968721765225512),(-12.897176588368934,0.8658297272340604),(-12.889415976046811,-0.13414015876070418),(-8.889536432067747,-0.10309770947221705),(-8.881775819745625,-1.1030675954669855),(-12.88165536372469,-1.1341100447554688),(-12.866134139080446,-3.134049816745008),(-4.866375051122317,-3.0719649181680286),(-4.858614438800193,-4.071934804162796),(-3.8586445528054285,-4.0641741918406735),(-3.8974476144160373,0.9356752381331583),(-2.897477728421271,0.9434358504552797),(-2.8897171160991495,-0.05653403553948637),(-1.889747230104383,-0.04877342321736459),(-1.8431835561716523,-6.0485927391859615),(-0.8432136701768869,-6.040832126863839),(-0.8897773441096168,-0.04101281089524281)],[(17.400649320507974,-33.71949229515208),(18.356933127973114,-34.011932510546565),(18.06449291257861,-34.96821631801171),(16.151925297648344,-34.38333588722271),(15.859485082253842,-35.33961969468785),(17.772052697184144,-35.92450012547683),(17.187172266395162,-37.837067740407
12),(11.44946942160431,-36.08242644804017),(11.157029206209828,-37.03871025550529),(13.06959682114011,-37.62359068629431),(12.777156605745628,-38.57987449375945),(9.908305183350187,-37.702553847575956),(11.078066044928146,-33.87741861771543),(12.990633659858421,-34.46229904850438),(13.283073875252924,-33.506015241039265),(11.370506260322653,-32.921134810250265),(11.662946475717142,-31.96485100278512),(10.706662668252001,-31.67241078739064),(9.536901806674017,-35.49754601725115),(4.755482769348346,-34.03534494027877),(4.463042553953832,-34.991628747743846),(9.244461591279531,-36.453829824716315),(8.952021375885046,-37.41011363218147),(1.3017509161639325,-35.07059190902549),(0.716870485374951,-36.98315952395579),(8.367140945096075,-39.32268124711176),(5.442738791151182,-48.8855193217632),(7.3553064060814926,-49.47039975255218),(7.6477466214759655,-48.514115945087),(30.59855800063948,-55.532681114554634),(32.35319929300634,-49.79497826976387),(9.402387913842906,-42.776413100296196),(10.27970856002635,-39.907561677900745),(12.192276174956648,-40.49244210868974),(11.607395744167675,-42.40500972362003),(12.563679551632866,-42.697449939014525),(14.025880628605243,-37.9160309016888),(14.982164436070397,-38.2084711170833),(13.519963359097959,-42.989890154409004),(14.47624716656309,-43.28233036980349),(15.938448243535541,-38.500911332477784),(16.89473205100068,-38.79335154787228),(16.0174114048172,-41.66220297026767),(16.973695212282358,-41.95464318566218),(17.558575643071347,-40.0420755707319),(31.90283275504851,-44.4286788016491),(32.195272970442986,-43.47239499418396),(35.0641243928384,-44.349715640367435),(35.35656460823287,-43.39343183290226),(32.48771318583744,-42.516111186718845),(32.780153401231956,-41.559827379253676),(18.435896289254792,-37.17322414833647),(18.728336504649285,-36.21694034087133),(25.422323156905275,-38.26402184863272),(25.714763372299764,-37.307738041167596),(19.02077672004377,-35.26065653340619),(19.313216935438252,-34.30437272594105),(20.269500742
90339,-34.596812941335536),(20.561940958297885,-33.640529133870395),(23.4307923806933,-34.517849780053844),(23.723232596087787,-33.56156597258868),(22.766948788622653,-33.26912575719423),(23.351829219411623,-31.35655814226395),(28.133248256737332,-32.81875921923636),(27.255927610553876,-35.6876106416318),(28.212211418019017,-35.98005085702621),(29.089532064202466,-33.11119943463082),(30.045815871667585,-33.40363965002528),(30.33825608706207,-32.4473558425602),(28.4256884721318,-31.86247541177121),(36.61401450317736,-5.086528802747274),(35.65773069571233,-4.794088587352828),(37.412371988079315,0.9436142574379787),(35.499804373148976,1.5284946882269566),(33.745163080782035,-4.209208156563875),(32.78887927331691,-3.9167679411693292),(24.600553242271232,-30.692714550193266),(23.644269434806105,-30.40027433479883),(24.229149865595083,-28.487706719868523),(23.27286605812995,-28.195266504474027),(21.810664981157505,-32.97668554179973),(20.85438117369236,-32.684245326405254),(21.14682138908686,-31.72796151894012),(20.190537581621708,-31.435521303545634),(21.36029844319967,-27.61038607368509),(20.404014635734516,-27.317945858290575),(19.819134204945556,-29.23051347322086),(15.993998975085022,-28.06075261164291),(15.7015587596905,-29.01703641910807),(19.526693989551056,-30.186797280686015),(19.234253774156574,-31.14308108815115),(18.277969966691433,-30.850640872756657),(17.98552975129694,-31.8069246802218),(16.072962136366687,-31.222044249432816),(15.780521920972184,-32.17832805689796),(17.69308953590246,-32.763208487686946)],[(-27.8904877290891,34.81391505870865),(-28.279435699813977,35.73517478108286),(-25.515656532691374,36.90201869325751),(-24.348812620516718,34.13823952613488),(-23.427552898142515,34.52718749685978),(-24.59439681031717,37.29096666398239),(-23.673137087942976,37.67991463470728),(-24.062085058667854,38.601174357081476),(-24.98334478104205,38.212226386356605),(-25.761240722491845,40.054745831105016),(-20.233682388246606,42.38843365545433),(-17.8999945638973
23,36.860875321209086),(-16.978734841523107,37.249823291934),(-19.31242266587241,42.77738162617921),(-18.39116294349821,43.16632959690406),(-15.279579177699128,35.79625181791047),(-14.358319455324942,36.18519978863538),(-15.914111338224446,39.87023867813215),(-6.701514114482432,43.75971838538102),(-7.479410055932242,45.60223783012951),(-16.692007279674264,41.71275812288061),(-17.469903221124042,43.55527756762901),(-16.548643498749826,43.9442255383539),(-17.326539440199557,45.78674498310227),(-26.539136663941616,41.8972652758534),(-26.928084634666504,42.81852499822757),(-22.32178602279548,44.76326485185205),(-22.710733993520368,45.684524574226245),(-27.31703260539138,43.739784720601826),(-27.705980576116247,44.661044442976014),(-28.627240298490452,44.27209647225115),(-25.90460450341627,37.82327841563171),(-26.82586422579045,37.43433044490683),(-29.54850002086466,43.88314850152625),(-30.469759743238875,43.49420053080138),(-27.74712394816466,37.04538247418194),(-28.668383670538866,36.65643450345706),(-29.05733164126375,37.57769422583126),(-29.978591363637953,37.18874625510637),(-31.534383246537498,40.873785144603175),(-32.455642968911704,40.48483717387829),(-32.06669499818682,39.56357745150411),(-32.987954720561014,39.17462948077922),(-34.93269457418548,43.78092809265021),(-35.85395429655964,43.39198012192535),(-35.465006325834764,42.47072039955114),(-38.228785492957385,41.303876487376456),(-41.34036925875644,48.67395426637012),(-2.647460919039897,65.00976903681531),(-4.981148743389372,70.53732737106061),(-43.67405708310577,54.20151260061539),(-44.451953024555564,56.04403204536375),(-46.29447246930391,55.26613610391398),(-40.07130493770577,40.52598054592673),(-40.99256466007997,40.13703257520186),(-42.54835654297955,43.82207146469866),(-43.46961626535371,43.43312349397376),(-41.9138243824542,39.74808460447685),(-42.8350841048284,39.359136633751994),(-42.05718816337861,37.51661718900366),(-34.68711038438499,40.628200954802736),(-33.90921444293523,38.7856815100543),(-35.
75173388768364,38.00778556860452),(-35.362785916958735,37.086525846230344),(-31.677747027461923,38.64231772912991),(-30.899851086012156,36.7997982843815),(-31.821110808386365,36.4108503136566),(-31.43216283766148,35.48959059128242),(-33.274682282409884,34.71169464983262),(-32.88573431168499,33.79043492745841),(-31.043214866936594,34.56833086890819),(-30.65426689621171,33.64707114653399),(-29.733007173837493,34.036019117258896),(-29.34405920311262,33.11475939488467),(-31.186578647861026,32.33686345343491),(-30.79763067713613,31.415603731060706),(-28.955111232387733,32.19349967251048),(-28.566163261662844,31.272239950136274),(-31.329942428785444,30.105396037961597),(-30.940994458060565,29.184136315587402),(-30.019734735686363,29.573084286312287),(-29.24183879423656,27.73056484156389),(-31.084358238984997,26.95266890011412),(-30.695410268260105,26.031409177739903),(-28.85289082351174,26.80930511918972),(-28.074994882061933,24.96678567444129),(-27.153735159687795,25.35573364516616),(-29.09847501331218,29.962032257037183),(-28.177215290937994,30.350980227762065),(-27.788267320213105,29.42972050538787),(-26.867007597838874,29.81866847611274),(-28.03385151001354,32.582447643235355),(-27.112591787639342,32.971395613960254),(-24.389955992565167,26.52257755734081),(-23.46869627019095,26.91152552806569),(-23.857644240915903,27.83278525043988),(-22.936384518541637,28.22173322116478),(-22.158488577091916,26.37921377641633),(-28.607306633711318,23.656577981342195),(-28.21835866298636,22.735318258967986),(-25.454579495863822,23.902162171142653),(-16.119828198466706,1.791928834161716),(-15.19856847609255,2.180876804886566),(-14.420672534642883,0.33835736013809026),(-13.499412812268504,0.7273053308630466),(-14.277308753718303,2.5698247756114654),(-13.356049031344055,2.9587727463363684),(-22.690800328741215,25.06900608331726),(-21.769540606367023,25.457954054042155),(-20.99164466491725,23.61543460929374),(-20.07038494254305,24.004382580018614),(-20.848280883992782,25.84690202476706),
(-6.108125326005531,32.07006955636505),(-6.886021267455273,33.9125890011135),(-21.626176825442563,27.68942146951544),(-22.015124796167438,28.610681191889654),(-18.33008590667061,30.166473074789167),(-18.719033877395496,31.08773279716339),(-24.246592211640706,28.754044972814125),(-24.63554018236561,29.675304695188323),(-20.950501292868793,31.23109657808784),(-21.72839723431854,33.07361602283625),(-25.41343612381538,31.517824139936707),(-26.191332065265133,33.360343584685126),(-25.270072342890934,33.74929155540999),(-25.659020313615812,34.67055127778419),(-28.422799480738423,33.50370736560957),(-28.8117474514633,34.42496708798378)],[(-27.771470721193953,-5.366684866522082),(-27.175196946980105,-4.56390370942834),(-26.372415789886354,-5.160177483642189),(-38.29789127416335,-21.21580062551707),(-36.69232895997574,-22.408348173944837),(-39.0774240568311,-25.61947280231984),(-43.09132984229987,-22.638103931250594),(-43.6876036165138,-23.44088508834428),(-41.27926014523254,-25.229706410985862),(-42.47180769366026,-26.835268725173332),(-41.669026536566406,-27.43154249938725),(-40.47647898813871,-25.82598018519974),(-39.67369783104494,-26.422253959413567),(-40.26997160525875,-27.225035116507303),(-39.46719044816497,-27.821308890721202),(-35.88954780288201,-23.004621948158647),(-35.0867666457882,-23.600895722372556),(-23.161291161511354,-7.545272580497544),(-21.555728847323856,-8.737820128925245),(-20.959455073110004,-7.93503897183145),(-22.565017387297516,-6.742491423403784),(-19.58364851622831,-2.7285856379350357),(-20.386429673322077,-2.132311863721204),(-23.367798544391263,-6.1462176491899285),(-24.973360858578765,-4.95367010076225),(-21.991991987509536,-0.93976431529354),(-22.79477314460329,-0.34349054107968247),(-25.77614201567251,-4.357396326548434),(-26.57892317276626,-3.761122552334598),(-25.982649398552407,-2.9583413952408373),(-26.78543055564616,-2.36206762102699),(-24.99660923300462,0.04627585025423819),(-25.799390390098374,0.6425496244680939),(-27.5882117127399,-
1.765793846813152),(-28.39099286983365,-1.169520072599303),(-28.987266644047494,-1.9723012296930542),(-30.592828958235007,-0.7797536812653636),(-26.418912538738066,4.839714418390868),(-27.22169369583178,5.435988192604713),(-31.39561011532875,-0.18347990705152029),(-32.198391272422455,0.4127938671623559),(-28.024474852925543,6.0322619668185755),(-28.827256010019234,6.628535741032415),(-29.423529784233107,5.82575458393868),(-30.226310941326865,6.422028358152515),(-29.033763392899164,8.02759067234),(-26.625419921617944,6.238769349698453),(-26.029146147404088,7.041550506792195),(-26.83192730449791,7.637824281006067),(-25.63937975607017,9.243386595193547),(-26.44216091316392,9.839660369407376),(-27.634708461591625,8.234098055219894),(-28.43748961868525,8.830371829433789),(-26.64866829604389,11.238715300714976),(-27.45144945313757,11.834989074928824),(-31.029092098420595,7.018302132366371),(-32.63465441260817,8.210849680794025),(-28.460737993111188,13.83031778045028),(-29.263519150204868,14.42659155466411),(-30.45606669863263,12.821029240476609),(-32.06162901282012,14.013576788904285),(-32.65790278703394,13.210795631810608),(-31.05234047284648,12.018248083382911),(-32.24488802127419,10.412685769195395),(-35.45601264964919,12.797780866050772),(-33.0709175527938,16.008905494425765),(-34.676479866981175,17.20145304285352),(-37.061574963836605,13.990328414478505),(-37.86435612093037,14.586602188692364),(-38.46062989514427,13.783821031598581),(-32.841161795488034,9.609904612101637),(-33.43743556970185,8.807123455007897),(-35.04299788388926,9.99967100343565),(-35.639271658103176,9.196889846341875),(-30.019803558446945,5.022973426844922),(-30.616077332660847,4.220192269751175),(-37.03832658941073,8.990382463461998),(-37.63460036362464,8.187601306368236),(-36.83181920653099,7.59132753215431),(-38.02436675495862,5.985765217966858),(-42.03827254042737,8.967134089036056),(-42.634546314641206,8.164352931942323),(-41.02898400045372,6.971805383514647),(-42.221531548881416,5.36624306932
713),(-47.84099964853766,9.540159488824001),(-48.43727342275152,8.737378331730278),(-42.81780532309524,4.563461912233382),(-43.41407909730913,3.7606807551396173),(-41.80851678312159,2.568133206711872),(-39.423421686266124,5.779257835086936),(-38.620640529172455,5.182984060873142),(-39.81318807760015,3.577421746685582),(-39.01040692050641,2.981147972471714),(-36.02903804943719,6.995053757940488),(-31.212351106874692,3.4174111126574207),(-31.808624881088534,2.6146299555636823),(-35.8225306665573,5.595998826632911),(-36.41880444077117,4.7932176695391675),(-32.404898655302404,1.8118487984699163),(-33.001172429516224,1.0090676413761859),(-33.80395358660998,1.6053414155900283),(-34.400227360823855,0.802560258496257),(-29.583540418261343,-2.775082386786786),(-30.179814192475185,-3.577863543880526),(-29.37703303538143,-4.17413731809439),(-30.56958058380914,-5.77969963228188),(-36.99182984055912,-1.0095094385711478),(-37.588103614772955,-1.812290595664865),(-31.16585435802299,-6.582480789375623),(-31.762128132236843,-7.385261946469354),(-37.38159623189308,-3.2113455269724698),(-37.97787000610691,-4.014126684066204),(-32.35840190645068,-8.188043103563114),(-32.954675680664536,-8.990824260656849),(-38.57414378032075,-4.816907841159947),(-39.17041755453461,-5.619688998253702),(-38.367636397440855,-6.215962772467556),(-43.13782659115161,-12.638212029217549),(-42.33504543405788,-13.234485803431369),(-37.5648552403471,-6.812236546681382),(-36.76207408325336,-7.408510320895228),(-42.12853805117801,-14.633540734738897),(-66.21197276399042,3.2546724916762706),(-69.1933416350597,-0.7592332937924118),(-45.109906922247205,-18.647446520207673),(-45.70618069646106,-19.450227677301424),(-43.297837225179784,-21.239048999942995),(-34.35373061197211,-9.197331643536794),(-33.55094945487837,-9.793605417750598),(-34.14722322909223,-10.596386574844338),(-33.344442071998465,-11.192660349058208),(-28.574251878287697,-4.770411092308235)],[(66.60322623182422,37.83233353631165),(66.86955455022202,36.8
6845116443247),(62.050142690826235,35.5368095724435),(62.31647100922399,34.572927200564365),(67.1358828686198,35.904568792553334),(67.40221118701763,34.940686420674155),(68.36609355889674,35.20701473907194),(68.89875019569241,33.27924999531365),(66.97098545193403,32.74659335851803),(67.23731377033185,31.78271098663886),(69.16507851409017,32.31536762343446),(69.69773515088579,30.38760287967618),(67.76997040712747,29.854946242880562),(68.03629872552533,28.89106387100143),(69.96406346928363,29.423720507797),(70.23039178768136,28.45983813591784),(71.19427415956063,28.726166454315663),(70.92794584116282,29.690048826194847),(71.89182821304193,29.956377144592583),(72.42448484983761,28.02861240083436),(67.60507299044173,26.69697080884527),(67.87140130883952,25.73308843696612),(72.6908131682354,27.064730028955168),(72.95714148663326,26.10084765707596),(67.17384725535818,24.50287774668909),(67.44017557375598,23.53899537480997),(73.22346980503109,25.136965285196865),(73.75612644182655,23.209200541438452),(66.04506746679321,21.07857399425607),(66.31139578519104,20.114691622376846),(74.02245476022445,22.245318169559347),(74.55511139702018,20.317553425801016),(64.91628767822829,17.65427024182299),(63.051989449443816,24.40144684497709),(66.90751893696033,25.466760118568324),(66.37486230016475,27.394524862326627),(65.41097992828563,27.1281965439288),(64.07933833629664,31.947608403324573),(66.00710308005493,32.48026504012025),(65.7407747616571,33.44414741199942),(63.813010017898755,32.911490775203816),(63.54668169950101,33.87537314708295),(62.58279932762181,33.60904482868517),(64.44709755640639,26.861868225531005),(62.51933281264812,26.329211588735383),(62.253004494250334,27.29309396061457),(60.32523975049196,26.760437323818962),(62.98852293447001,17.12161360502734),(62.02464056259083,16.85528528662948),(61.49198392579535,18.783050030387837),(60.5281015539161,18.516721711990076),(61.06075819071167,16.588956968231763),(60.096875818832764,16.32262864983388),(34.26302893424553,109.8192
1872211258),(35.22691130612524,110.08554704051045),(37.35753785330739,102.37448806547728),(38.321420225186074,102.64081638387485),(36.19079367800411,110.35187535890825),(38.118558421762145,110.88453199570381),(46.64106461049202,80.04029609557071),(50.49659409800853,81.10560936916178),(41.97408790927844,111.94984526929478),(48.72126451243287,113.81414349807942),(50.05290610442185,108.99473163868366),(51.01678847630103,109.26105995708157),(49.68514688431203,114.08047181647736),(51.612911628070144,114.61312845327299),(51.08025499127519,116.54089319703134),(59.7551963381871,118.93784806261161),(59.48886801978925,119.90173043449072),(67.19992699482307,122.03235698167308),(66.93359867642512,122.99623935355213),(62.114186817029264,121.66459776156334),(60.24988858824444,128.41177436471747),(59.286006216365,128.1454460463197),(61.15030444514988,121.3982694431654),(59.222539701391824,120.86561280637002),(58.95621138299349,121.82949517824906),(50.28127003608134,119.43254031266873),(50.01494171768351,120.39642268454783),(37.48447088325406,116.93415454537633),(33.22321778888931,132.35627249544302),(29.367688301372745,131.29095922185172),(33.62894139573811,115.86884127178551),(32.66505902385826,115.60251295338733),(32.39873070546105,116.56639532526683),(19.86825987103122,113.10412718609487),(47.566404984403626,12.86036051066236),(33.108169406216255,8.865435734695122),(31.2438711774316,15.612612337849285),(30.279988805552385,15.34628401945147),(32.14428703433696,8.59910741629757),(28.28875754682047,7.533794142706228),(29.886727457207193,1.750499911431234),(76.15308130740695,14.534259194526118),(78.81636449138486,4.895435475734402),(71.1053055163516,2.7648089285518935),(71.37163383474953,1.8009265566728558),(79.08269280978259,3.931553103855272),(79.34902112818075,2.967670731976213),(83.20455061569726,4.032984005567471),(75.74735770055861,31.021690418183844),(78.63900481619608,31.820675373377227),(77.57369154260483,35.67620486089393),(69.86263256757152,33.54557831371141),(69.3299759
3077593,35.47334305746974),(70.29385830265511,35.73967137586751),(70.02752998425731,36.70355374774671),(73.88305947177395,37.76886702133788),(73.61673115337614,38.732749393217155),(71.68896640961782,38.200092756421505),(71.42263809122005,39.16397512830065),(78.16981469437415,41.028273357085205),(77.90348637597637,41.99215572896446),(71.15630977282223,40.1278575001798),(70.88998145442444,41.091739872058994),(69.92609908254529,40.82541155366117),(70.72508403773865,37.93376443802369),(69.7612016658595,37.66743611962587),(69.49487334746173,38.63131849150502),(68.53099097558253,38.364990173107245),(67.46567770199141,42.220519660623886),(82.88779565205793,46.481772754988825),(82.35513901526232,48.40953749874709),(81.39125664338313,48.14320918034928),(81.12492832498529,49.107091552228496),(83.05269306874374,49.63974818902393),(89.44457271029106,26.506571263924435),(90.40845508217015,26.77289958232209),(91.74009667415915,21.953487722926226),(92.70397904603817,22.219816041323845),(91.37233745404927,27.03922790071985),(92.33621982592857,27.30555621911771),(91.53723487073505,30.197203334755223),(145.5146476959681,45.111589165032285),(141.78605123839876,58.60594237134066),(87.80863841316581,43.6915565410633),(85.94434018438113,50.438733144217494),(91.72763441565608,52.036703054604324),(93.32560432604294,46.25340882332941),(94.28948669792207,46.51973714172729),(92.69151678753533,52.30303137300201),(97.51092864693115,53.63467296499103),(98.30991360212457,50.74302584935354),(99.27379597400373,51.00935416775104),(98.4748110188102,53.90100128338901),(100.40257576256856,54.43365792018443),(99.07093417057962,59.25306977958029),(79.79328673299639,53.926503411624225),(79.26063009620081,55.85426815538253),(78.29674772432149,55.58793983698478),(80.42737427150402,47.87688086195143),(79.46349189962483,47.61055254355367),(62.418479522165164,109.29902434382004),(54.707420547131996,107.16839779663758),(71.75243292459157,45.47992599637123),(66.9330210651958,44.14828440438225),(66.66669274679789
,45.1121667762614),(65.70281037491883,44.84583845786359),(67.56710860370339,38.098661854709455)]]
# Script entry: plan one smoothed path through obstacleList and append it
# to a result file.  (Single-argument print(...) works identically under
# Python 2 and 3.)
rand = (-85,161)
content = ""
starttime = datetime.datetime.now()
print("Path 31 of 109")
path = []
start = (29.678455265696826,-38.64042004355886)
goal = (-0.9114987590597821,-59.597094142558504)
print(" Node 1 and 2 of 2")
path += rrtpath(obstacleList,start,goal,rand)
# Serialize as "(x, y), (x, y), ...;" by reshaping the list repr.
pathStr = str(path)[1:-1] + ";"
pathStr = pathStr.replace("[", "(")
pathStr = pathStr.replace("]", ")")
# Bug fix: the original ended with a bare `f.close` (attribute access, not
# a call), so the file handle was never explicitly closed.  `with`
# guarantees flush-and-close even if the write raises.
with open('smo2sol-25-path-31.txt', 'a+') as f:
    f.write(pathStr)
| [
"zcabwhy@ucl.ac.uk"
] | zcabwhy@ucl.ac.uk |
def getcloseparan(s, openindex):
    """Return the index of the ')' that matches the '(' at position openindex.

    Scans the characters after openindex, counting parentheses opened along
    the way; the first unmatched ')' is the partner.  Returns None when the
    string ends before a matching ')' is found.
    """
    depth = 0
    for idx, ch in enumerate(s):
        if idx > openindex:
            if ch == "(":
                depth += 1
            elif ch == ")":
                if depth == 0:
                    return idx
                depth -= 1
    return None


if __name__ == "__main__":
    print(getcloseparan("(hello(morning)world)he he", 1))
| [
"pavankumar2203@gmail.com"
] | pavankumar2203@gmail.com |
4e0a7f8d9a86b8bf6efd82b4c4a9915a8ca162dd | 45794325a8bff57a667becdbc9fcbb635019f4f2 | /src/kendama/scripts/227c_robot.py | 2525015696927dd9b914f07c569c7c4362f84ce1 | [] | no_license | tonyhzheng/ur5e_robotiq_3f_gripper | 26277c02ef7eaca94d416e3b62b1010d81bdfd16 | aa7119332d9828cf953e37197403da5c877b9312 | refs/heads/master | 2020-05-14T11:22:50.764577 | 2019-05-18T22:25:45 | 2019-05-18T22:25:45 | 181,776,306 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,497 | py | #!/usr/bin/env python
"""
This script uses sensor inputs from the LIDAR and 3D cameras to find objects in a new environment. State updates are determined from the turtlebot IMU and enconders.
Commands to the arm are sent to the turtlebot using the MoveIt Python API (http://docs.ros.org/kinetic/api/moveit_tutorials/html/).
Written by Tony Zheng in Fall 2018 at University of California Berkeley in partnership with Siemens.
Supervised by Professor Francesco, Borrelli
"""
import rospy
import time
import ctypes
import struct
import sys, select, termios, tty
import copy
import tf
#import moveit_commander
#import moveit_msgs.msg
import geometry_msgs.msg
from cvxopt import spmatrix, matrix, solvers
#from cvxopt.solvers import qp
import numpy as np
from numpy import matlib
import random as rand
from scipy.linalg import block_diag
#import sensor_msgs.point_cloud2 as pc2
from numpy import sign
from math import pi,sqrt,atan2,sin,cos
#from moveit_commander.conversions import pose_to_list
from tf.transformations import quaternion_from_euler, euler_from_quaternion
import rosbag
import sys
from geometry_msgs.msg import Twist, Vector3
from sensor_msgs.msg import JointState,Joy,PointCloud2
from std_msgs.msg import Float64,Float64MultiArray,String,Bool
#from laser_geometry import LaserProjection
#from nav_msgs.msg import Odometry,OccupancyGrid
from tf.msg import tfMessage
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
import scipy.linalg as sp
from numpy import genfromtxt
import math
import cvxpy as cp
class RobotController():
def __init__(self):
self.N = 5
self.n = 6
self.d = 6
self.delT = 0.01
self.x0 = np.zeros((6,1))
self.last_guess = np.zeros(((self.N+1)*self.n + self.N*self.d,1))
self.solver = 'intpoint' # current options: 'cvx' or 'intpoint' or 'cvxgeneral' or 'primaldual'
# which state in the trajectory are we currently closest to?
self.closestIndex = 0
def true_velocity_callback(self, messaged):
# update the robot's joint angles
self.x0 = messaged.position
def TrackingControl(self):
# set parameters
n = self.n
d = self.d
N = self.N
last_guess = self.last_guess
# set up the QP
A,b,F,g,Q,p = self.setUpQP()
np.savetxt("A.csv", A, delimiter=",")
np.savetxt("b.csv", b, delimiter=",")
np.savetxt("F.csv", F, delimiter=",")
np.savetxt("g.csv", g, delimiter=",")
np.savetxt("Q.csv", Q, delimiter=",")
np.savetxt("p.csv", p, delimiter=",")
inputs_to_apply = np.zeros((1,6))
p2 = p.reshape(p.size,1)
g2 = g.reshape(g.size,1)
b2 = b.reshape(b.size,1)
if self.solver == 'cvx':
# METHOD ONE: CVX QP SOLVER
solvers.options['show_progress'] = False
'quiet' == True
res_cons = solvers.qp(2*matrix(Q), matrix(np.transpose(p)), matrix(F), matrix(g), matrix(A), matrix(b))
if res_cons['status'] == 'optimal':
feasible = 1
Solution = np.squeeze(res_cons['x'])
# are we sending the correct thing?
uPred = np.squeeze(np.transpose(np.reshape((Solution[n*(N+1)+np.arange(d)]),(d,1))))
inputs_to_apply = np.array([uPred[0], uPred[1], uPred[2], uPred[3], uPred[4], uPred[5]])
# this is supposed to print out the number of required iterations
iterations = res_cons['iterations']
else:
return
elif self.solver == 'cvxgeneral':
# METHOD THREE: GENERAL CVX SOLVER
x = cp.Variable(((N+1)*n + N*d,1))
objective = cp.Minimize(cp.quad_form(x,Q) + p*x)
constraints = [cp.matmul(A,x) == b, cp.matmul(F,x)<=g]
prob = cp.Problem(objective, constraints)
result = prob.solve()
# extract num_iters to get the number of iterations
iterations = prob.solver_stats.num_iters
uPred = x.value[n*(N+1)+np.arange(d)]
inputs_to_apply = np.array([uPred[0], uPred[1], uPred[2], uPred[3], uPred[4], uPred[5]])
elif self.solver == 'intpoint':
# METHOD TWO: BRIAN INTERIOR POINT SOLVER
Solution, iterations = InteriorPoint_PhaseI(Q,p2,F,g2,A,b2,last_guess)
#self.last_guess = Solution
uPred = Solution[n*(N+1)+np.arange(d)]
inputs_to_apply = np.array([uPred[0], uPred[1], uPred[2], uPred[3], uPred[4], uPred[5]])
elif self.solver == 'primaldual':
Solution, iterations = PrimalDualIP(Q,p2,F,g2,A,b2,last_guess)
#self.last_guess = Solution
uPred = Solution[n*(N+1)+np.arange(d)]
inputs_to_apply = np.array([uPred[0], uPred[1], uPred[2], uPred[3], uPred[4], uPred[5]])
elif self.solver == 'customintpoint':
Solution, iterations = InteriorPoint_PhaseI_SpeedUp_2(Q,p2,F,g2,A,b2,last_guess)
#self.last_guess = Solution
uPred = Solution[n*(N+1)+np.arange(d)]
inputs_to_apply = np.array([uPred[0], uPred[1], uPred[2], uPred[3], uPred[4], uPred[5]])
return inputs_to_apply, iterations
# send the velocity commands over the publisher
def buildIneqMatrices(self):
# Fx<=g inequality constraints:
# actuator and state constraints
n = self.n #state dimension
d = self.d #input dimension
N = self.N
F_constr = np.eye(N*n) # we constrain u0, x1, u1, x2, ... , xN, uN
F_init = np.zeros((n,n))
Fx = np.hstack((np.zeros((N*n,n)),F_constr))
Fu = np.eye(N*d)
F1 = block_diag(Fx,Fu)
F = np.vstack((F1,-F1))
zeros_array = np.zeros((n,1))
q_max = np.array([[2*np.pi],
[2*np.pi],
[2*np.pi],
[2*np.pi],
[2*np.pi],
[2*np.pi]])
q_min = np.zeros((n,1))
qx = np.tile(q_max,(N,1))
qd_max = np.array([[np.pi],
[np.pi],
[np.pi],
[2*np.pi],
[2*np.pi],
[2*np.pi]])
qu = np.tile(qd_max,(N,1))
g_unit = np.vstack((qx, qu))
g_unit2 = np.vstack((np.tile(q_min,(N,1)),qu))
g = np.vstack((g_unit, g_unit))
return F,g
def buildEqMatrices(self):
# for debugging, we have N = 3
# Ax=b, equality constraints:
# dynamics
# initialization
n = self.n #state dimension
d = self.d #input dimension
N = self.N
delT = self.delT
x0 = self.x0
# edit x0 because joints 1 and 3 are flipped during state reading.
x0 = np.reshape(x0,(n,1))
idx = [2,1,0,3,4,5]
x0 = x0[idx]
A_unit = np.eye(n)
B_unit = delT*np.eye(d)
Gx = np.eye(n * (N + 1))
Gu = np.zeros((n * (N + 1), d * (N)))
for i in range(0, N):
ind1 = n + i * n + np.arange(n)
ind2x = i * n + np.arange(n)
ind2u = i * d + np.arange(d)
Gx[np.ix_(ind1, ind2x)] = -A_unit
Gu[np.ix_(ind1, ind2u)] = -B_unit
A = np.hstack((Gx, Gu))
b = np.zeros((n*(N+1),1))
b[0:n] = x0
return A,b
    def buildCostMatrices(self):
        """Build the quadratic cost z^T Q z + p z.

        Deviation of x1..xN from the reference self.qin is weighted 30,
        input effort is weighted 1; x0 carries only a unit quadratic
        weight against a zero reference.
        NOTE(review): n and d are re-assigned to the literal 6 below,
        shadowing self.n / self.d -- harmless while both are 6, but worth
        unifying.
        """
        N = self.N
        delT = self.delT
        qin = self.qin
        n = self.n
        # qin is an ((N+1)xn)x1 vertical vector stacking the reference commands to track
        qref = np.vstack((np.zeros((n,1)),qin))
        n = 6 #state dimension
        d = 6 #input dimension
        Q1 = 30*np.eye(N*n)
        Q_comp = block_diag(np.eye(n),Q1) #x0 does not have to track the reference
        R = np.eye(N*d)
        Q = block_diag(Q_comp,R)
        # linear term p = -2 * qref_stacked^T * Q (a row vector)
        p1 = np.vstack((qref, np.zeros((N*d,1))))
        p = -2*np.matmul(np.transpose(p1),Q)
        return Q,p
def setUpQP(self):
self.setXRef()
A,b = self.buildEqMatrices()
F,g = self.buildIneqMatrices()
Q,p = self.buildCostMatrices()
return A,b,F,g,Q,p
    def addTrajectory(self, trajectory):
        """Store the recorded reference trajectory (rows of 6 joint positions) for tracking."""
        self.trackTrajectory = trajectory
    def setXRef(self):
        """Advance the tracking index and stack the next N reference rows
        into self.qin as an (N*6) x 1 column vector.

        Columns 0 and 2 of each reference row are swapped via idx to match
        the flipped joint ordering of the state reader.
        NOTE(review): x0 is read from self but never used here.
        """
        x0 = self.x0
        N = self.N
        closestIndex = self.closestIndex
        trajtraj = self.trackTrajectory
        # METHOD ONE: use a shifted trajectory
        newIndexRange = closestIndex+1+np.arange(N)
        self.closestIndex = closestIndex + 1
        # build qin
        qin = np.array([])
        idx = [2, 1, 0, 3, 4, 5] # have to account for switched first and third joints
        for row in newIndexRange:
            qin = np.append(qin, trajtraj[row,idx])
        self.qin = np.transpose([qin])
# Non-object functions
def importTrajFromBag(filename):
    """Load joint positions from the /joint_states topic of a rosbag.

    Returns (storedTrajectory, trajectory_length): a T x 6 array of joint
    positions and its row count.  The zero row used to seed np.append is
    deleted before returning.
    """
    bag = rosbag.Bag(filename)
    storedTrajectory=np.zeros((1,6))
    for topic, msg, t in bag.read_messages(topics=['/joint_states']):
        storedTrajectory = np.append(storedTrajectory, [msg.position],axis=0)
    storedTrajectory = np.delete(storedTrajectory, (0), axis=0)
    trajectory_length = np.shape(storedTrajectory)[0]
    bag.close()
    return storedTrajectory, trajectory_length
def InteriorPoint_PhaseI(Q,q,F,g,A,b,x,eps=1e-6):
    """Log-barrier interior-point solver for
    min x^T Q x + q^T x  s.t.  F x <= g,  A x = b.

    Phase I is short-circuited: if the warm start x is not strictly
    feasible, x0 is taken from b and forward-propagated with u = 0.
    Returns (x, iterations).
    NOTE(review): this code relies on Python 2 integer division --
    `A.shape[0]/6` and `g[(g.shape[0]/2):]` use `/` results as array
    sizes/indices, which raises TypeError under Python 3.  `F1,F2` are
    unpacked but never used.  If the u=0 point is not strictly feasible
    the first while-loop never terminates -- confirm.
    """
    ndim = A.shape[1]
    iterations = 0
    # want to skip the Phase I if we can quickly provide a strictly feasible point (all u = 0.)
    N = A.shape[0]/6 - 1
    while (True):
        # check if the returned x is strictly feasible
        if(all(F.dot(x)-g < 0) & np.allclose(A.dot(x),b)):
            # x is strictly feasible, can continue with that x
            break
        else:
            # we'll extract x0 from b, and forward propagate with u=0 to get the rest of the vector x
            x0 = b[0:6]
            x = np.vstack((np.reshape(np.tile(x0,N+1).T,((N+1)*6,1)),np.zeros((N*6,1))))
    ##################### PHASE II ############################
    ###########################################################
    #use x0 returned from Phase I
    t = 1000.0
    mu = 10.0
    Beta = 0.9
    F1,F2 = np.shape(F)
    while(F.shape[0]/t > eps):
        # tighten the barrier and re-center with Newton steps
        t = mu*t
        while(True):
            # gradient/Hessian of the log barrier, exploiting the box structure of F
            lower_entries = -np.hstack((np.diagflat(1/(x[6:] - g[(g.shape[0]/2):])), np.diagflat(1/(x[6:] + g[(g.shape[0]/2):]))))
            gradPhi2 = np.vstack((np.zeros((6,2*ndim-12)), lower_entries))
            gradPhi2 = np.reshape(gradPhi2.sum(axis=1), (np.shape(gradPhi2)[0],1))
            #brian_gradPhi2 = F.T.dot(1/-(Fdxdg))
            # new time: 0.002716
            #brian_hessPhi2 = F.T.dot(np.diagflat(np.square(1/-(Fdxdg)))).dot(F)
            #my_hessPhi2_unit = 2*np.diagflat(np.square(1/g[(g.shape[0]/2):]))
            hessPhi2 = block_diag(np.zeros((6,6)), 2*np.diagflat(np.square(1/g[(g.shape[0]/2):])))
            # new time: 0.0031
            KKTmatrix2 = np.vstack((np.hstack((t*(2*Q)+hessPhi2,A.T)),\
                np.hstack((A,np.zeros((A.shape[0],A.shape[0]))))))
            gradF02 = t*(2*Q.dot(x)+q)
            #v = np.linalg.lstsq(KKTmatrix2,-np.vstack((gradF02+gradPhi2,A.dot(x)-b)))[0]
            # KKT matrix also depends on t, so can't pre-do the inversion?
            #v = np.linalg.inv(KKTmatrix2).dot(-np.vstack((gradF02+gradPhi2,A.dot(x)-b)))
            v = sp.solve(KKTmatrix2,-np.vstack((gradF02+gradPhi2,A.dot(x)-b)),assume_a='sym')
            #check log decrement
            #if(v[:ndim].T.dot(np.linalg.lstsq(2*Q+hessPhi2,v[:ndim])[0])/2 < eps ):
            if(v[:ndim].T.dot(sp.solve(2*Q+hessPhi2,v[:ndim],assume_a='pos'))/2 < eps ):
                break
            #backtracking linesearch to maintain feasibility
            Alpha = 1
            while(any(F.dot(x+Alpha*v[:ndim])-g> 0)):
                Alpha = Beta*Alpha
                #check to see if already taking excessively small stepsizes
                if(Alpha <= eps):
                    break
            #update x and s
            x = x + Alpha*v[:ndim]
            iterations += 1
    return x, iterations
def InteriorPoint_PhaseI_SpeedUp_2(Q,q,F,g,A,b,x,eps=1e-6):
    """Variant of InteriorPoint_PhaseI that solves each KKT system by
    block elimination (Schur complement) instead of one big symmetric
    solve.  Same inputs/outputs: returns (x, iterations).
    NOTE(review): sp.solve(H, A.T) is computed twice per Newton step
    (once for z1, once inside S) -- reusing z1 would halve that work.
    Like the non-speedup version, the integer divisions `A.shape[0]/6`
    and `g.shape[0]/2` assume Python 2.
    """
    ndim = A.shape[1]
    iterations = 0
    # want to skip the Phase I if we can quickly provide a strictly feasible point (all u = 0.)
    N = A.shape[0]/6 - 1
    while (True):
        # check if the returned x is strictly feasible
        if(all(F.dot(x)-g < 0) & np.allclose(A.dot(x),b)):
            # x is strictly feasible, can continue with that x
            break
        else:
            # we'll extract x0 from b, and forward propagate with u=0 to get the rest of the vector x
            x0 = b[0:6]
            x = np.vstack((np.reshape(np.tile(x0,N+1).T,((N+1)*6,1)),np.zeros((N*6,1))))
    ##################### PHASE II ############################
    ###########################################################
    #use x0 returned from Phase I
    t = 1000.0
    mu = 10.0
    Beta = 0.9
    F1,F2 = np.shape(F)
    while(F.shape[0]/t > eps):
        t = mu*t
        while(True):
            # gradient/Hessian of the log barrier, exploiting the box structure of F
            lower_entries = -np.hstack((np.diagflat(1/(x[6:] - g[(g.shape[0]/2):])), np.diagflat(1/(x[6:] + g[(g.shape[0]/2):]))))
            gradPhi2 = np.vstack((np.zeros((6,2*ndim-12)), lower_entries))
            gradPhi2 = np.reshape(gradPhi2.sum(axis=1), (np.shape(gradPhi2)[0],1))
            #brian_gradPhi2 = F.T.dot(1/-(Fdxdg))
            # new time: 0.002716
            #brian_hessPhi2 = F.T.dot(np.diagflat(np.square(1/-(Fdxdg)))).dot(F)
            #my_hessPhi2_unit = 2*np.diagflat(np.square(1/g[(g.shape[0]/2):]))
            hessPhi2 = block_diag(np.zeros((6,6)), 2*np.diagflat(np.square(1/g[(g.shape[0]/2):])))
            # new time: 0.0031
            gradF02 = t*(2*Q.dot(x)+q)
            #elimination method
            H = t*2*Q+hessPhi2
            z1 = sp.solve(H,A.T,assume_a='pos')
            z2 = sp.solve(H,gradF02+gradPhi2,assume_a='pos')
            S = -A.dot(sp.solve(H,A.T,assume_a='pos'))
            w = sp.solve(S,(A.dot(z2)-(A.dot(x)-b)),assume_a='sym')
            v = np.vstack((-z1.dot(w)-z2,w))
            #check log decrement
            #if(v[:ndim].T.dot(np.linalg.lstsq(2*Q+hessPhi2,v[:ndim])[0])/2 < eps ):
            if(v[:ndim].T.dot(sp.solve(2*Q+hessPhi2,v[:ndim],assume_a='pos'))/2 < eps ):
                break
            #backtracking linesearch to maintain feasibility
            Alpha = 1
            while(any(F.dot(x+Alpha*v[:ndim])-g> 0)):
                Alpha = Beta*Alpha
                #check to see if already taking excessively small stepsizes
                if(Alpha <= eps):
                    break
            #update x and s
            x = x + Alpha*v[:ndim]
            iterations += 1
    return x, iterations
def PrimalDualIP(Q,q,F,g,A,b,x,eps=1e-10):
    """Primal-dual interior-point solver for
    min x^T Q x + q^T x  s.t.  F x <= g,  A x = b.

    Phase I is again replaced by forward-propagating x0 (from b) with
    u = 0 when the warm start is not strictly feasible.  Each iteration
    solves the full primal-dual KKT system and backtracks on the residual
    norm.  Returns (x, iterations).
    NOTE(review): the bare `stepSize` expression inside the backtracking
    loop is a no-op (probably a leftover debug print); `A.shape[0]/6`
    assumes Python 2 integer division.
    """
    ##################### PHASE I #############################
    ###########################################################
    iterations = 0
    ndim = A.shape[1]
    # if(x==None):
    #     x = np.random.randn(ndim,1) #initialize x0
    t = 100.0
    mu = 20.0
    Beta = 0.8
    ############################# PHASE I #########################################
    # want to skip the Phase I if we can quickly provide a strictly feasible point (all u = 0.)
    N = A.shape[0]/6 - 1
    while (True):
        # check if the returned x is strictly feasible
        if(all(F.dot(x)-g < 0) & np.allclose(A.dot(x),b)):
            # x is strictly feasible, can continue with that x
            break
        else:
            # we'll extract x0 from b, and forward propagate with u=0 to get the rest of the vector x
            x0 = b[0:6]
            x = np.vstack((np.reshape(np.tile(x0,N+1).T,((N+1)*6,1)),np.zeros((N*6,1))))
    ##################### PHASE II ############################
    ###########################################################
    #use x0 returned from Phase I
    mu = 20.0
    Lambda = 1e-1*np.ones((F.shape[0],1))
    Nu = np.zeros((A.shape[0],1))
    Alpha = 0.1
    Beta = 0.3
    # barrier parameter from the surrogate duality gap
    t = mu*F.shape[0]/(-(F.dot(x)-g).T.dot(Lambda))
    oldRvec = np.vstack(((2*Q.dot(x)+q)+F.T.dot(Lambda)+A.T.dot(Nu),\
        -np.diag(Lambda.flatten()).dot(F.dot(x)-g)-1/t*np.ones((F.shape[0],1)),\
        A.dot(x)-b))
    while(True):
        KKTmatrix = np.vstack((np.hstack((2*Q,F.T,A.T)),\
            np.hstack((-np.diag(Lambda.flatten()).dot(F),-np.diag((F.dot(x)-g).flatten()),np.zeros((F.shape[0],A.shape[0])))),\
            np.hstack((A,np.zeros((A.shape[0],F.shape[0])),np.zeros((A.shape[0],A.shape[0]))))))
        v = np.linalg.inv(KKTmatrix).dot(-oldRvec)
        # largest step keeping Lambda >= 0
        ratio = np.divide(-Lambda,v[ndim:ndim+F.shape[0]])
        ratio[ratio<0] = 1
        stepSize = 0.99*np.min(np.vstack((1,np.min(ratio))))
        x_new = x+stepSize*v[:ndim]
        Lambda_new = Lambda+stepSize*v[ndim:ndim+F.shape[0]]
        Nu_new = Nu+stepSize*v[ndim+F.shape[0]:]
        # backtrack until primal feasible and the residual norm decreases enough
        while(any((F.dot(x_new)-g) >0) or \
            np.linalg.norm(np.vstack(((2*Q.dot(x_new)+q)+F.T.dot(Lambda_new)+A.T.dot(Nu_new),\
            -np.diag(Lambda_new.flatten()).dot(F.dot(x_new)-g)-1/t*np.ones((F.shape[0],1)),\
            A.dot(x_new)-b))) >\
            (1-Alpha*stepSize)*np.linalg.norm(oldRvec)):
            stepSize = Beta*stepSize
            #update x,lambda,nu
            x_new = x+stepSize*v[:ndim]
            Lambda_new = Lambda+stepSize*v[ndim:ndim+F.shape[0]]
            Nu_new = Nu+stepSize*v[ndim+F.shape[0]:]
            stepSize
            if(stepSize<eps):
                break
        if(stepSize<eps):
            break
        x = x_new
        Lambda = Lambda_new
        Nu = Nu_new
        t = mu*F.shape[0]/(-(F.dot(x)-g).T.dot(Lambda))
        oldRvec = np.vstack(((2*Q.dot(x)+q)+F.T.dot(Lambda)+A.T.dot(Nu),\
            -np.diag(Lambda.flatten()).dot(F.dot(x)-g)-1/t*np.ones((F.shape[0],1)),\
            A.dot(x)-b))
        iterations+=1
    return x, iterations
############################################################################################################################
################################################## MAIN SCRIPT #############################################################
############################################################################################################################
# initialize the node
sys.path.append('/home/tony/Desktop')
#print sys.path
rospy.init_node('ur5e_control', anonymous=True)
# initialize the controller.
# this also starts the subscriber to joint states, which we use to set the xreference
UR5E = RobotController()
joint_sub = rospy.Subscriber('joint_states',JointState, UR5E.true_velocity_callback)
command_pub = rospy.Publisher('joint_group_vel_controller/command',Float64MultiArray,queue_size =1)
timing_pub = rospy.Publisher('solver_time',Float64,queue_size=1)
iterations_pub = rospy.Publisher('solver_iterations',Float64,queue_size=1)
# this bag has to be in the folder
# for smooth movements, the recorded trajectory needs to be the same recorded frequency as the ROS will run
storedTrajectory, trajectory_length = importTrajFromBag('/home/tony/Desktop/p_scene2_2019-04-23-17-18-22.bag') #100 Hz
#storedTrajectory, trajectory_length = importTrajFromBag('/home/tony/Desktop/circle_trajectory_noweight_2__2019-05-07-10-46-36.bag') #100 Hz
#storedTrajectory, trajectory_length = importTrajFromBag('/home/ur5/Desktop/simrobotfiles/1-0.5-0.5-_2019-04-22-18-04-10.bag')
UR5E.addTrajectory(storedTrajectory)
# main control loop: run until fewer than N reference samples remain
loop_rate = 100
#rate = rospy.Rate(loop_rate)
joint_vels = Float64MultiArray()
while UR5E.closestIndex + UR5E.N < trajectory_length:
    # solve the QP using chosen matrix
    start_time = time.time()
    input_calc, solver_iterations = UR5E.TrackingControl()
    calc_time = time.time() - start_time
    # send input to the controller
    joint_vels.data = input_calc
    command_pub.publish(joint_vels)
    # publish solver iterations
    iterations_pub.publish(solver_iterations)
    #print calc_time
    timing_pub.publish(calc_time)
# command zero velocity to stop the robot once tracking finishes
joint_vels.data = np.array([0,0,0,0,0,0])
command_pub.publish(joint_vels)
print 'Finished'
# A = genfromtxt('/home/ur5/.ros/A.csv', delimiter=',')
# b = genfromtxt('/home/ur5/.ros/b.csv', delimiter=',')
# Q = genfromtxt('/home/ur5/.ros/Q.csv', delimiter=',')
# q = genfromtxt('/home/ur5/.ros/p.csv', delimiter=',')
# F = genfromtxt('/home/ur5/.ros/F.csv', delimiter=',')
# g = genfromtxt('/home/ur5/.ros/g.csv', delimiter=',')
# q = q.reshape(q.size,1)
# g = g.reshape(g.size,1)
# b = b.reshape(b.size,1)
# print np.shape(A)
# print np.shape(b)
# print np.shape(Q)
# print np.shape(q)
# print np.shape(F)
# print np.shape(g)
# Q[:6,:6] = np.identity(6)
# g[g==0] = 2*math.pi
# start = time.time()
# x = InteriorPoint_PhaseI(Q,q,F,g,A,b)
# end = time.time()
# print('Inequalities')
# print(F.dot(x)-g)
# print('Equality Violation')
# print(A.dot(x)-b)
# print('Obtained x:')
# print(x)
# print x.T.dot(Q).dot(x)+q.T.dot(x)
# print(end-start)
| [
"tony.h.zheng@gmail.com"
] | tony.h.zheng@gmail.com |
fc1594f425c1a54f1e64a6aef2c262b5c450c273 | 736730d72c24470a0c9ba58309ee3a95fe09d5e4 | /projeto/feriados/feriados/urls.py | d23c9df344b2022619b6e27648331c943d93a479 | [] | no_license | orlandosaraivajr/FATEC_2021_1SEM_Topicos3 | 3f9c6b983c8b012330527848862f9f22649f0f5a | 83610f798510e1bad69eedaed6b3b4ed08e2014e | refs/heads/master | 2023-05-02T10:24:05.865947 | 2021-05-19T00:20:38 | 2021-05-19T00:20:38 | 339,551,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django.contrib import admin
from django.urls import path, include
# URL routes: the Django admin at /admin/, everything else delegated to
# the feriado app's urlconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('feriado.urls')),
]
| [
"orlandosaraivajr@gmail.com"
] | orlandosaraivajr@gmail.com |
a46a09b36dea4eddb1483fcdee6e292962b2ab51 | f47d17b53977cf745d453b654529e8cd6be7890f | /3level_N20_ainbin1.py | 120aacee0e37205a96e1666348518b2b537c19d0 | [] | no_license | rareearthquantum/model_upconversion_peter | b4cce7556a167ba0e9813625dc924d3542d33cd1 | dcf08000ec21770659318409a686bb2b88a7a1be | refs/heads/master | 2020-04-28T19:54:34.795590 | 2019-06-14T09:43:28 | 2019-06-14T09:43:28 | 175,526,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | from Frequency_response_3level import *
# Model parameters for the 3-level microwave-to-optical upconversion run,
# collected in a dict and handed to find_outputs().  Angular rates are
# written as 2*pi*f (Hz -> rad/s).
# NOTE(review): d13/d23 use math.sqrt(1/3) and math.sqrt(2/3), which is
# correct only under Python 3 true division (both would be sqrt(0) under
# Python 2) -- confirm the intended interpreter.
p = {}
p['deltamu'] = 0.
p['deltao'] = 0.
p['d13'] = 2e-32*math.sqrt(1/3)
p['d23'] = 2e-32*math.sqrt(2/3)
# radiative decay rates split between the two optical transitions
p['gamma13'] = p['d13']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma23'] = p['d23']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma2d'] = 1e6
p['gamma3d'] = 1e6
p['nbath'] = 20
p['gammamu'] = 1/(p['nbath']+1) * 1e3
p['go'] = 51.9 #optical coupling
p['No'] = 1.28e15 # number of atoms in the optical mode
p['deltac']=0 #detuning for
p['kappaoi']=2*pi*7.95e6 # intrinsic loss for optical resonator
p['kappaoc']=2*pi*1.7e6 # coupling loss for optical resonator
#p['df']=0.1e6 # how small discretisation step to take when integrating over the
# inhomogeneous lines
p['mean_delam']=0
p['sd_delam']=2*pi*25e6/2.355 #microwave inhomogeneous broadening
# 2.355 converts FWHM into standard deviation
p['mean_delao']=0
p['sd_delao']=2*pi*170e6/2.355 #optical inhomogeneous broadening
p['kappami'] = 650e3*2*pi # intrinsic loss for microwave cavity
p['kappamc'] = 70e3*2*pi # coupling loss for the microwave cavity
# this is for one of the two output ports
p['Nm'] = 2.22e16 #total number of atoms
p['gm'] = 1.04 #coupling between atoms and microwave field
p['gammaoc']=2*pi*1.7e6
p['gammaoi']=2*pi*7.95e6
p['gammamc']=2*pi*70e3
p['gammami']=2*pi*650e3
muBohr=927.4009994e-26; # Bohr magneton in J/T in J* T^-1
p['mu12'] = 4.3803*muBohr # transition dipole moment for microwave cavity (J T^-1)
p['Lsample']=12e-3 # the length of the sample, in m
p['dsample']=5e-3 # the diameter of the sample, in m
p['fillfactor']=0.8 #microwave filling factor
p['freqmu'] = 5.186e9
p['freq_pump'] = 195113.36e9 #pump frequency
p['freqo']=p['freqmu']+p['freq_pump']
p['Lcavity_vac'] = 49.5e-3 # length of the vacuum part of the optical
# Fabry Perot (m)
p['Wcavity'] = 0.6e-3# width of optical resonator beam in sample (m)
p['nYSO'] = 1.76
p['Omega']=-492090.88755145477
# sweep optical/microwave detunings for a single input amplitude pair
delovals=np.linspace(-20e5,20e5,31)
delmvals=np.linspace(-1e6,1e6,31)
binvals=[600000]
ainvals=[600000]
aoutvals,boutvals,effic_a,effic_b=find_outputs(ainvals,binvals,delovals,delmvals,p)
np.savez('output_N20_ainbin1',aoutvals=aoutvals,boutvals=boutvals,effic_a=effic_a,effic_b=effic_b,ainvals=ainvals,binvals=binvals,delovals=delovals,delmvals=delmvals,p=p)
| [
"peterbarnettnz@gmail.com"
] | peterbarnettnz@gmail.com |
be83bd3e4c5b5221a68a0aacf0e2f692f9a4a957 | de4e1332950d37707620c54a9613258c1dd9489c | /yusang/week8/1463:1로만들기.py | bddccde28e3aed92255c8a7e8674693488e55624 | [] | no_license | PnuLikeLion9th/Summer_algorithm | 8fe74066b9673fb891b7205f75f808a04c7fe750 | dcfcb6325854b3b4c529451d5c6b162298b53bc1 | refs/heads/master | 2023-07-10T13:57:05.511432 | 2021-08-15T07:50:00 | 2021-08-15T07:50:00 | 378,679,514 | 3 | 10 | null | 2021-08-15T07:50:01 | 2021-06-20T15:32:18 | Python | UTF-8 | Python | false | false | 271 | py | n = int(input())
# Bottom-up DP (BOJ 1463 "make it 1"): dp[i] = minimum number of
# operations (subtract 1, divide by 2 when even, divide by 3 when
# divisible) needed to reduce i to 1.
dp = [0 for _ in range(n+1)]
for i in range(2, n+1):
    # subtracting 1 is always available
    dp[i] = dp[i-1] + 1
    if i%2 == 0 and dp[i] > dp[i//2] + 1 :
        dp[i] = dp[i//2]+1
    if i%3 == 0 and dp[i] > dp[i//3] + 1 :
        dp[i] = dp[i//3] + 1
print(dp[n]) | [
"dfhilder@gmail.com"
] | dfhilder@gmail.com |
6630c33e6fda4d0900961391f88428c3491da5e9 | 618bf166a2efabcfed275fed9a6901611862329a | /sudoku.py | 08b2fc5432856a63d90df423313562ef872cfa91 | [] | no_license | ksanu1998/pacman | 6ae8a204766180e60017a4e3cb05e2e2af9a342e | 7916fbae907c553ff5504ecca8d8f5fcfc093e8c | refs/heads/master | 2020-12-06T00:45:53.009967 | 2020-02-03T19:26:48 | 2020-02-03T19:26:48 | 232,291,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,142 | py | #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
#| code for solving 4 x 4 sudoku puzzles using different search algorithms and heuristic functions |
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# Some important notes:
# move gen has two different objectives to implement: find next empty cell to be filled in the matrix and decide what value is supposed to be filled in this next empty cell
# to implement both of these, heuristics are used
# there are two different kinds of heuristics used, one which fetches the next cell according to some value; and the other which sorts the neighbours according to some value
# mode in the driver code defines the first type of heuristic to be chosen based either on the maximun constrained empty cell or empty cell in the order of matrix traversal; and the second type of heuristic is the default one for sorting neighbours of the current state using the constraint number of the neighbour matrix
import sys
import heapq # heapq module for implementing priority queue
import queue # queue module for implementing queue
import copy # copy module for copying nested lists
# function to process input sudoku puzzle from text file into matrix
def process_input(input_file):
    """Read a sudoku puzzle from a text file into a matrix.

    Each line of the file is a run of single digits (0 = empty cell);
    returns a list of rows, each row a list of ints.

    Fix: the file is now opened read-only inside a context manager --
    the original opened it in "r+" mode and never closed the handle.
    """
    matrix = []
    with open(input_file, "r") as f:
        for row in f:
            cells = []
            for ch in row:
                if ch == "\n":  # stop at the newline, keep everything before it
                    break
                cells.append(int(ch))
            matrix.append(cells)
    return matrix
# function to check if goal state is reached (goal test)
def is_matrix_solved(matrix):
    """Return True when no cell of the matrix is empty (== 0).

    Fix: the original set a flag and only returned True explicitly, so an
    unsolved grid fell off the end and returned None (its `break` also
    exited only the inner loop).  Callers test truthiness, so returning
    an explicit False is fully compatible.
    """
    for row in matrix:
        for cell in row:
            if cell == 0:
                return False
    return True
# function to get the next most constrained empty cell in the matrix
def get_most_constrained_cell(matrix):
    """Return (row, col) of the empty cell with the highest constraint score.

    A cell scores +1 when its column contains a filled cell in another row
    and +1 when its row contains a filled cell in another column, so scores
    range over 0..2.  Cells are scanned in row-major order and only a
    strictly higher score replaces the current best, so ties keep the
    earliest cell; (0, 0) is returned when no empty cell scores above zero.
    """
    best_score = 0
    best_row, best_col = 0, 0
    for r in range(4):
        for c in range(4):
            if matrix[r][c] != 0:
                continue
            score = 0
            if any(matrix[rr][c] != 0 for rr in range(4) if rr != r):
                score += 1
            if any(matrix[r][cc] != 0 for cc in range(4) if cc != c):
                score += 1
            if score > best_score:
                best_score = score
                best_row, best_col = r, c
    return best_row, best_col
# function to calculate the constaint number of the given configuration of matrix (state)
def get_constraint_number_of_matrix(new_matrix, val, x, y):
    """Place val at (x, y) in new_matrix (mutating it!) and return the
    summed constraint score of every remaining empty cell.

    Each empty cell contributes +1 when its column holds a filled cell in
    another row and +1 when its row holds a filled cell in another column.
    Callers pass a scratch deep copy, so the in-place write is deliberate.
    """
    new_matrix[x][y] = val
    total = 0
    for r in range(4):
        for c in range(4):
            if new_matrix[r][c] != 0:
                continue
            if any(new_matrix[rr][c] != 0 for rr in range(4) if rr != r):
                total += 1
            if any(new_matrix[r][cc] != 0 for cc in range(4) if cc != c):
                total += 1
    return total
# function to get the next empty cell in the matrix in the order of traversal
def get_next_empty_cell(matrix):
    """Return (row, col) of the first empty cell in row-major order.

    Implicitly returns None when the grid has no empty cell, matching the
    original behaviour (callers only invoke this on unsolved grids).
    """
    for flat in range(16):
        r, c = divmod(flat, 4)
        if matrix[r][c] == 0:
            return r, c
# function to get the quadrant of a given cell in the matrix
def get_quadrant(x, y):
    """Map cell (x, y) of the 4x4 grid to its 2x2 quadrant index:
    0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.

    Anything not matching the first three tests falls through to 3,
    exactly like the original if/elif chain.
    """
    in_top = (x + 1) in (1, 2)
    in_bottom = (x + 1) in (3, 4)
    in_left = (y + 1) in (1, 2)
    in_right = (y + 1) in (3, 4)
    if in_top and in_left:
        return 0
    if in_top and in_right:
        return 1
    if in_bottom and in_left:
        return 2
    return 3
# function to check if a given configuration of matrix (state) is valid
def is_valid_configuration(matrix, x, y, val):
    """Return True when writing val at (x, y) keeps the column, the row
    and the 2x2 quadrant free of a duplicate val (the cell itself is
    ignored).

    The quadrant corner is computed inline instead of via get_quadrant();
    the quadrant scan keeps the original `r != x and c != y` filter, which
    suffices here because quadrant cells sharing the row or column are
    already covered by the first two scans.
    """
    # column uniqueness
    for r in range(4):
        if r != x and matrix[r][y] == val:
            return False
    # row uniqueness
    for c in range(4):
        if c != y and matrix[x][c] == val:
            return False
    # quadrant uniqueness (same 2x2 corner as get_quadrant's (a, b))
    corner_r = 2 * (x // 2)
    corner_c = 2 * (y // 2)
    for r in range(corner_r, corner_r + 2):
        for c in range(corner_c, corner_c + 2):
            if r != x and c != y and matrix[r][c] == val:
                return False
    return True
# function to implement best-first search
def best_first_search(matrix, mode):
    """Greedy best-first search over partial assignments.

    The heap is keyed on the NEGATIVE constraint number of each child, so
    the most-constraining placement is expanded first.  `mode` selects the
    cell heuristic (0 = first empty cell, 1 = most constrained cell).
    Returns (solved_matrix, states_explored); implicitly returns None if
    the heap empties without reaching a full grid.
    NOTE(review): on key ties heapq falls back to comparing the matrices
    themselves (lists of ints) -- well-defined but arbitrary ordering.
    """
    bfs_q = []
    heapq.heappush(bfs_q,(0, matrix)) # priority queue to contain matrix states
    bfs_states_explored = 0
    while (len(bfs_q)!=0): # while queue is not empty
        bfs_states_explored+=1
        current_matrix = heapq.heappop(bfs_q) # pop the head from the priority queue
        current_matrix = current_matrix[1]
        if is_matrix_solved(current_matrix.copy()): # check for goal state
            return current_matrix, bfs_states_explored
        if mode == 0: # apply heuristic according to mode
            x, y = get_next_empty_cell(current_matrix.copy())
        elif mode == 1:
            x, y = get_most_constrained_cell(current_matrix.copy())
        for i in range(1,5): # check neighbours
            temp_matrix = copy.deepcopy(current_matrix)
            if is_valid_configuration(temp_matrix, x, y, i): # if valid, push to priority queue based on heuristic value
                heapq.heappush(bfs_q,(-get_constraint_number_of_matrix(temp_matrix, i, x, y), temp_matrix))
# print("\n")
# function to implement hill-climbing search
def hill_climbing_search(matrix, mode):
    """Greedy hill climbing over partial sudoku assignments.

    Repeatedly fills the next cell (chosen by `mode`: 0 = first empty in
    row-major order, 1 = most constrained) with the valid value whose
    successor has the highest constraint number.  Returns
    (final_matrix, solved_flag, states_explored); solved_flag is 0 when a
    dead end (no valid value for the chosen cell) is reached.

    Bug fix: the original called sorted(next_cell, ...) and discarded the
    result, so the "best" neighbour was really just the first valid value;
    the list is now sorted in place, as the adjacent comment always claimed.
    """
    hcs_q = queue.Queue(maxsize=0) # queue to contain matrix states
    hcs_q.put(matrix) # initialize the queue with initial state
    hcs_states_explored = 0
    while (not(hcs_q.empty())): # while queue is not empty
        hcs_states_explored+=1
        current_matrix = hcs_q.get() # pop the head of the queue
        if is_matrix_solved(current_matrix.copy()): # check for goal state
            return current_matrix, 1, hcs_states_explored
        if mode == 0: # apply heuristic according to mode
            x, y = get_next_empty_cell(current_matrix.copy())
        elif mode == 1:
            x, y = get_most_constrained_cell(current_matrix.copy())
        next_cell = [] # list of (negative constraint number, candidate state)
        for i in range(1,5):
            temp_matrix = copy.deepcopy(current_matrix)
            if is_valid_configuration(temp_matrix, x, y, i): # if valid, add to neighbours list
                next_cell.append((-get_constraint_number_of_matrix(temp_matrix, i, x, y), temp_matrix))
        next_cell.sort(key=lambda tup: tup[0]) # sort neighbours by heuristic value (in place)
        if len(next_cell) == 0: # local optimum: no valid neighbour, give up
            return current_matrix, 0, hcs_states_explored
        hcs_q.put(next_cell[0][1]) # follow the neighbour with the best heuristic value
# print("\n")
# function to implement variable neighbourhood descent search
def variable_neighbourhood_descent_search(matrix, mode):
    """Variable neighbourhood descent: hill climbing that, on hitting a
    dead end, restarts from the initial matrix with a larger neighbourhood
    density (more children of each state are enqueued).

    Returns (final_matrix, solved_flag, states_explored).

    Bug fix: the original called sorted(next_cell, ...) and discarded the
    result, so neighbours were enqueued in value order 1..4 rather than by
    heuristic; the list is now sorted in place.
    """
    vnds_q = queue.Queue(maxsize=0) # queue to contain matrix states
    vnds_q.put(matrix) # initialize the queue with initial state
    neighbour_density = 1 # initialize neighbour density to 1
    vnds_states_explored = 0
    while (not(vnds_q.empty())): # while queue is not empty
        vnds_states_explored+=1
        current_matrix = vnds_q.get() # pop the head of the queue
        if is_matrix_solved(current_matrix.copy()): # check for goal state
            return current_matrix, 1, vnds_states_explored
        if mode == 0: # apply heuristic according to mode
            x, y = get_next_empty_cell(current_matrix.copy())
        elif mode == 1:
            x, y = get_most_constrained_cell(current_matrix.copy())
        next_cell = [] # list of (negative constraint number, candidate state)
        for i in range(1,5):
            temp_matrix = copy.deepcopy(current_matrix)
            if is_valid_configuration(temp_matrix, x, y, i): # if valid, add to neighbours list
                next_cell.append((-get_constraint_number_of_matrix(temp_matrix, i, x, y), temp_matrix))
        next_cell.sort(key=lambda tup: tup[0]) # sort neighbours by heuristic value (in place)
        if len(next_cell) == 0: # dead end: restart from the initial state with a denser neighbourhood
            neighbour_density+=1
            vnds_q.put(matrix)
        else: # enqueue the best neighbours up to the current density (index bound kept as i > neighbour_density)
            for i in range(0, len(next_cell)):
                if i>neighbour_density:
                    break
                vnds_q.put(next_cell[i][1])
# function to implement beam search
def beam_search(matrix, beam_density, mode):
    """Best-first search that pushes at most beam_density children per
    expansion.

    Returns (matrix, solved_flag, states_explored); the return after the
    loop reports failure (flag 0) when the beam dies out.
    NOTE(review): on key ties heapq falls back to comparing the matrices
    themselves (lists of ints) -- well-defined but arbitrary ordering.
    """
    bs_q = []
    heapq.heappush(bs_q,(0, matrix)) # priority queue to contain matrix states
    bs_states_explored = 0
    while (len(bs_q)!=0): # while queue is not empty
        bs_states_explored+=1
        current_matrix = heapq.heappop(bs_q) # pop the head from the priority queue
        current_matrix = current_matrix[1]
        if is_matrix_solved(current_matrix.copy()): # check for goal state
            return current_matrix, 1, bs_states_explored
        if mode == 0: # apply heuristic according to mode
            x, y = get_next_empty_cell(current_matrix.copy())
        elif mode == 1:
            x, y = get_most_constrained_cell(current_matrix.copy())
        beam_counter = 0
        for i in range(1,5): # check neighbours
            temp_matrix = copy.deepcopy(current_matrix)
            if is_valid_configuration(temp_matrix, x, y, i): # if valid, push to priority queue based on heuristic value
                heapq.heappush(bs_q,(-get_constraint_number_of_matrix(temp_matrix, i, x, y), temp_matrix))
                beam_counter+=1
                if beam_counter == beam_density: # push only beam_density children per expansion
                    break
    return current_matrix, 0, bs_states_explored
# function to check if a given state is tabu
def is_tabu(matrix, x, y, i, tabu_list, tenure):
    """Return True when matrix-with-value-i-at-(x, y) is in the tabu list.

    Writes i into matrix[x][y] (callers pass a scratch deep copy) and
    compares the result against every queued state.  `tenure` is accepted
    for interface compatibility but is not used, exactly as before.
    """
    matrix[x][y] = i
    return any(entry == matrix for entry in list(tabu_list.queue))
# function to implement tabu search
def tabu_search(matrix, tenure, mode):
    """Tabu search: hill climbing with a FIFO tabu list of the last
    `tenure` enqueued states; dead ends restart from the initial matrix.

    Returns (matrix, states_explored, solved_flag) -- note the flag is
    the THIRD element here, unlike the other searches.

    Bug fix: the original called sorted(next_cell, ...) and discarded the
    result, so neighbours were never actually ordered by heuristic; the
    list is now sorted in place.
    """
    ts_q = queue.Queue(maxsize=0) # queue to contain matrix state
    ts_q.put(matrix) # initialize queue with initial matrix state
    ts_states_explored = 0 # counter for number of states explored
    tabu_list = queue.Queue(maxsize=0) # FIFO tabu list, capped at `tenure` entries below
    while (not(ts_q.empty())): # run until queue is not empty
        ts_states_explored+=1
        current_matrix = ts_q.get() # pop the head of the queue
        if is_matrix_solved(current_matrix.copy()): # check for goal state
            return current_matrix, ts_states_explored, 1
        if mode == 0: # apply heuristic according to mode
            x, y = get_next_empty_cell(current_matrix.copy())
        elif mode == 1:
            x, y = get_most_constrained_cell(current_matrix.copy())
        next_cell = [] # list of (negative constraint number, candidate state)
        for i in range(1,5): # check neighbours
            temp_matrix = copy.deepcopy(current_matrix)
            if is_valid_configuration(temp_matrix, x, y, i) and (not is_tabu(temp_matrix, x, y, i, tabu_list, tenure)): # valid and not tabu
                next_cell.append((-get_constraint_number_of_matrix(temp_matrix, i, x, y), temp_matrix)) # append to neighbours list
        next_cell.sort(key=lambda tup: tup[0]) # sort neighbours by heuristic value (in place)
        if len(next_cell) == 0: # dead end: restart from the initial state
            ts_q.put(matrix)
        else: # enqueue neighbours, record them in the tabu list, evict the oldest entry when full
            for i in range(0, len(next_cell)):
                if tabu_list.qsize() == tenure:
                    tabu_list.get()
                ts_q.put(next_cell[i][1])
                tabu_list.put(next_cell[i][1])
def main():
    """Solve the puzzle named in sys.argv[1] with each of the five search
    strategies on its own deep copy of the grid, printing the result and
    the number of states explored for each."""
    # declarations
    matrix = process_input(sys.argv[1]) # problem sudoku matrix (initial state)
    bfs_matrix = copy.deepcopy(matrix) # instance of initial state for applying best-first search
    hcs_matrix = copy.deepcopy(matrix) # instance of initial state for applying hill-climbing search
    vnds_matrix = copy.deepcopy(matrix) # instance of initial state for applying variable neighbourhood descent search
    bs_matrix = copy.deepcopy(matrix) # instance of initial state for applying beam search
    ts_matrix = copy.deepcopy(matrix) # instance of initial state for applying tabu search
    # heuristic functions:
    # mode = 0 => get next empty cell in order of matrix traversal
    # mode = 1 => get next most constrained cell
    bfs_mode = 0 # heuristic mode for best-first search
    hcs_mode = 0 # heuristic mode for hill-climbing search
    vnds_mode = 0 # heuristic mode for variable neighbourhood descent search
    bs_mode = 0 # heuristic mode for beam search
    beam_density = 2 # beam width for beam search
    ts_mode = 0 # heuristic mode for tabu search
    # driver code
    bfs_matrix, bfs_states_explored = best_first_search(bfs_matrix, bfs_mode) # apply best-first search and print result
    print("Puzzle solved after exploring", bfs_states_explored,"states using Best First Search ;)")
    for i in range(0,4):
        print(bfs_matrix[i])
    print("*************")
    ######################################################################################################################################
    hcs_matrix, flag1, hcs_states_explored = hill_climbing_search(hcs_matrix, hcs_mode) # apply hill-climbing search and print result
    if not flag1:
        print("Goal state not reached after exploring", hcs_states_explored,"states using Hill Climbing Search. Puzzle un-solved :(")
    else:
        print("Puzzle solved after exploring", hcs_states_explored, "states using Hill Climbing Search!")
    for i in range(0,4):
        print(hcs_matrix[i])
    print("*************")
    ######################################################################################################################################
    vnds_matrix, flag2, vnds_states_explored = variable_neighbourhood_descent_search(vnds_matrix, vnds_mode) # apply variable neighbourhood descent search and print result
    if not flag2:
        print("Goal state not reached after exploring", vnds_states_explored, "states using Variable Neighbourhood Descent Search. Puzzle un-solved :(")
    else:
        print("Puzzle solved after exploring", vnds_states_explored, "states using Variable Neighbourhood Descent Search ;)")
    for i in range(0,4):
        print(vnds_matrix[i])
    print("*************")
    ######################################################################################################################################
    bs_matrix, flag3, bs_states_explored = beam_search(bs_matrix, beam_density, bs_mode) # apply beam search and print result
    if not flag3:
        print("Goal state not reached after exploring", bs_states_explored, "states using Beam Search with width", beam_density, ". Puzzle un-solved :(")
    else:
        print("Puzzle solved after exploring", bs_states_explored, "states using Beam Search with width", beam_density, " ;)")
    for i in range(0,4):
        print(bs_matrix[i])
    print("*************")
    ######################################################################################################################################
    ts_matrix, ts_states_explored, flag4 = tabu_search(ts_matrix, 10, ts_mode) # apply tabu search (tenure 10) and print result
    if not flag4:
        print("Goal state not reached after exploring", ts_states_explored, "states using Tabu Search. Puzzle un-solved :(")
    else:
        print("Puzzle solved after exploring", ts_states_explored, "states using Tabu Search ;)")
    for i in range(0,4):
        print(ts_matrix[i])
    print("*************")
    ######################################################################################################################################
main() | [
"noreply@github.com"
] | noreply@github.com |
a4889fa5b155d6284ff64e05f0a3f2bc3761b20a | 246556b07fa9f42e0f5d424022c97eb62be259df | /Data Handling/Problem_1.py | e36c5e5e5ea7fe12cff872a49fa4fd017ff494b4 | [] | no_license | bhavsarp456-pnb/bhavsarp456 | c4017cacf009cacd080ca5cdcead7ae4a90c5043 | 151c8b353b0a87497159d2c3342d89c3c671bc69 | refs/heads/main | 2023-03-06T15:10:29.552958 | 2021-02-15T05:36:30 | 2021-02-15T05:36:30 | 338,974,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | file = open("C:\\Users\\Dell\\OneDrive\\Desktop\\Laptop_List.txt","a+")
# Append the brand names, then copy the ones starting with "A" into a
# second file.
file.writelines(['Dell\n','Apple\n','HP\n','Acer\n','Asus\n','Lenevo\n','Toshiba\n'])
file.seek(0)
x = file.readlines()
NewData = [i for i in x if i.startswith("A")]
file2 = open("C:\\Users\\Dell\\OneDrive\\Desktop\\Sorted_Laptop_List.txt","a+")
file2.write("Sorted list is:-\n")
file2.writelines(NewData)
file2.seek(0)
# NOTE(review): after readlines() above, `file` is positioned at EOF, so
# this read() returns '' and prints an empty line; `file` is never closed.
print(file.read())
file2.close() | [
"noreply@github.com"
] | noreply@github.com |
ebc1f9d063c620c66e017cdc4cf70f528dbfdc02 | c9f32890446ca90b7b74837ead806c8d49a02705 | /face2.py | 436cddf3c4247039ae6ae76d8975cb1a851a92b9 | [] | no_license | saeed4v/Eye-Controlled-Command-System | 464b0e34e1866a3ef802657a9a66fdaf8511ea9b | 3162b2af37693de70ad3e4e64416b3b2d05ecbbb | refs/heads/master | 2021-01-01T05:15:43.905260 | 2016-05-11T14:47:11 | 2016-05-11T14:47:11 | 58,529,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,849 | py | import numpy as np
import cv2
import collections
import picamera
# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
eye0_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
# Menu font and Raspberry Pi camera configuration.
font=cv2.FONT_HERSHEY_SIMPLEX
cap = picamera.PiCamera()
#cap.start_preview()
cap.resolution = (340,240)
cap.framerate=50
#cap = cv2.VideoCapture(0)
# Count of consecutive frames with the eye closed; >2 rotates the menu.
tym=0
# Menu selection ring; 0..3 are the four options, 4 means "no selection".
d = collections.deque([0,1,2,3,4])
def check():
cap.capture('image.bmp')
img=cv2.imread('image.bmp')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
eye0 = eye0_cascade.detectMultiScale(gray, 1.3, 5)
flag=0
for (x,y,w,h) in eye0:
flag=1
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
#print roi_color
if flag == 1:
print 'Eye Open'
tym=0
else:
print 'Eye Closed'
tym=tym+1
if tym >2 :
print 'Next'
d.rotate(1)
tym=0
def display(y):
    # Draw the four-option menu on the global frame ``img``.  The option
    # selected by index ``y`` (0=FOOD, 1=WATER, 2=TOILET, 3=DOCTOR) is
    # highlighted in blue (BGR 255,0,0), the rest are white; any other
    # index shows "No selection".  A large block of dead, commented-out
    # eye-detection code duplicated from check() was removed, along with
    # the unused ``check = 0`` local.
    options = [
        ('FOOD', (10, 50)),
        ('WATER', (10, 400)),
        ('TOILET', (500, 40)),
        ('DOCTOR', (450, 400)),
    ]
    for idx, (label, pos) in enumerate(options):
        colour = (255, 0, 0) if idx == y else (255, 255, 255)
        cv2.putText(img, label, pos, font, 1, colour, 2)
    if 0 <= y <= 3:
        cv2.putText(img, 'Close eyes to confirm', (100, 200), font, 1, (255, 255, 255), 1)
    else:
        cv2.putText(img, 'No selection', (100, 200), font, 1, (255, 255, 255), 1)
# Main loop: one captured frame per iteration until ESC is pressed.
# A dead, commented-out duplicate of check() was removed.
# NOTE(review): display(d[0]) was also commented out upstream; re-enable
# it here to actually draw the menu on the frame.
while 1:
    check()
    # NOTE(review): ``img`` is assigned inside check(); check() must
    # declare it global or this line raises NameError.
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break

cv2.destroyAllWindows()
| [
"saeed4v@gmail.com"
] | saeed4v@gmail.com |
6e94b8ef6dd9af3e5218e7cac10b5f3da2521727 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_270/ch54_2020_03_27_00_44_11_634478.py | 4a4969942833af9e67d2166564562fd7a64f393d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | def calcula_fibonnacci(n):
k = 0
lista = []
soma = 1
while k < n:
lista.append(soma)
soma += soma
k += 1
return lista
| [
"you@example.com"
] | you@example.com |
3a51d7a1f84fc82bf0161ddbe7a7630337299c23 | 59a86a5b14dee6c6a328d85411776085c555fc2f | /.vim/global_ycm_extra_conf.py | 7d90e175f36b95813b5290575e3cdf334328ee45 | [] | no_license | kanadas/dotfiles | 6b6de909eb2265db7aa35afc1d1df8f4c5d5d298 | 783fdb9d53b76e648e5bacda3ce8fca19433c594 | refs/heads/master | 2023-08-08T11:57:45.753599 | 2023-07-31T20:27:19 | 2023-07-31T20:27:19 | 204,177,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | def FlagsForFile( filename, **kwargs ):
return {
'flags': [ '-x', 'c++', '-Wall', '-Wextra', '-O2', '-std=c++17' ],
}
| [
"kanas.tomasz@gmail.com"
] | kanas.tomasz@gmail.com |
6e2f1ee720e14dced37da2bed7d6c111bd4c19a6 | 24a7bc26a0daaa0e68d7923638d421debe01c456 | /src/signup/forms.py | 3501963697610a6bfdecc8dff0c4f8ce861067e9 | [] | no_license | promer94/opendata | 9ca56bf9e3cb57a1660d0dba6ebfabca7c535efe | 219cdd5ed817bce7f1a4bf633476069d06ae9455 | refs/heads/master | 2022-12-12T22:01:38.097929 | 2019-07-09T08:44:45 | 2019-07-09T08:44:45 | 141,059,631 | 0 | 0 | null | 2022-12-08T02:16:44 | 2018-07-15T21:27:53 | CSS | UTF-8 | Python | false | false | 4,176 | py | from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.utils.translation import gettext as _
def forbidden_username_validator(value):
    """Reject usernames that collide with reserved site routes/words.

    The comparison is case-insensitive; raises ValidationError with
    code='name_forbid' on a match.  Improvement: the original list scan
    contained duplicates ('blog' and 'blogs' appeared twice); a frozenset
    deduplicates and gives O(1) membership.
    """
    forbidden_usernames = frozenset([
        'admin', 'settings', 'news', 'about', 'help',
        'signin', 'signup', 'signout', 'terms', 'privacy',
        'cookie', 'new', 'login', 'logout', 'administrator',
        'join', 'account', 'username', 'root', 'blog',
        'user', 'users', 'billing', 'subscribe', 'reviews',
        'review', 'blogs', 'edit', 'mail', 'email',
        'home', 'job', 'jobs', 'contribute', 'newsletter',
        'shop', 'profile', 'register', 'auth',
        'authentication', 'campaign', 'config', 'delete',
        'remove', 'forum', 'forums', 'download',
        'downloads', 'contact', 'feed', 'feeds',
        'faq', 'intranet', 'log', 'registration', 'search',
        'explore', 'rss', 'support', 'status', 'static',
        'media', 'setting', 'css', 'js', 'follow',
        'activity', 'questions', 'articles', 'network',
    ])
    if value.lower() in forbidden_usernames:
        raise ValidationError(
            _('This is a reserved word.'), code='name_forbid')
def invalid_username_validator(value):
    """Reject usernames containing '@', '+' or '-'."""
    if any(sep in value for sep in ('@', '+', '-')):
        raise ValidationError(
            _('Enter a valid username.'), code='name_invalid',)
def unique_email_validator(value):
    """Reject an email address already registered (case-insensitive)."""
    already_taken = User.objects.filter(email__iexact=value).exists()
    if already_taken:
        raise ValidationError(
            _('User with this Email already exists.'), code='email_exists')
def unique_username_ignore_case_validator(value):
    """Reject a username that is already taken, ignoring case."""
    already_taken = User.objects.filter(username__iexact=value).exists()
    if already_taken:
        raise ValidationError(
            _('User with this Username already exists.'), code='name_exists')
class SignUpForm(forms.ModelForm):
    """Registration form: User credentials plus body-metric extras.

    The weight/height fields are declared on the form only -- they are not
    in ``Meta.fields`` -- so the view is presumably expected to store them
    elsewhere (TODO confirm against the signup view).
    """
    username = forms.CharField(
        label='Username',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        max_length=30,
        required=True,
        validators=[forbidden_username_validator,
                    invalid_username_validator, unique_username_ignore_case_validator]
    )
    password = forms.CharField(
        label='Password',
        widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    confirm_password = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'form-control'}),
        label="Confirm your password",
        required=True)
    email = forms.CharField(
        label='Email',
        widget=forms.EmailInput(attrs={'class': 'form-control'}),
        required=True,
        max_length=75,
        validators=[unique_email_validator, validate_email])
    current_weight = forms.IntegerField(
        label='Current weight(kg)',
        widget=forms.NumberInput(attrs={'class': 'form-control'}),
        required=True,
        min_value=40,
        max_value=300)
    current_height = forms.IntegerField(
        label='Current height(cm)',
        widget=forms.NumberInput(attrs={'class': 'form-control'}),
        required=True,
        min_value=100,
        max_value=250)
    goal_weight = forms.IntegerField(
        label='Goal weight(kg)',
        widget=forms.NumberInput(attrs={'class': 'form-control'}),
        required=True,
        min_value=40,
        max_value=300)
    class Meta:
        model = User
        exclude = ['last_login', 'date_joined']
        fields = ['username', 'email', 'password', 'confirm_password', ]
    def clean(self):
        """Cross-field validation: both password entries must match."""
        super(SignUpForm, self).clean()
        password = self.cleaned_data.get('password')
        confirm_password = self.cleaned_data.get('confirm_password')
        if password and password != confirm_password:
            # The mismatch error is attached to the 'password' field.
            message = 'Passwords don\'t match'
            self.add_error('password', message)
        return self.cleaned_data
| [
"yixuanxu94@gmail.com"
] | yixuanxu94@gmail.com |
49496dffd58966113b9481eed7b4792e626e4b6f | 39cacea0061a9806f0099801b43ab6191caa88e6 | /backEnd/cParent.py | 998e6bb10b03e08480043be617a727b660df4ed7 | [] | no_license | matthewJ1981/ODU_411_Project | e2e4e24078462d6bd9fcb9e02641d3b85c58e5fd | 46b07f0de7dafe2add9baebaf9943a9757a65a18 | refs/heads/master | 2023-07-01T23:15:22.452058 | 2021-08-08T23:45:19 | 2021-08-08T23:45:19 | 394,091,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,774 | py | import threading
from cChild import Child
from cHelpResponse import HelpResponse
import datetime as dt
class Parent():
    """In-memory model of one parent row plus its children and help responses.

    Mutations are queued in ``self.updates`` as ``(table, index_or_column)``
    tuples and only written to the database when pushUpdates() is called.

    Bug fixes vs. the original:
      * setPhoneNumber wrote ``self.phone`` while every reader
        (getPhoneNumber, updateMembers, pushUpdates) uses ``self.phoneNum``,
        so the new number was silently dropped.
      * print() iterated the misspelled ``self.helpReponses`` and raised
        AttributeError.
    """
    def __init__(self, In, db):
        """``In`` is one parent row tuple; ``db`` is the database facade."""
        self.updateMembers(In)
        self.db = db
        self.initChildren()
        self.initHelpResponses()
        self.updates = []
    def updateMembers(self, In):
        """Unpack a parent row tuple into the instance attributes."""
        (self.id,
         self.username,
         self.password,
         self.email,
         self.firstName,
         self.lastName,
         self.phoneNum) = In
    # self.updates stores a list of tuples of the form (table, index)
    def pushUpdates(self):
        """Write every queued change to the database, then clear the queue."""
        for t in self.updates:
            if t[0] == 'child':
                # Newly added child: insert and record the generated id.
                child = self.children[t[1]]
                new_id = self.db.child.add(child.getName(), child.getAge(), self.id)
                child.setID(new_id)
            elif t[0] == "help_response":
                # Newly added help response: insert and record the generated id.
                response = self.helpResponses[t[1]]
                new_id = self.db.helpResponse.add(response.getDateTime(), self.id, response.getChildID(), response.getMsg())
                response.setID(new_id)
            else:
                # Parent column update; t[1] names the changed column.
                if t[1] == 'password':
                    self.db.parent.setPassword(self.id, self.password)
                elif t[1] == 'email':
                    self.db.parent.setEmail(self.id, self.email)
                elif t[1] == 'firstName':
                    self.db.parent.setFirstName(self.id, self.firstName)
                elif t[1] == 'lastName':
                    self.db.parent.setLastName(self.id, self.lastName)
                elif t[1] == 'phone':
                    self.db.parent.setPhoneNumber(self.id, self.phoneNum)
        self.updates = []
    def update(self, In=None):
        """Refresh the cached members from the database (or from ``In``)."""
        if self.id == -1:
            print("Invalid id")
            return
        if In is None:
            In = self.db.parent.getByID(self.id)
        print("Update: " + str(In))
        self.updateMembers(In)
    def initChildren(self):
        """Load this parent's children from the database."""
        self.children = [Child(row, self.db)
                         for row in self.db.getChildrenByID(self.id)]
    def initHelpResponses(self):
        """Load this parent's help responses, newest id first."""
        responses = [HelpResponse(row, self.db)
                     for row in self.db.helpResponse.getByResponder(self.id)]
        responses.sort(reverse=True, key=lambda res: res.id)
        self.helpResponses = responses
    def addChild(self, name, age):
        """Queue a new child; it reaches the database via pushUpdates()."""
        self.children.append(Child((-1, name, age, 0, 0, 1, self.id, ""), self.db))
        self.updates.append(('child', len(self.children) - 1))
    def addHelpResponse(self, msg, dateTime, childID):
        """Queue a new help response; it reaches the database via pushUpdates()."""
        self.helpResponses.append(HelpResponse((-1, dateTime, msg, None, self.id, None, childID, 1), self.db))
        self.updates.append(('help_response', len(self.helpResponses) - 1))
    # --- plain accessors -------------------------------------------------
    def getChildren(self):
        return self.children
    def getHelpResponses(self):
        return self.helpResponses
    def getID(self):
        return self.id
    def getUserName(self):
        return self.username
    def getPassword(self):
        return self.password
    def getEmail(self):
        return self.email
    def getFirstName(self):
        return self.firstName
    def getLastName(self):
        return self.lastName
    def getPhoneNumber(self):
        return self.phoneNum
    # --- mutators: update the attribute and queue a column write ---------
    def setPassword(self, newValue):
        # Stored hashed; hashing is delegated to the db facade.
        self.password = self.db.parent.hashPassword(newValue)
        self.updates.append(('parent', 'password'))
    def setEmail(self, newValue):
        self.email = newValue
        self.updates.append(('parent', 'email'))
    def setFirstName(self, newValue):
        self.firstName = newValue
        self.updates.append(('parent', 'firstName'))
    def setLastName(self, newValue):
        self.lastName = newValue
        self.updates.append(('parent', 'lastName'))
    def setPhoneNumber(self, newValue):
        # Fix: was ``self.phone = newValue`` -- a dead attribute nothing reads.
        self.phoneNum = newValue
        self.updates.append(('parent', 'phone'))
    def print(self):
        """Debug dump of this parent, its children and help responses."""
        print("Name: " + str(self.firstName) + " Phone: " + str(self.phoneNum))
        for c in self.children:
            c.print()
        # Fix: was the misspelled ``self.helpReponses`` (AttributeError).
        for r in self.helpResponses:
            r.print()
"67207999+matthewJ1981@users.noreply.github.com"
] | 67207999+matthewJ1981@users.noreply.github.com |
02a2622141424da0610a78ceb8ae5303fe1b3056 | bffc27c54f0e0712a195e7fc7e50b6399da20074 | /aes/aes_.py | 5b233bcf78bac0b1a3684d1592dc6e4fb1f3b4ba | [] | no_license | mingmingli916/MyAITools | 6d85a6a8d2770b5d69f86baabcf74f37ad2cc9cb | 27fae7c92331fe801b97e15e0ff8900e9e323ea9 | refs/heads/master | 2023-04-01T05:26:29.310976 | 2021-04-22T02:54:23 | 2021-04-22T02:54:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | # Author: Hack Chyson
# [2018-12-20 11:27:27]
import sys
import base64
# package: cryptodome
from Crypto.Cipher import AES
# all the encrypt and decrypt operations is based on binary data
# this avoid many encoding problems
# base64 is used for human reading
# because you cannot copy a binary content in the console or somewhere else
# base64 can convert any character,
# so it is used between binary and character
# if the text is not a multiple of 16, add some characters
# so that it become a multiple of 16
def multiple_of_16(text):
    """Right-pad *text* with spaces up to a multiple of 16 and return bytes.

    AES operates on 16-byte blocks; the decrypt path strips the padding
    spaces again with rstrip(' ').
    """
    padding = (-len(text)) % 16
    return (text + ' ' * padding).encode()
# key = '123456'
# text = 'hello world'
# CLI: python aes_.py <enc|dec> <key> <text>
# NOTE(review): raises IndexError when fewer than three arguments are given.
mode = sys.argv[1]
key = sys.argv[2]
text = sys.argv[3]
# all the encryption and decryption are based on binary
# NOTE(review): the key is only padded to a multiple of 16 bytes; AES keys
# must be 16/24/32 bytes, so keys longer than 32 characters will fail.
key_bin = multiple_of_16(key)
text_bin = multiple_of_16(text)
aes = AES.new(key_bin, AES.MODE_ECB)  # init cipher (ECB: demo use only, leaks patterns)
def encrypt():
    """AES-encrypt the module-level padded plaintext and return it base64-encoded.

    Fix: the original used base64.encodebytes, which inserts a newline
    every 76 characters; for ciphertexts longer than 57 bytes the
    rstrip('\\n') only removed the trailing newline and left embedded
    ones in the output.  b64encode produces a single unwrapped line
    (decodebytes on the decrypt side accepts either form).
    """
    encrypted_bin = aes.encrypt(text_bin)
    return base64.b64encode(encrypted_bin).decode('utf8')
def decrypt():
    """Base64-decode the module-level ``text``, AES-decrypt it and strip
    the space padding that multiple_of_16() added."""
    ciphertext = base64.decodebytes(text.encode('utf8'))
    plaintext_bin = aes.decrypt(ciphertext)
    return plaintext_bin.decode('utf8').rstrip(' ')
# Dispatch on the mode argument; anything unrecognised prints usage.
if 'enc' == mode or 'encrypt' == mode:
    print(encrypt())
elif 'dec' == mode or 'decrypt' == mode:
    print(decrypt())
else:
    print('usage: python aes.py enc|dec <key> <text>')
| [
"chyson@aliyun.com"
] | chyson@aliyun.com |
d573f7332cfe153f609f7786b789b43c67bacf8c | 2a31c665347e28871674a9aad41019a3adf1dcd2 | /oraclenosql/Jython_oraclenosql.py | 7d442a40a4e282301f14ad3d9a287aa3e76d8381 | [] | no_license | abruneauOracle/14a | 2a2c59e5d183ef86664de2cb3da7feb6f27aa483 | 77f0c1cce70231bc6778998ddd50c2faec659b2d | refs/heads/master | 2021-01-22T22:08:45.466056 | 2012-01-23T19:47:15 | 2012-01-23T19:47:15 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 15,275 | py | # Jython script to manipulate data in Oracle NoSQL databases, community edition.
# -*- coding: iso-8859-1 -*-
import sys
import jarray
import array
import inspect
from java.util import ArrayList
from java.util import List
from java.util import Iterator
from java.util import SortedMap
def main():
    """Parse CLI flags (-storename=..., -connectionstring=..., -kvclientpath=...,
    -test, help), extend sys.path with the KV client jar, import the Oracle
    NoSQL client classes and optionally run the self test."""
    # Read arguments passed in the command line.
    # sys.argv[0] contains the name of the script file, e.g.
    # Jython_oraclenosql.py.
    # optparse is deprecated since Python 2.7, so better don't use Jython's
    # equivalent.
    # As part of the import block, kvclient.jar has to be passed to the script
    # like:
    # sys.path.append('/opt/kv-1.2.123/lib/kvclient-1.2.123.jar')
    # I have absolutely no idea of how to append a jar file to the sy.path like
    # sys.path.append('/opt/kv-1.2.123/lib/kvclient-1.2.123.jar') in the
    # command line, so I do it here with brute force:
    global storeName
    global connectionString
    global kvclientpath
    arglen = len(sys.argv)
    isTest = False
    if (arglen > 1):
        for i in range (1, arglen):
            # NOTE(review): lower() is applied to the whole argument, so the
            # values (store name, jar path) are lower-cased too.
            myarg = (sys.argv[i]).lower()
            if (myarg == "-test"):
                isTest = True
                continue
            if (myarg.startswith("-storename")):
                myArray = myarg.split("=")
                storeName = myArray[1]
                continue
            if (myarg.startswith("-connectionstring")):
                myArray = myarg.split("=")
                connectionString = myArray[1]
                continue
            if (myarg.startswith("-kvclientpath")):
                myArray = myarg.split("=")
                kvclientpath = myArray[1]
                sys.path.append(kvclientpath)
                continue
            # NOTE(review): unlike the flags above, "help" is matched against
            # the original (un-lowered) argument.
            if (sys.argv[i] == "help"):
                _printUsage()
                break
    # Import the KV client classes only after the jar path may have been
    # appended to sys.path; publish them as module globals.
    global KVStore
    global KVStoreConfig
    global KVStoreFactory
    global Key
    global Value
    global ValueVersion
    global Direction
    from oracle.kv import KVStore
    from oracle.kv import KVStoreConfig
    from oracle.kv import KVStoreFactory
    from oracle.kv import Key
    from oracle.kv import Value
    from oracle.kv import ValueVersion
    from oracle.kv import Direction
    if isTest:
        test(storeName, connectionString)
# Prints usage message and exit.
def _printUsage():
    """Print the command-line usage message (does not exit)."""
    print "Usage: "
    print "Interactive mode:"
    print "jython -i /absolute/path/Jython_oraclenosql.py arg1 arg2 arg3 arg4"
    print "Non-Interactive mode: "
    print "jython /absolute/path/Jython_oraclenosql.py arg1 arg2 arg3 arg4"
    print "Valid arguments and examples:"
    print "-kvclientpath=/opt/kv-1.2.123/lib/kvclient-1.2.123.jar"
    print "-storename=Name_of_the_store"
    print "-connectionstring=host_name:port"
    print "-test"
    print "help"
# Prints errorMessage, sets it to "" and returns.
def _printErrorMessage(myErrorMessage):
    """Print *myErrorMessage* if non-empty and reset the global errorMessage."""
    global errorMessage
    errorMessage = myErrorMessage
    if (errorMessage != ""):
        print errorMessage
        errorMessage = ""
    return
def _validateConnectionString(connectionString):
# connectionString should be string:integer.
global errorMessage
errorMessage = ""
myArray = connectionString.split(":")
if (len(myArray) != 2):
errorMessage = "ERROR: The connection string must include the host name \n"
errorMessage += "and the port in the form host:port.\n"
errorMessage += "e.g. connect(\"myhost\",\"localhost:5000\")"
return errorMessage
try:
int (myArray[1])
except ValueError:
errorMessage = "ERROR: The port must be an Integer."
return errorMessage
def connect(storeName, connectionString):
    """Open the global ``store`` handle against an Oracle NoSQL store.

    Validates both arguments first; on any failure prints an error and
    returns without raising.  On success sets the module globals
    ``store`` and ``positiveMessage``.
    """
    # Catch a java exception.
    # http://stackoverflow.com/questions/2045786/how-to-catch-an-exception
    # -in-python-and-get-a-reference-to-the-exception-withou
    global errorMessage
    global positiveMessage
    global store
    # NOTE(review): assert is stripped when running with -O; the string
    # check below handles connectionString more gracefully.
    assert isinstance(storeName, str), "ERROR: Please enter a String as the name of the store."
    if not isinstance (connectionString, str):
        print ("ERROR: Please enter a String as the connections string.")
        print ("e.g. connect (\"mystore\",\"localhost:5000\")")
        return
    errorMessage = _validateConnectionString(connectionString)
    if (errorMessage != ""):
        print errorMessage
        errorMessage = ""
        return
    hosts = [connectionString]
    try:
        kVStoreConfig = KVStoreConfig(storeName, hosts)
        store = KVStoreFactory.getStore(kVStoreConfig)
        message = "Connected to the Oracle NoSQL store: \"" + storeName + "\"."
        print message
        positiveMessage = "connect: passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "ERROR: Connection to the store: " + str(instance)
        print errorMessage
        errorMessage = ""
        return
def _checkStore():
    """Return "" when the global ``store`` exists, else an instructive error.

    Evaluating the bare name ``store`` raises NameError when connect()
    has not been called yet; the bare except converts that into the
    error text.
    """
    global errorMessage
    errorMessage = ""
    try:
        global store
        store
    except:
        errorMessage = "ERROR: Define the store connection first. \n"
        errorMessage += "Type: \n"
        errorMessage += "connect(\"Store_Name\", \"Connection_String\")\n."
        errorMessage += "e.g. connect(\"mystore\",\"localhost:5000\")"
    return errorMessage
def _prepareKey(keysString):
    """Turn a "major/.../-/minor/..." path string into an oracle.kv Key.

    Components before the "-" separator are major, those after it are
    minor.  On bad input sets the global errorMessage and returns None;
    otherwise stores and returns the Key in the global ``myKey``.
    """
    # e.g. keysString = "Test/HelloWorld/Java/-/message_text"
    # myKey contains either an error message or a Key.
    global myKey
    majorComponents = ArrayList()
    minorComponents = ArrayList()
    global errorMessage
    if not isinstance (keysString, str):
        errorMessage = "ERROR: Please enter a String as Key."
        return
    keysArray = keysString.split("/")
    isMajor = True
    for i in range (0, len(keysArray)):
        if (keysArray [i] == "-"):
            isMajor = False
        if (isMajor):
            majorComponents.add(keysArray [i])
        else:
            # The "-" marker itself is not stored as a component.
            if (keysArray [i] != "-"):
                minorComponents.add (keysArray [i])
    if ((len (majorComponents) > 0) & (len (minorComponents) > 0)):
        myKey = Key.createKey(majorComponents, minorComponents)
    elif ((len (majorComponents) > 0) & (len (minorComponents) <= 0)):
        myKey = Key.createKey(majorComponents)
    else:
        errorMessage = "ERROR: The String could not be transformed to a Key."
        return
    return myKey
def get(keysString):
    """Fetch and print the value stored under *keysString*.

    e.g. get("Test/HelloWorld/Java/-/message_text")
    """
    # inspect.stack()[0][3] is this function's own name, which doubles
    # as the KVStore method name dispatched by _storeFunctions.
    what = inspect.stack()[0][3]
    valueString = ""
    _storeFunctions(what, keysString, valueString)
    return
def _storeFunctions(what, keysString, valueString):
    """Shared implementation for get/put/putIfPresent/putIfAbsent/delete.

    *what* is the KVStore method name to invoke; read-style operations
    ("get"/"delete") take only the key, write-style ones also take the
    value.  Results/errors are printed; outcome is reported through the
    positiveMessage/errorMessage globals.
    """
    # Use jarray to convert a String to a Java Bytes Array(String.getBytes()).
    # store.delete(key) returns a bool.
    # store.get returns None or the value.
    global errorMessage
    global positiveMessage
    global myKey
    global myValue
    myKey = _prepareKey(keysString)
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    errorMessage = _checkStore()
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    if not isinstance (valueString, str):
        message = "ERROR: Please enter a String as Value."
        print (message)
        return
    # Resolve the KVStore method dynamically by name.
    store_function = getattr (store, "%s" % what)
    try:
        if ((what == "delete") | (what == "get")):
            valueVersion = store_function(myKey)
            if isinstance(valueVersion, bool):
                print (valueVersion)
            elif (valueVersion is not None):
                myValue = valueVersion.getValue().getValue().tostring()
                print (myValue)
            else:
                print (valueVersion)
        else:
            myValue = Value.createValue(jarray.array(valueString, 'b'))
            store_function(myKey, myValue)
        positiveMessage = what + ": passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "Error in store operation: " + str(instance)
        print errorMessage
        errorMessage = ""
        return
    return
def put(keysString, valueString):
    """Unconditionally store *valueString* under *keysString*."""
    # Usage: on a single line,
    # put("Test/HelloWorld/Jython/-/message_text", "Hello World")
    what = inspect.stack()[0][3]
    _storeFunctions(what, keysString, valueString)
    return
def putIfPresent(keysString, valueString):
    """Store *valueString* only when the key already exists."""
    # Usage: on a single line,
    # putIfPresent("Test/HelloWorld/Jython/-/message_text", "Hello World")
    what = inspect.stack()[0][3]
    _storeFunctions(what, keysString, valueString)
    return
def putIfAbsent(keysString, valueString):
    """Store *valueString* only when the key does not exist yet."""
    # Usage: on a single line,
    # putIfAbsent("Test/HelloWorld/Jython/-/message_text", "Hello World")
    what = inspect.stack()[0][3]
    _storeFunctions(what, keysString, valueString)
    return
def delete(keysString):
    """Delete the record stored under *keysString*."""
    # e.g. delete("Test/HelloWorld/Java/-/message_text")
    what = inspect.stack()[0][3]
    valueString = ""
    _storeFunctions(what, keysString, valueString)
    return
def multiDelete(keysString):
    """Delete all records sharing the same major path components."""
    # To delete multiple records sharing the same major path components.
    # e.g. multiDelete("Test/HelloWorld/Java/")
    global errorMessage
    global positiveMessage
    global myKey
    myKey = _prepareKey(keysString)
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    errorMessage = _checkStore()
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    try:
        store.multiDelete(myKey, None, None)
        positiveMessage = "multiDelete: passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "Error in multiDelete: " + str(instance)
        print errorMessage
        errorMessage = ""
        return
    return
def multiGet(keysString):
    """Print the values of all records sharing the same major path components."""
    # To get multiple records sharing the same major path components.
    # e.g. multiGet("Test/HelloWorld/Java/")
    global errorMessage
    global positiveMessage
    global myKey
    myKey = _prepareKey(keysString)
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    errorMessage = _checkStore()
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    try:
        result = store.multiGet(myKey, None, None)
        for myRecord in result.entrySet():
            myValue = myRecord.getValue().getValue().getValue().tostring()
            print(myValue)
        positiveMessage = "multiGet: passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "Error in multiGet: " + str(instance)
        print errorMessage
        errorMessage = ""
        return
    return
def storeIterator(keysString):
    """Iterate and print all key/value pairs under a PARTIAL major path."""
    # This only works for iterating over PARTIAL major components.
    # Usage: storeIterator("Test/HelloWorld")
    global errorMessage
    global positiveMessage
    global myKey
    myKey = _prepareKey(keysString)
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    errorMessage = _checkStore()
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    try:
        iterator = store.storeIterator(Direction.UNORDERED, 0, myKey, None, None)
        while (iterator.hasNext()):
            keyValueVersion = iterator.next()
            key = keyValueVersion.getKey().toString()
            valueArray = keyValueVersion.getValue().getValue()
            # NOTE(review): the result of this bare call is discarded.
            valueArray.tostring()
            # no attr valueArray.toString()
            print (key + ", " + valueArray.tostring().decode("iso-8859-1"))
        positiveMessage = "storeIterator: passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "Error in storeIterator: " + str(instance)
        print errorMessage
        errorMessage = ""
        return
    return
def countAll():
    """Count every record in the store by walking the keys iterator."""
    global errorMessage
    global positiveMessage
    errorMessage = _checkStore()
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    try:
        iterator = store.storeKeysIterator(Direction.UNORDERED, 0)
        i = 0
        while (iterator.hasNext()):
            i = i + 1
            iterator.next()
        print ("Total number of Records: " + str(i))
        positiveMessage = "countAll: passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "Error in countAll(): " + str(instance)
        print errorMessage
        errorMessage = ""
        return
    return
def getAll():
    """Print every key in the store (one per line)."""
    global errorMessage
    global positiveMessage
    errorMessage = _checkStore()
    if (errorMessage != ""):
        print (errorMessage)
        errorMessage = ""
        return
    try:
        iterator = store.storeKeysIterator(Direction.UNORDERED, 0)
        while (iterator.hasNext()):
            element = iterator.next()
            # NOTE(review): valueString is fetched but never used; only the
            # key is printed.
            valueString = store.get(element).getValue().getValue().tostring()
            print (element.toString())
        positiveMessage = "getAll: passed"
    except:
        instance = sys.exc_info()[1]
        errorMessage = "Error in getAll(): " + str(instance)
        print errorMessage
        errorMessage = ""
        return
    return
def version():
    """Print this script's version string."""
    print ("0.1.5")
def _evalPositiveMessage():
    """Score one self-test step and reset positiveMessage.

    Counts the step as passed when ``positiveMessage`` is non-empty,
    otherwise prints "NOT PASSED"; the tested-functions counter is
    always incremented.

    Fix: the original compared strings with ``is not ""`` (identity),
    which only behaves like inequality thanks to CPython's interning of
    the empty-string literal and is a SyntaxWarning on modern Pythons;
    use a real equality test.
    """
    global positiveMessage
    global nFunctionsPassedTest
    global nFunctionsTested
    if (positiveMessage != ""):
        print (positiveMessage)
        nFunctionsPassedTest = nFunctionsPassedTest + 1
    else:
        print ("NOT PASSED")
    positiveMessage = ""
    nFunctionsTested = nFunctionsTested + 1
    return
def test(storeName, connectionString):
    """Self test: exercise every public operation against a scratch keyspace
    ("MyTest/...") and report how many functions passed."""
    # Test all functions.
    global positiveMessage
    global nFunctionsPassedTest
    global nFunctionsTested
    nFunctionsPassedTest = 0
    nFunctionsTested = 0
    connect(storeName, connectionString)
    _evalPositiveMessage()
    countAll()
    _evalPositiveMessage()
    put("MyTest/MComp2/-/mComp1/mComp2","Johannes Läufer")
    _evalPositiveMessage()
    get("MyTest/MComp2/-/mComp1/mComp2")
    _evalPositiveMessage()
    putIfAbsent("MyTest/MComp2/-/mComp1/mComp3","Juanito el Caminante")
    _evalPositiveMessage()
    putIfPresent("MyTest/MComp2/-/mComp1/mComp2","Johannes Lufer 2")
    _evalPositiveMessage()
    getAll()
    _evalPositiveMessage()
    storeIterator("MyTest")
    _evalPositiveMessage()
    multiGet("MyTest/MComp2")
    _evalPositiveMessage()
    delete("MyTest/MComp2/-/mComp1/mComp2")
    _evalPositiveMessage()
    multiDelete("MyTest/MComp2")
    _evalPositiveMessage()
    print (str(nFunctionsPassedTest) + " functions passed out of " + \
           str(nFunctionsTested))
    countAll()
    nFunctionsPassedTest = 0
    nFunctionsTested = 0
    return
# Module-level shared state and entry point.
# NOTE(review): "global" statements at module level are no-ops in Python;
# the assignments below are what actually create these globals.
global storeName
global connectionString
global kvclientpath
global store
global myKey
global myValue
global errorMessage
global positiveMessage
global nFunctionsPassedTest
global nFunctionsTested
errorMessage = ""
positiveMessage = ""
nFunctionsPassedTest = 0
nFunctionsTested = 0
# Defaults.
storeName = "mystore"
connectionString = "localhost:5000"
kvclientpath = "/opt/kv-1.2.123/lib/kvclient-1.2.123.jar"
# Start.
main()
"yeysus@googlemail.com"
] | yeysus@googlemail.com |
76a7107978730e9625dbaed329e65f2e265b36d6 | 70b4d0dd59bdf3ef9cabefdf4c0d7e3174aae617 | /archive/oz/check.py | 2cde9a0ba890dba4cebed0c6958405a5eebe12a7 | [] | no_license | ab/bin | 0407002cd2d2614893f6a28e8d07ada5ecffe0d0 | f7b3c776bbc101de3f32afb0a84b5498cb7b3df0 | refs/heads/main | 2023-04-14T12:03:09.434014 | 2023-04-06T16:29:41 | 2023-04-06T16:29:41 | 5,051,936 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,648 | py | #!/usr/bin/env python
import re
import sys
import optparse
import zlib
def checksum(fname, sum_fun, binary_mode=False, raw=False):
    """Stream *fname* ('-' = stdin) through *sum_fun* (zlib.crc32/adler32).

    Returns the masked sum as an 8-digit lowercase hex string, or as an
    int when *raw* is true.
    """
    if fname == '-':
        f = sys.stdin
    else:
        opts = 'rb' if binary_mode else 'r'
        f = open(fname, opts)
    # NOTE(review): seeds the sum with sum_fun('') and reads text mode by
    # default -- this is Python 2 code; on Python 3 zlib needs bytes.
    running_sum = sum_fun('')
    while True:
        buf = f.read(4096)
        if not buf:
            break
        running_sum = sum_fun(buf, running_sum)
    # Mask to 32 bits: zlib sums can be negative on Python 2.
    if raw:
        return running_sum & 0xffffffff
    else:
        return '%08x' % (running_sum & 0xffffffff)
def check_list(checkfile_name, sum_fun, win_mode=False):
    """Verify files listed in a checksum file ('-' = stdin).

    Default format is "<8-hex> [ |*]<file>" (optionally preceded by
    "?ADLER32"); *win_mode* expects "<file> <8-hex>".  Lines starting
    with '#' or ';' are comments.  Prints OK/FAILED per file.
    """
    if checkfile_name == '-':
        f = sys.stdin
    else:
        f = open(checkfile_name, 'r')
    if win_mode:
        exp = r'^(?P<file>.*) (?P<value>[a-fA-F0-9]{8})$'
    else:
        exp = r'^(?P<value>[a-fA-F0-9]{8}) (\?ADLER32)?( |\*)(?P<file>.*)$'
    regex = re.compile(exp)
    for line in f:
        if line.startswith('#') or line.startswith(';'):
            continue
        x = regex.search(line.rstrip('\r\n'))
        if not x:
            print 'WARNING: Failed to parse:', line
            continue
        # Normalise: lowercase hex, forward-slash paths.
        value = x.group('value').lower()
        fname = x.group('file').replace('\\', '/')
        print fname+':',
        try:
            newsum = checksum(fname, sum_fun)
        except IOError, e:
            # errno 2 == file not found; anything else is unexpected.
            if e.errno != 2:
                raise
            print 'FAILED open or read'
        else:
            if newsum == value:
                print 'OK'
            else:
                print 'FAILED'
def make_list(filenames, sum_fun):
    """Print a "<checksum> <name>" line for every file (sumfile format)."""
    for name in filenames:
        c = checksum(name, sum_fun)
        print '%s %s' % (c, name)
if __name__ == '__main__':
p = optparse.OptionParser(usage='%prog [OPTION] [FILE]...')
p.add_option('-c', '--check', dest='check', action='store_true',
help='read checksums from the FILEs and check them')
p.add_option('-w', '--win-format', dest='win_format', action='store_true',
help='assume checksum files are in windows format')
p.add_option('-a', '--algo', dest='algo', default='crc32',
help='algorithm: crc32 or adler32 [default: %default]')
opts, args = p.parse_args()
if len(args) == 0:
files = ['-']
else:
files = args
if opts.algo in ['crc', 'crc32']:
algo = zlib.crc32
elif opts.algo in ['adler', 'adler32']:
algo = zlib.adler32
else:
p.error('unknown algorithm: %s' % opts.algo)
if opts.check:
for name in files:
check_list(name, algo, opts.win_format)
else:
make_list(files, algo)
| [
"git@abrody.com"
] | git@abrody.com |
3a20f55f11d6ba81fd3043d1c0adefbc2d694ea8 | 4c4fea21488d8baea2256930c71000e4ee3141da | /first.py | 468761bce755e6b7db392447e71ee3bcbdc2af66 | [] | no_license | wyattwhat/cigarette-test | f9edae78c02bb675aea726f5490a67d57b07c88b | 1cc695b7bf907fa60071c577228cb5f5cee15427 | refs/heads/master | 2021-03-24T13:49:14.682724 | 2016-11-27T02:09:40 | 2016-11-27T02:09:40 | 74,861,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import random
import sys
import os
age = input('what be yo age')
if age > 18 :
print('ciggie 4 yo')
elif age >= 69 :
print('yo 2 old 4 ciggie')
else :
print('yo 2 old to be ciggin')
| [
"noreply@github.com"
] | noreply@github.com |
d8a5803e900c1a81f57eb6e8232a6067e465a51c | 3c300c79359f1c989df4403835abbc5513364fee | /bitshares_tradehistory_analyzer/parser.py | 56c9520cd316cf14dfea532af60b1ebf20c94920 | [
"MIT"
] | permissive | ds-voting/bitshares-tradehistory-analyzer | 73ef81a1748fabef055f512b46366dc848c09a15 | 1dfd293dd6b4d692a078c403b79355fef0165799 | refs/heads/master | 2020-07-23T15:06:04.733405 | 2019-07-19T13:51:33 | 2019-07-19T13:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | import copy
import logging
from decimal import Decimal
from bitshares.account import Account
from bitshares.amount import Amount
from bitshares.asset import Asset
from .consts import LINE_DICT_TEMPLATE
log = logging.getLogger(__name__)
class Parser:
    """ Entries parser: converts elastic-wrapper history entries into flat
    dict "lines" based on LINE_DICT_TEMPLATE.

        :param BitShares bitshares_instance: node connection used to resolve
            accounts, amounts and assets
        :param Account account: the account whose history is being parsed
    """

    def __init__(self, bitshares_instance, account):
        self.bitshares = bitshares_instance
        self.account = Account(account, bitshares_instance=self.bitshares)

    def parse_transfer_entry(self, entry):
        """ Parse single transfer entry into a dict object suitable for writing line

            :param dict entry: elastic wrapper entry
            :return: dict object suitable for writing line
        """
        op_id = entry['account_history']['operation_id']
        op_date = entry['block_data']['block_time']
        op = entry['operation_history']['op_object']
        data = copy.deepcopy(LINE_DICT_TEMPLATE)

        amount = Amount(op['amount_'], bitshares_instance=self.bitshares)
        from_account = Account(op['from'], bitshares_instance=self.bitshares)
        to_account = Account(op['to'], bitshares_instance=self.bitshares)
        fee = Amount(op['fee'], bitshares_instance=self.bitshares)
        log.info('Transfer: {} -> {}, {}'.format(from_account.name, to_account.name, amount))

        # An outgoing transfer is a withdrawal (the sender also pays the
        # fee); anything else is a deposit into this account.
        if from_account.name == self.account.name:
            data['kind'] = 'Withdrawal'
            data['sell_cur'] = amount.symbol
            data['sell_amount'] = amount.amount
            data['fee_cur'] = fee.symbol
            data['fee_amount'] = fee.amount
        else:
            data['kind'] = 'Deposit'
            data['buy_cur'] = amount.symbol
            data['buy_amount'] = amount.amount

        data['comment'] = op_id
        data['date'] = op_date
        return data

    def parse_trade_entry(self, entry):
        """ Parse single trade entry (fill order) into a dict object suitable for writing line

            :param dict entry: elastic wrapper entry
            :return: dict object suitable for writing line
        """
        op_id = entry['account_history']['operation_id']
        op_date = entry['block_data']['block_time']
        # Fixed: the original fetched op_object twice; one lookup suffices.
        op = entry['operation_history']['op_object']
        data = copy.deepcopy(LINE_DICT_TEMPLATE)

        # Raw integer amounts are scaled down by the asset precision.
        sell_asset = Asset(op['pays']['asset_id'], bitshares_instance=self.bitshares)
        sell_amount = Decimal(op['pays']['amount']).scaleb(-sell_asset['precision'])
        buy_asset = Asset(op['receives']['asset_id'], bitshares_instance=self.bitshares)
        buy_amount = Decimal(op['receives']['amount']).scaleb(-buy_asset['precision'])
        fee_asset = Asset(op['fee']['asset_id'], bitshares_instance=self.bitshares)
        fee_amount = Decimal(op['fee']['amount']).scaleb(-fee_asset['precision'])

        # Subtract fee from buy_amount
        # For ccgains, any fees for the transaction should already have been substracted from *amount*, but included
        # in *cost*.
        if fee_asset.symbol == buy_asset.symbol:
            buy_amount -= fee_amount

        data['kind'] = 'Trade'
        data['sell_cur'] = sell_asset.symbol
        data['sell_amount'] = sell_amount
        data['buy_cur'] = buy_asset.symbol
        data['buy_amount'] = buy_amount
        data['fee_cur'] = fee_asset.symbol
        data['fee_amount'] = fee_amount
        data['comment'] = op_id
        data['order_id'] = op['order_id']
        data['prec'] = max(sell_asset['precision'], buy_asset['precision'])

        # Prevent division by zero
        price = Decimal('0')
        price_inverted = Decimal('0')
        if sell_amount and buy_amount:
            price = buy_amount / sell_amount
            price_inverted = sell_amount / buy_amount
        data['price'] = price
        data['price_inverted'] = price_inverted
        # op_date holds the same block_time the original re-read from entry.
        data['date'] = op_date
        return data
| [
"vvk@vvk.pp.ru"
] | vvk@vvk.pp.ru |
f0fa6497a9b7d53518ef748ec3971b096f59330f | a4286e173fb6c5ffe90c4faea2042fd230fd09cb | /general_highways/scat_second_pert/scattering_map.py | 3d9dfbb38dab159b0ae1abff26a5d5d0147addeb | [] | no_license | schaeferrodrigo/highways | 5238be1635370de27800e67d3bfd5835028a1732 | e0e9ef0c014b994c39acdebeba068a97042a1bb2 | refs/heads/master | 2023-02-19T15:10:02.560295 | 2021-01-19T14:56:58 | 2021-01-19T14:56:58 | 279,829,984 | 0 | 0 | null | 2021-01-15T11:54:55 | 2020-07-15T09:51:01 | Python | UTF-8 | Python | false | false | 3,637 | py | # -*- coding: utf-8 -*-
#==============================================================================
import numpy as np
import parameeters as par
import alpha as al
import tau_star as tau
import matplotlib.pyplot as plt
import functions as fun
import newton_method_vv as new
import bifurcation as bif
# Grid resolution for the scattering map.
step_1 = 0.1
# theta grid over [0, 2*pi).  NOTE(review): passing a float as the sample
# count to np.linspace only worked on old numpy (Python-2 era); modern numpy
# requires an integer num — confirm the pinned numpy version.
domain_theta = np.linspace( 0 ,2 * np.pi , 2 * np.pi / step_1)
# I grid over [-3, 3], excluding the singular value I == 1.
domain_I = [x for x in np.linspace(-3 , 3, 6/step_1) if x != 1 ]
def assign_tau(I, theta):
    """Pick the tau branch ('pos' or 'neg') with the smaller absolute
    value at the grid point (I, theta) and return it."""
    tau_pos = tau.tau(I, theta, 'pos')
    tau_neg = tau.tau(I, theta, 'neg')
    # The negative branch wins ties, matching the original minimum test.
    if np.abs(tau_neg) <= np.abs(tau_pos):
        return tau_neg
    return tau_pos
def config_graph(domain_I, domain_theta, step):
    """Apply the axis limits, labels and tick marks shared by every
    scattering-map plot (axes are in grid-index units)."""
    n_theta = len(domain_theta)
    n_I = len(domain_I)
    plt.axis([0, n_theta, 0, n_I])
    plt.xlabel(r'$\theta$', fontsize=30)
    plt.ylabel('$I$', fontsize=30)
    plt.yticks(np.arange(0, n_I, 1 / step),
               ('-3', '-2', '-1', '0', '1', '2', '3'), fontsize=20)
    plt.xticks(np.arange(0, n_theta, (np.pi / 2) / step),
               ('0', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$'), fontsize=20)
def SM( domain_I , domain_theta ,step_1 ):
    # Build and save the scattering-map figure over the (theta, I) grid.
    # NOTE: `print 'I = ' , I` below is Python-2 syntax — this module runs
    # under Python 2 only.
    fig = plt.figure(facecolor= 'white')
    # Dashed horizontal lines mark the I values where bifurcations occur;
    # (I + 3)/step_1 converts an I value into a grid-row index.
    bifurcation = bif.singularity_in_I( domain_I , par.mu)
    for I in bifurcation:
        plt.axhline( (I + 3)/step_1 , linestyle = 'dashed' , color = 'black' , linewidth = '1.5' )
    poin_fun_theta , poin_fun_I = [] , []
    dotI_theta , dotI_I , dotTheta_theta , dotTheta_I = [] , [] ,[] ,[]
    tang_min , tang_max , tang_I = [] , [] , []
    # Sample the reduced Poincare function and the vector field
    # (dot I, dot theta) row by row; *_theta lists hold one row, *_I lists
    # collect the rows into 2-D tables.
    for I in domain_I:
        print 'I = ' , I
        for theta in domain_theta:
            value_of_tau = assign_tau( I , theta )
            poin_fun_theta.append( fun.red_poi_fun(I, theta , value_of_tau) )
            dotI_theta.append( fun.dot_I( I , theta, value_of_tau) )
            dotTheta_theta.append( fun.dot_theta( I , theta , value_of_tau ))
        poin_fun_I.append( poin_fun_theta )
        poin_fun_theta = []
        dotI_I.append( dotI_theta )
        dotTheta_I.append( dotTheta_theta)
        dotI_theta , dotTheta_theta = [] , []
    SM_table = np.array(poin_fun_I)
    behavior_I = np.array( dotI_I)
    behavior_theta = np.array( dotTheta_I)
    #
    # Index-based accessors evaluated on the meshgrid below (note the
    # [row, column] = [y, x] order).
    def estr( x , y ):
        estr = SM_table[ y , x ]
        return estr
    #
    def beh_I( x , y ):
        beh = behavior_I[ y , x ]
        return beh
    #
    def beh_theta( x , y ):
        beh = behavior_theta[ y , x ]
        return beh
    #
    y = np.arange(0 , len( domain_I ) , 1 )
    x = np.arange( 0 , len( domain_theta ) , 1 )
    x, y = np.meshgrid(x,y)
    z = estr( x , y )
    # Blue curves: level sets of the scattering map itself.
    plt.contour( x , y , z , 20 , colors = 'blue' , linestyles = 'solid' )
    z_1 = beh_I( x , y )
    # Shade by the sign of dot I (red negative, green positive) with a thin
    # white band around zero; the black curve is the dot I = 0 level set.
    plt.contourf( x , y , z_1 , [-1000 , -0.000001 , 0.000001 , 1000] , colors = ( 'red' , 'white' , 'green' ) , alpha = 0.3)
    plt.contour( x , y , z_1 , levels = [ 0 ] , colors = 'black' , linestyles = 'solid' )
    z_2 = beh_theta( x , y )
    # Red curve: the dot theta = 0 level set.
    plt.contour( x , y , z_2 , levels = [ 0 ] , colors = 'red' , linestyles = 'solid' )
    # tang_min/tang_max/tang_I stay empty in this version, so these two
    # plot calls currently draw nothing.
    plt.plot( tang_max , tang_I , '.' , color = 'green')
    plt.plot( tang_min , tang_I , '.' , color = 'green')
    config_graph( domain_I , domain_theta ,step_1)
    #fig.text(0.06 , 0.3 , round(par.mu , 2) , ha = 'left', fontsize = 15)
    #fig.text(0.01 , 0.3 , r'$\mu=$', ha = 'left' , fontsize = 15)
    # Output file is named after the perturbation parameter mu.
    name_file = str(par.mu)+ '.png'
    plt.savefig(name_file)
# Render and save the scattering map for the current parameters, then show it.
SM(domain_I, domain_theta , step_1)
plt.show()
| [
"rodrigo@emp-50-21.eduroam.uu.se"
] | rodrigo@emp-50-21.eduroam.uu.se |
29275197f5f4876da65b5163b1b5f59fb7800fa8 | 108ad88a78f1ee4376f8628c4e3625e8b288a733 | /__init__.py | 04a703d03f7a206b73b4c768598f3cd013b6ae4e | [] | no_license | chr15m/chipvolver | bfcf166553b437c7aaeba062a4f3dcc51d4a50ec | 53b9752365272d565b3ede03e251816005b14d47 | refs/heads/master | 2021-01-17T06:27:36.546060 | 2016-07-12T09:25:26 | 2016-07-12T09:25:26 | 49,720,417 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | from chipvolver import *
| [
"chris@mccormick.cx"
] | chris@mccormick.cx |
f418c7e2f004fe16dd6a9ee2b33be0893bebb571 | d7c45763b67f5c36b4c8f274f430dff37c7d342e | /.ipynb_checkpoints/scrape_mars-checkpoint.py | 2b32a48c0617127fad513107b6b55bc15c45960a | [] | no_license | re-ga-na/webscrapingchallenge | 27d17becdc7e2df2b18aecf0138310f88ad4c0e1 | 04ade4080e1f317768ce6fff4dfadd5c7e23e387 | refs/heads/master | 2021-05-22T21:17:16.600970 | 2020-04-05T02:05:49 | 2020-04-05T02:05:49 | 253,100,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,325 | py | {
"cells": [
{
"cell_type": "code",
"execution_count": 211,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import requests\n",
"\n",
"from splinter import Browser\n",
"from selenium import webdriver\n",
"from bs4 import BeautifulSoup"
]
},
{
"cell_type": "code",
"execution_count": 212,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/usr/local/bin/chromedriver\r\n"
]
}
],
"source": [
"!which chromedriver"
]
},
{
"cell_type": "code",
"execution_count": 213,
"metadata": {},
"outputs": [],
"source": [
"#set executable path\n",
"\n",
"executable_path = {'executable_path': '/usr/local/bin/chromedriver'}\n",
"browser = Browser('chrome', **executable_path, headless=False)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# nasa mars news site\n"
]
},
{
"cell_type": "code",
"execution_count": 214,
"metadata": {},
"outputs": [],
"source": [
"news_url = \"https://mars.nasa.gov/news/\"\n",
"browser.visit(news_url)"
]
},
{
"cell_type": "code",
"execution_count": 215,
"metadata": {},
"outputs": [],
"source": [
"#parse with BeautifulSoup\n",
"html = browser.html\n",
"soup = BeautifulSoup(html, \"html.parser\")"
]
},
{
"cell_type": "code",
"execution_count": 216,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NASA's Perseverance Mars Rover Gets Its Wheels and Air Brakes\n",
"After the rover was shipped from JPL to Kennedy Space Center, the team is getting closer to finalizing the spacecraft for launch later this summer.\n"
]
}
],
"source": [
"#extract article title and text\n",
"article = soup.find('div', class_='list_text')\n",
"news_title = article.find('div', class_='content_title').text\n",
"news_p = article.find('div', class_='article_teaser_body').text\n",
"\n",
"print(news_title)\n",
"print(news_p)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# jpl mars space images"
]
},
{
"cell_type": "code",
"execution_count": 217,
"metadata": {},
"outputs": [],
"source": [
"images_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n",
"browser.visit(images_url)"
]
},
{
"cell_type": "code",
"execution_count": 218,
"metadata": {},
"outputs": [],
"source": [
"#expand full size image\n",
"fullsize = browser.find_by_id(\"full_image\")\n",
"fullsize.click()\n",
"\n",
"#expand more info\n",
"browser.is_element_present_by_text(\"more info\", wait_time=1)\n",
"moreinfo = browser.find_link_by_partial_text(\"more info\")\n",
"moreinfo.click()"
]
},
{
"cell_type": "code",
"execution_count": 219,
"metadata": {},
"outputs": [],
"source": [
"#parse with BeautifulSoup\n",
"\n",
"html_image = browser.html\n",
"soup = BeautifulSoup(html_image, 'html.parser')"
]
},
{
"cell_type": "code",
"execution_count": 220,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'/spaceimages/images/largesize/PIA19083_hires.jpg'"
]
},
"execution_count": 220,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"featured_image_url = soup.select_one(\"figure.lede a img\").get(\"src\")\n",
"featured_image_url"
]
},
{
"cell_type": "code",
"execution_count": 221,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"https://www.jpl.nasa.govhttp://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg\n"
]
}
],
"source": [
"#combine base url with image url\n",
"featured_image_url = f\"https://www.jpl.nasa.gov{img_url}\"\n",
"print(featured_image_url)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# mars weather"
]
},
{
"cell_type": "code",
"execution_count": 222,
"metadata": {},
"outputs": [],
"source": [
"weather_url = 'https://twitter.com/marswxreport?lang=en'\n",
"browser.visit(weather_url)"
]
},
{
"cell_type": "code",
"execution_count": 223,
"metadata": {},
"outputs": [],
"source": [
"#use requests library to get tweet\n",
"response = requests.get(weather_url)\n",
"soup = BeautifulSoup(response.text, 'lxml')"
]
},
{
"cell_type": "code",
"execution_count": 224,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"InSight sol 480 (2020-04-02) low -93.0ºC (-135.5ºF) high -6.7ºC (19.9ºF)\n",
"winds from the SW at 5.8 m/s (13.1 mph) gusting to 17.2 m/s (38.5 mph)\n",
"pressure at 6.50 hPapic.twitter.com/8oUTHBmcXp\n"
]
}
],
"source": [
"mars_weather = soup.find('p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text.strip()\n",
"print(mars_weather)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# mars facts"
]
},
{
"cell_type": "code",
"execution_count": 225,
"metadata": {},
"outputs": [],
"source": [
"facts_url = \"https://space-facts.com/mars/\"\n",
"browser.visit(facts_url)\n",
"html_url = browser.html"
]
},
{
"cell_type": "code",
"execution_count": 226,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>0</th>\n",
" <th>1</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Equatorial Diameter:</td>\n",
" <td>6,792 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>Polar Diameter:</td>\n",
" <td>6,752 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>Mass:</td>\n",
" <td>6.39 × 10^23 kg (0.11 Earths)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>Moons:</td>\n",
" <td>2 (Phobos & Deimos)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>Orbit Distance:</td>\n",
" <td>227,943,824 km (1.38 AU)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>Orbit Period:</td>\n",
" <td>687 days (1.9 years)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>Surface Temperature:</td>\n",
" <td>-87 to -5 °C</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>First Record:</td>\n",
" <td>2nd millennium BC</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>Recorded By:</td>\n",
" <td>Egyptian astronomers</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 0 1\n",
"0 Equatorial Diameter: 6,792 km\n",
"1 Polar Diameter: 6,752 km\n",
"2 Mass: 6.39 × 10^23 kg (0.11 Earths)\n",
"3 Moons: 2 (Phobos & Deimos)\n",
"4 Orbit Distance: 227,943,824 km (1.38 AU)\n",
"5 Orbit Period: 687 days (1.9 years)\n",
"6 Surface Temperature: -87 to -5 °C\n",
"7 First Record: 2nd millennium BC\n",
"8 Recorded By: Egyptian astronomers"
]
},
"execution_count": 226,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#retieve first table\n",
"mars_df = pd.read_html(facts_url)[0]\n",
"mars_df\n"
]
},
{
"cell_type": "code",
"execution_count": 227,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Value</th>\n",
" </tr>\n",
" <tr>\n",
" <th>Description</th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>Equatorial Diameter:</th>\n",
" <td>6,792 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Polar Diameter:</th>\n",
" <td>6,752 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Mass:</th>\n",
" <td>6.39 × 10^23 kg (0.11 Earths)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Moons:</th>\n",
" <td>2 (Phobos & Deimos)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Orbit Distance:</th>\n",
" <td>227,943,824 km (1.38 AU)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Orbit Period:</th>\n",
" <td>687 days (1.9 years)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Surface Temperature:</th>\n",
" <td>-87 to -5 °C</td>\n",
" </tr>\n",
" <tr>\n",
" <th>First Record:</th>\n",
" <td>2nd millennium BC</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Recorded By:</th>\n",
" <td>Egyptian astronomers</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Value\n",
"Description \n",
"Equatorial Diameter: 6,792 km\n",
"Polar Diameter: 6,752 km\n",
"Mass: 6.39 × 10^23 kg (0.11 Earths)\n",
"Moons: 2 (Phobos & Deimos)\n",
"Orbit Distance: 227,943,824 km (1.38 AU)\n",
"Orbit Period: 687 days (1.9 years)\n",
"Surface Temperature: -87 to -5 °C\n",
"First Record: 2nd millennium BC\n",
"Recorded By: Egyptian astronomers"
]
},
"execution_count": 227,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#rename columns and set index\n",
"mars_df.columns=[\"Description\", \"Value\"]\n",
"mars_df.set_index(\"Description\", inplace=True)\n",
"mars_df"
]
},
{
"cell_type": "code",
"execution_count": 228,
"metadata": {},
"outputs": [],
"source": [
"#convert table to html string\n",
"mars_df.to_html('table.html')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# mars hemispheres"
]
},
{
"cell_type": "code",
"execution_count": 229,
"metadata": {},
"outputs": [],
"source": [
"hemispheres_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n",
"browser.visit(hemispheres_url)\n",
"html_hemispheres = browser.html"
]
},
{
"cell_type": "code",
"execution_count": 230,
"metadata": {},
"outputs": [],
"source": [
"soup = BeautifulSoup(html_hemispheres, \"html.parser\")\n"
]
},
{
"cell_type": "code",
"execution_count": 231,
"metadata": {},
"outputs": [],
"source": [
"#empty list for hemisphere image urls\n",
"hemisphere_image_urls = []\n"
]
},
{
"cell_type": "code",
"execution_count": 232,
"metadata": {},
"outputs": [],
"source": [
"results = soup.find(\"div\", class_ = \"results\" )\n",
"\n",
"images = results.find_all(\"div\", class_=\"item\")\n"
]
},
{
"cell_type": "code",
"execution_count": 233,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'title': 'Cerberus Hemisphere ', 'img_url': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg'}, {'title': 'Schiaparelli Hemisphere ', 'img_url': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg'}, {'title': 'Syrtis Major Hemisphere ', 'img_url': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg'}, {'title': 'Valles Marineris Hemisphere ', 'img_url': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg'}]\n"
]
}
],
"source": [
"#looping through hemisphere images to extract urls abd titles\n",
"\n",
"for image in images:\n",
" title = image.find(\"h3\").text\n",
" title = title.replace(\"Enhanced\", \"\")\n",
" endlink = image.find(\"a\")[\"href\"]\n",
" img_url = \"https://astrogeology.usgs.gov/\" + endlink\n",
" \n",
" browser.visit(img_url)\n",
" html_links = browser.html\n",
" soup = BeautifulSoup(html_links, \"html.parser\")\n",
" downloads = soup.find(\"div\", class_=\"downloads\")\n",
" img_url = downloads.find(\"a\")[\"href\"]\n",
" hemisphere_image_urls.append({\"title\": title, \"img_url\": img_url})\n",
"\n",
"print(hemisphere_image_urls)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| [
"ralicka@rs-macbook-air.home"
] | ralicka@rs-macbook-air.home |
0e6b139dec6db4c8aa222b7937adfc0f12e6045a | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/heatmap/_y0.py | 8bae24f08506a20ba2d7ca6fb4ba46ef4651f570 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 441 | py | import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name='y0', parent_name='heatmap', **kwargs):
super(Y0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc+clearAxisTypes',
implied_edits={'ytype': 'scaled'},
role='info',
**kwargs
)
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
63e492a0c5b9732d569943b85828ce1a87c9f932 | 53740aa8e4df56003806cbc7a3cbf7896daa8f3c | /Ejemplosbasicos/EjemploModulo.py | 2fa38ef7b5c2f4e9977509514288fae852e30ef6 | [] | no_license | lsilvaherszage/cursopython | b9184d9b7993a4512efd451f4fd476bcc53cca50 | 5a0abe321bfe0f1d811a921a990143740c2a54a7 | refs/heads/master | 2023-03-15T03:20:34.115572 | 2021-03-26T20:51:16 | 2021-03-26T20:51:16 | 337,919,539 | 0 | 0 | null | 2021-02-14T20:46:52 | 2021-02-11T03:27:08 | null | UTF-8 | Python | false | false | 480 | py | import sys
#import FuncionesUtiles
# import FuncionesUtiles as fu
# from FuncionesUtiles import cuadradoPerfecto
#from FuncionesUtiles import *
# import funciones
# print(FuncionesUtiles.cuadradoPerfecto(25))
# print(fu.cuadradoPerfecto(26))
# print(fu.enElMismoOrden("Este es un texto lindo", ["es", "texto"]))
# print(cuadradoPerfecto(16))
# print(enElMismoOrden("A ver si está en orden correcto", ["si", "orden"]))
print(sys.path)
# print(funciones.fun1("Esto es cadena")) | [
"gzabala@gmail.com"
] | gzabala@gmail.com |
ee9990dd7709468a094b746e39c632852ceebc14 | 91f70ceb73f813c9f01b4236005de4fd100f2fc3 | /src/pycropml/wf2xml.py | e3634ec7c96995b05dff340cc686d35241303719 | [
"MIT"
] | permissive | sielenk-yara/PyCrop2ML | 438ed2f0fefc707c650132512ee1c4d7f4b5c01a | dfcab79061fa71d4343120573b50b6232812999e | refs/heads/master | 2020-12-29T15:30:12.552225 | 2020-10-07T18:09:42 | 2020-10-07T18:09:42 | 238,654,199 | 0 | 0 | NOASSERTION | 2020-12-01T08:45:56 | 2020-02-06T09:42:11 | null | UTF-8 | Python | false | false | 3,843 | py | from openalea.core.external import *
from openalea.core.pkgmanager import PackageManager
from py.xml import Namespace
class ns(Namespace):
"Custom xml namespace"
class Wf2Xml(object):
""" Export an OpenAlea Workflow into a Crop2ML Model composite.
"""
def __init__(self, wf, pmanager=None):
""" Export a workflow into OpenAlea.
:Parameters:
- wf : an OpenAlea workflow (factory)
"""
self.wf = wf
if pmanager is None:
self.pmanager = PackageManager()
else:
self.pmanager = pmanager
def run(self):
""" Generate Crop2ML specification of a CompositeModel from a workflow. """
wf = self.wf
# ModelComposition name id version timestep
xml = ns.ModelComposition(name=wf.name, id=wf.package.name, version="001", timestep="1")
# Extract the description of the wf
# TODO: Do it in a generic way
doc = wf.description
docs = [x.strip() for x in doc.strip().split('\n')]
if len(docs) == 5:
title = docs[0]
if docs[1].startswith('Author'):
authors = docs[1].split(':')[1]
authors = authors.strip()
else:
authors = wf.authors
if docs[2].startswith('Reference'):
references = docs[2].split(':')[1]
references = references.strip()
else:
references = wf.description
if docs[3].startswith('Institution'):
institution = docs[3].split(':')[1]
institution = institution.strip()
else:
institution = wf.package.metainfo.get('institutes', '')
if docs[4].startswith('Abstract'):
abstract = docs[4].split(':')[1]
abstract = abstract.strip()
else:
abstract = ''
# Description
desc = ns.Description(
ns.Title(title),
ns.Authors(authors),
ns.Institution(institution),
ns.Reference(references),
ns.Abstract(abstract)
)
xml.append(desc)
composition = ns.Composition()
for k, v in wf.elt_factory.iteritems():
pkg, name = v
composition.append(ns.Model(name=name, id=pkg+'.'+name, filename='unit.'+name+'.xml'))
links = ns.Links()
nodes = wf.elt_factory
for eid, link in wf.connections.iteritems():
(source_vid, source_port, target_vid, target_port) = link
if source_vid == '__in__':
pkg, factory = nodes[target_vid]
nf = self.pmanager[pkg][factory]
_target = factory+'.'+nf.inputs[target_port]['name']
_source = wf.inputs[source_port]['name']
links.append(ns.InputLink(target=_target, source=_source))
elif target_vid == '__out__':
pkg, factory = nodes[source_vid]
nf = self.pmanager[pkg][factory]
_source = factory+'.'+nf.inputs[source_port]['name']
_target = wf.outputs[target_port]['name']
links.append(ns.OutputLink(source=_source, target=_target))
else:
pkg, factory = nodes[source_vid]
nf = self.pmanager[pkg][factory]
_source = factory+'.'+nf.inputs[source_port]['name']
pkg, factory = nodes[target_vid]
nf = self.pmanager[pkg][factory]
_target = factory+'.'+nf.inputs[target_port]['name']
links.append(ns.InternalLink(target=_target, source=_source))
composition.append(links)
xml.append(composition)
print(xml.unicode(indent=4).encode('utf8'))
return xml
| [
"christophe.pradal@inria.fr"
] | christophe.pradal@inria.fr |
0d2dcc849231e7db257596ad5c809789d9656d98 | 0a23232675cdc132b5bd3364989934ea6722dc23 | /readwriteFiles_pyrecipe/readJson.py | 3fee345f734b74082d52ceff8a246153e3cb8f2a | [] | no_license | Nitin-pgmr/ETL_With_Python | fdf74f0daf5bcd379ffe46cd133f8883574f2dcf | ece5246c5defce19abb2fcaa996e45d94969d841 | refs/heads/master | 2023-01-21T04:24:19.101609 | 2020-12-01T07:53:58 | 2020-12-01T07:53:58 | 317,454,253 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from faker import Faker
import json
# Load the (presumably Faker-generated) JSON file and print the first
# record's name.  NOTE(review): assumes data.JSON has a top-level "records"
# list of objects with a "name" key — verify against the generator script.
with open("data.JSON","r") as f:
    data = json.load(f)
print(data['records'][0]['name'])
| [
"varghesenitin60@gmail.com"
] | varghesenitin60@gmail.com |
c200f7c9266f997851b6130916f349f0d636f6ba | 62b1c9931118f2837cd4fd68bf71a56b016124ad | /AndrewNgCoursera/backpropagation.py | b5a7ff8aa613855040e14e520382314fffe7d2a7 | [] | no_license | dev-osiris/machine_learning | ab10af88d8ab41e880e5e7ef83a1f1dfa2b073fc | b2878419a32c455b3813a302ef6140438b2f4ce4 | refs/heads/main | 2023-06-17T13:13:58.421585 | 2021-07-11T06:49:59 | 2021-07-11T06:49:59 | 369,218,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | import matplotlib.pyplot as plt
import numpy as np
num_training_ex = 1
alpha = 0.0001
def create_data(count):
    """Generate `count` random 3-feature samples with binary labels.

    Each sample is a vector of 3 ints drawn uniformly from [-5, 5).  The
    label is 1 exactly when feature0 > 0, feature1 >= 0 and feature2 < 0,
    else 0.

    :param count: number of samples to draw
    :return: tuple (x, y) where x has shape (3, count) and y shape (count,)
    """
    # Accumulate rows in Python lists instead of calling np.append inside
    # the loop (np.append copies the whole array every iteration -> O(n^2)),
    # and drop the original dummy [[0, 0, 0]] seed row entirely.
    rows = []
    labels = []
    for _ in range(count):
        mat = np.random.randint(low=-5, high=5, size=(1, 3))
        if mat[0][0] > 0 > mat[0][2] and mat[0][1] >= 0:
            labels.append(1)
        else:
            labels.append(0)
        rows.append(mat[0])
    # reshape keeps the (count, 3) shape even when count == 0.
    data = np.asarray(rows, dtype=int).reshape((count, 3))
    return data.T, np.array(labels).T
# Training set: x is (3, 5000) integer features, y a (1, 5000) row vector
# of binary labels.
x, y = create_data(5000)
y = y.reshape((1, -1))

# weights and biases
# Architecture: 3 inputs -> 5 tanh units -> 5 tanh units -> 1 linear output.
# Weights start uniform in [0, 1); biases start at zero.
w1 = np.random.rand(5, x.shape[0])
b1 = np.zeros((5, 1))
w2 = np.random.rand(5, 5)
b2 = np.zeros((5, 1))
w3 = np.random.rand(1, 5)
b3 = np.array([0]).reshape((1, 1))
def cross_entropy(prediction, target):
    """Mean cross-entropy between `prediction` and `target`, averaged over
    the second axis (one column per sample); 1e-9 guards against log(0)."""
    n_samples = prediction.shape[1]
    return -np.sum(target * np.log(prediction + 1e-9)) / n_samples
def derivative_cross_entropy(y, yhat):
    """Elementwise derivative of cross-entropy w.r.t. the prediction:
    1/(1-yhat) where the label is 0, 1/yhat elsewhere (1e-10 avoids
    division by zero)."""
    safe = yhat + 1e-10
    return np.where(y == 0, 1. / (1. - safe), 1. / safe)
def derivative_tanh(x):
    """Derivative of tanh: 1 - tanh(x)^2, evaluated elementwise."""
    t = np.tanh(x)
    return 1. - t * t
def hard_max(x):
    """Threshold activations at 0.5 into hard 0/1 integer labels."""
    return np.asarray(x > 0.5, dtype=int)
cost_list = []
# Full-batch gradient descent for 1000 iterations: forward pass, squared
# error cost, backpropagation, parameter update.
# NOTE(review): num_training_ex is 1, so the (1/m) factors below do NOT
# average gradients over the 5000-sample batch — confirm this is intended.
for i in range(1000):
    # Forward pass: two tanh hidden layers, linear output.
    z1 = np.dot(w1, x) + b1
    assert(z1.shape == (5, x.shape[1]))
    A1 = np.tanh(z1)
    assert(A1.shape == (5, x.shape[1]))
    z2 = np.dot(w2, A1) + b2
    assert(z2.shape == (5, x.shape[1]))
    A2 = np.tanh(z2)
    assert(A2.shape == (5, x.shape[1]))
    z3 = np.dot(w3, A2) + b3
    assert(z3.shape == (1, x.shape[1]))
    A3 = z3
    # print(f"A3: {A3}")
    assert(A3.shape == (1, x.shape[1]))
    # Squared-error cost over the whole batch.
    cost = np.sum((A3 - y) ** 2) / 2
    # cost2 = log_loss(y, A3)

    # backpropagation
    # Output layer is linear, so dCost/dz3 is simply (A3 - y).
    dz3 = A3 - y
    # dz3 = derivative_cross_entropy(y, A3)
    assert(dz3.shape == (1, x.shape[1]))
    dw3 = (1 / num_training_ex) * np.dot(dz3, A2.T)
    assert(dw3.shape == (1, 5))
    db3 = (1 / num_training_ex) * np.sum(dz3, axis=1, keepdims=True)
    assert(db3.shape == (1, 1))
    # Propagate through layer 2: chain rule with the tanh derivative.
    dA2 = np.dot(w3.T, dz3)
    # dz2_ = np.dot(w3.T, dz3) * derivative_tanh(z2)
    dz2 = np.multiply(dA2, derivative_tanh(z2))
    assert(dz2.shape == (5, x.shape[1]))
    dw2 = (1 / num_training_ex) * np.dot(dz2, A1.T)
    assert(dw2.shape == (5, 5))
    db2 = (1 / num_training_ex) * np.sum(dz2, axis=1, keepdims=True)
    assert(db2.shape == (5, 1))
    # Propagate through layer 1.
    dA1 = np.dot(w2.T, dz2)
    # dz1_ = np.dot(w2.T, dz2) * derivative_tanh(z1)
    dz1 = np.multiply(dA1, derivative_tanh(z1))
    assert(dz1.shape == (5, x.shape[1]))
    dw1 = (1 / num_training_ex) * np.dot(dz1, x.T)
    # dw1_ = (1 / num_training_ex) * np.dot(dz1, np.dot(w1.T, dz1).T)
    assert (dw1.shape == (5, x.shape[0]))
    db1 = (1 / num_training_ex) * np.sum(dz1, axis=1, keepdims=True)
    assert (db1.shape == (5, 1))

    # update parameters
    # Vanilla gradient-descent step with learning rate `alpha`.
    w1 = w1 - alpha * dw1
    b1 = b1 - alpha * db1
    w2 = w2 - alpha * dw2
    b2 = b2 - alpha * db2
    w3 = w3 - alpha * dw3
    b3 = b3 - alpha * db3

    # Record the cost for the learning-curve plot; log every 10 steps.
    cost_list.append(cost)
    if i % 10 == 0:
        print(f"{i} | {cost}")
# test_mat = np.array([[4, -1, -5], [4, 2, -3], [-2, -1, 5], [3, 2, -1], [1, 5, -5], [3, 3, -4],
#                      [1, -2, 1], ]).T
# Evaluate on the training set: one forward pass with the learned weights.
train_mat = x
train_answer = y
z1 = np.dot(w1, train_mat) + b1
A1 = np.tanh(z1)
z2 = np.dot(w2, A1) + b2
A2 = np.tanh(z2)
z3 = np.dot(w3, A2) + b3
A3 = z3
# NOTE(review): this squares the *sum* of elementwise differences; the sum
# of squared differences (np.sum((... ) ** 2) inside) was probably intended.
print(f"train error: {(np.sum(train_answer - hard_max(A3)) ** 2) / train_answer.shape[1]}")

# Held-out evaluation on 1000 fresh samples from the same distribution.
test_mat, test_answer = create_data(1000)
test_answer = test_answer.reshape((1, -1))
z1 = np.dot(w1, test_mat) + b1
A1 = np.tanh(z1)
z2 = np.dot(w2, A1) + b2
A2 = np.tanh(z2)
z3 = np.dot(w3, A2) + b3
A3 = z3
print(f"test error: {(np.sum(test_answer - hard_max(A3)) ** 2) / test_answer.shape[1]}")

# Plot the learning curve accumulated during training.
plt.plot(cost_list)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
983b5abcc3b091b3c8ac1fe62dfb7a5f192939c8 | 7b3293dc2c5ac23c421297ccdf7e6b16caddfb9d | /app.py | b164c4ee259b896991dbe0c4d2e7e0c23e44363e | [] | no_license | cps333/event_mgmt | fe2a5ca484a9701eb1a5db74d5ba83dbccc12f02 | 30707bc6f754325ce52f38b7bfb32dbb82828a81 | refs/heads/master | 2021-01-21T19:01:35.696553 | 2017-05-22T23:09:11 | 2017-05-22T23:09:11 | 92,106,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | import flask
Test Test Test Test | [
"congpeisong@gmail.com"
] | congpeisong@gmail.com |
be64fb8a420212ea18965024bd6040c28f3fefd0 | 69a76fc49d48896f17ae6756ac2bedf8c92f9fe8 | /myapp/forms.py | a9958f22622e43718189bf7ab58a5376adc5aedf | [] | no_license | raffaellgates/myproject | 6c3e6bc375a87daeb9df9f86509e16e1da0ff6fb | 24194a34581d240c111269940a7249d972ef231e | refs/heads/master | 2023-01-03T05:20:32.409192 | 2020-10-21T16:25:57 | 2020-10-21T16:25:57 | 305,606,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | from django import forms
from django.forms import ModelForm
from myapp.models import Compositor, Constelacao, Estrela, Hospede, Musica
class HospedeForm(ModelForm):
    # ModelForm exposing every field of the Hospede model with default widgets.
    class Meta:
        model = Hospede
        fields = '__all__'
class CompositorForm(ModelForm):
    # ModelForm for Compositor; renders the birth-date field (data_nasc)
    # with an HTML5 date picker instead of the default text input.
    class Meta:
        model = Compositor
        fields = '__all__'
        widgets = {
            'data_nasc': forms.DateInput(
                attrs={
                    'type': 'date',
                }
            )
        }
class MusicaForm(ModelForm):
    # ModelForm for Musica; renders the `data` (date) field with an HTML5
    # date picker instead of the default text input.
    class Meta:
        model = Musica
        fields = '__all__'
        widgets = {
            'data': forms.DateInput(
                attrs={
                    'type': 'date',
                }
            )
        }
class EstrelaForm(ModelForm):
    # ModelForm exposing every field of the Estrela model with default widgets.
    class Meta:
        model = Estrela
        fields = '__all__'
class ConstelacaoForm(ModelForm):
    # ModelForm exposing every field of the Constelacao model with default widgets.
    class Meta:
        model = Constelacao
        fields = '__all__'
| [
"rafael12silva73@gmail.com"
] | rafael12silva73@gmail.com |
6121deada37a3df456da949b942af9bb9b38b24b | 6eba06b30d5d66bc055bf35582f941a8d25fcf57 | /build/lib.linux-x86_64-3.5/fairseq/models/lstm.py | dc8033bec7ec90e1ca73e7a765bfd890d9983f78 | [] | no_license | prpankajsingh/quick-edit | 5d2cb31383c3becc5b396d03079ad6bd3d708422 | 211ebb46ac0e457b796ed41615a20d81440dcf0a | refs/heads/master | 2020-04-16T23:18:49.107900 | 2018-08-13T11:37:28 | 2018-08-13T11:37:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,734 | py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import LanguagePairDataset
from . import FairseqEncoder, FairseqIncrementalDecoder, FairseqModel, register_model, register_model_architecture
@register_model('lstm')
class LSTMModel(FairseqModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', default=None, type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
help='encoder hidden size')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='number of encoder layers')
parser.add_argument('--encoder-bidirectional', action='store_true',
help='make all layers of encoder bidirectional')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', default=None, type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
help='decoder hidden size')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='number of decoder layers')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
help='decoder attention')
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
help='dropout probability for encoder input embedding')
parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
help='dropout probability for encoder output')
parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
help='dropout probability for decoder input embedding')
parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
help='dropout probability for decoder output')
@classmethod
def build_model(cls, args, src_dict, dst_dict):
"""Build a new model instance."""
if not hasattr(args, 'encoder_embed_path'):
args.encoder_embed_path = None
if not hasattr(args, 'decoder_embed_path'):
args.decoder_embed_path = None
if not hasattr(args, 'encoder_hidden_size'):
args.encoder_hidden_size = args.encoder_embed_dim
if not hasattr(args, 'decoder_hidden_size'):
args.decoder_hidden_size = args.decoder_embed_dim
if not hasattr(args, 'encoder_bidirectional'):
args.encoder_bidirectional = False
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
pretrained_encoder_embed = None
if args.encoder_embed_path:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args.encoder_embed_path, src_dict, args.encoder_embed_dim)
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path, dst_dict, args.decoder_embed_dim)
encoder = LSTMEncoder(
dictionary=src_dict,
embed_dim=args.encoder_embed_dim,
hidden_size=args.encoder_hidden_size,
num_layers=args.encoder_layers,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
bidirectional=args.encoder_bidirectional,
pretrained_embed=pretrained_encoder_embed,
)
try:
attention = bool(eval(args.decoder_attention))
except TypeError:
attention = bool(args.decoder_attention)
decoder = LSTMDecoder(
dictionary=dst_dict,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
attention=attention,
encoder_embed_dim=args.encoder_embed_dim,
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
)
return cls(encoder, decoder)
class LSTMEncoder(FairseqEncoder):
"""LSTM encoder."""
def __init__(
self, dictionary, embed_dim=512, hidden_size=512, num_layers=1,
dropout_in=0.1, dropout_out=0.1, bidirectional=False,
left_pad_source=LanguagePairDataset.LEFT_PAD_SOURCE,
pretrained_embed=None,
padding_value=0.,
):
super().__init__(dictionary)
self.num_layers = num_layers
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.bidirectional = bidirectional
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
else:
self.embed_tokens = pretrained_embed
self.lstm = LSTM(
input_size=embed_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout_out,
bidirectional=bidirectional,
)
self.left_pad_source = left_pad_source
self.padding_value = padding_value
self.output_units = hidden_size
if bidirectional:
self.output_units *= 2
def forward(self, src_tokens, src_lengths):
if self.left_pad_source:
# convert left-padding to right-padding
src_tokens = utils.convert_padding_direction(
src_tokens,
self.padding_idx,
left_to_right=True,
)
bsz, seqlen = src_tokens.size()
# embed tokens
x = self.embed_tokens(src_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# pack embedded source tokens into a PackedSequence
packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())
# apply LSTM
if self.bidirectional:
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
h0 = Variable(x.data.new(*state_size).zero_())
c0 = Variable(x.data.new(*state_size).zero_())
packed_outs, (final_hiddens, final_cells) = self.lstm(
packed_x,
(h0, c0),
)
# unpack outputs and apply dropout
x, _ = nn.utils.rnn.pad_packed_sequence(
packed_outs, padding_value=self.padding_value)
x = F.dropout(x, p=self.dropout_out, training=self.training)
assert list(x.size()) == [seqlen, bsz, self.output_units]
if self.bidirectional:
bi_final_hiddens, bi_final_cells = [], []
for i in range(self.num_layers):
bi_final_hiddens.append(
torch.cat(
(final_hiddens[2 * i], final_hiddens[2 * i + 1]),
dim=0).view(bsz, self.output_units))
bi_final_cells.append(
torch.cat(
(final_cells[2 * i], final_cells[2 * i + 1]),
dim=0).view(bsz, self.output_units))
return x, bi_final_hiddens, bi_final_cells
return x, final_hiddens, final_cells
def max_positions(self):
"""Maximum input length supported by the encoder."""
return int(1e5) # an arbitrary large number
class AttentionLayer(nn.Module):
def __init__(self, input_embed_dim, output_embed_dim):
super().__init__()
self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
self.output_proj = Linear(2*output_embed_dim, output_embed_dim, bias=False)
def forward(self, input, source_hids, src_lengths=None):
# input: bsz x input_embed_dim
# source_hids: srclen x bsz x output_embed_dim
# x: bsz x output_embed_dim
x = self.input_proj(input)
# compute attention
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
attn_scores = F.softmax(attn_scores.t(), dim=1).t() # srclen x bsz
# sum weighted sources
x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
x = F.tanh(self.output_proj(torch.cat((x, input), dim=1)))
return x, attn_scores
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
def __init__(
self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512,
num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True,
encoder_embed_dim=512, encoder_output_units=512,
pretrained_embed=None,
):
super().__init__(dictionary)
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
else:
self.embed_tokens = pretrained_embed
self.encoder_output_units = encoder_output_units
assert encoder_output_units == hidden_size, \
'{} {}'.format(encoder_output_units, hidden_size)
# TODO another Linear layer if not equal
self.layers = nn.ModuleList([
LSTMCell(
input_size=encoder_output_units + embed_dim if layer == 0 else hidden_size,
hidden_size=hidden_size,
)
for layer in range(num_layers)
])
self.attention = AttentionLayer(encoder_output_units, hidden_size) if attention else None
if hidden_size != out_embed_dim:
self.additional_fc = Linear(hidden_size, out_embed_dim)
self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# get outputs from encoder
encoder_outs, _, _ = encoder_out[:3]
srclen = encoder_outs.size(0)
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')
if cached_state is not None:
prev_hiddens, prev_cells, input_feed = cached_state
else:
_, encoder_hiddens, encoder_cells = encoder_out[:3]
num_layers = len(self.layers)
prev_hiddens = [encoder_hiddens[i] for i in range(num_layers)]
prev_cells = [encoder_cells[i] for i in range(num_layers)]
input_feed = Variable(x.data.new(bsz, self.encoder_output_units).zero_())
attn_scores = Variable(x.data.new(srclen, seqlen, bsz).zero_())
outs = []
for j in range(seqlen):
# input feeding: concatenate context vector from previous time step
input = torch.cat((x[j, :, :], input_feed), dim=1)
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = F.dropout(hidden, p=self.dropout_out, training=self.training)
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
# apply attention using the last layer's hidden state
if self.attention is not None:
out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs)
else:
out = hidden
out = F.dropout(out, p=self.dropout_out, training=self.training)
# input feeding
input_feed = out
# save final output
outs.append(out)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self, incremental_state, 'cached_state', (prev_hiddens, prev_cells, input_feed))
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
attn_scores = attn_scores.transpose(0, 2)
# project back to size of vocabulary
if hasattr(self, 'additional_fc'):
x = self.additional_fc(x)
x = F.dropout(x, p=self.dropout_out, training=self.training)
x = self.fc_out(x)
return x, attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
if not isinstance(new_order, Variable):
new_order = Variable(new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, 'cached_state', new_state)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return int(1e5) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
m.weight.data.uniform_(-0.1, 0.1)
return m
def LSTM(input_size, hidden_size, **kwargs):
m = nn.LSTM(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if 'weight' in name or 'bias' in name:
param.data.uniform_(-0.1, 0.1)
return m
def LSTMCell(input_size, hidden_size, **kwargs):
m = nn.LSTMCell(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if 'weight' in name or 'bias' in name:
param.data.uniform_(-0.1, 0.1)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
@register_model_architecture('lstm', 'lstm')
def base_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', 512)
args.encoder_layers = getattr(args, 'encoder_layers', 1)
args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', False)
args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout)
args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', 512)
args.decoder_layers = getattr(args, 'decoder_layers', 1)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
args.decoder_attention = getattr(args, 'decoder_attention', True)
args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout)
args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout)
@register_model_architecture('lstm', 'lstm_wiseman_iwslt_de_en')
def lstm_wiseman_iwslt_de_en(args):
base_architecture(args)
args.encoder_embed_dim = 256
args.encoder_hidden_size = 256
args.encoder_layers = 1
args.encoder_bidirectional = False
args.encoder_dropout_in = 0
args.encoder_dropout_out = 0
args.decoder_embed_dim = 256
args.decoder_hidden_size = 256
args.decoder_layers = 1
args.decoder_out_embed_dim = 256
args.decoder_attention = True
args.decoder_dropout_in = 0
@register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
def lstm_luong_wmt_en_de(args):
base_architecture(args)
args.encoder_embed_dim = 1000
args.encoder_hidden_size = 1000
args.encoder_layers = 4
args.encoder_dropout_out = 0
args.encoder_bidirectional = False
args.decoder_embed_dim = 1000
args.decoder_hidden_size = 1000
args.decoder_layers = 4
args.decoder_out_embed_dim = 1000
args.decoder_attention = True
args.decoder_dropout_out = 0
| [
"nishit.n123@gmail.com"
] | nishit.n123@gmail.com |
6db846cc3de7d7f5c3535eafad242cb11e1da445 | 9dee94907e6456a4af9855d358693923c17b4e0d | /0111_Minimum_Depth_of_Binary_Tree.py | 711a407cde57b1a9863453b7f34b3ebbcf63c43b | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def minDepth(self, root: TreeNode) -> int:
# BFS find the first leaf
if not root:
return 0
queue = [root]
depth = 1
while len(queue) > 0:
new_queue = []
for i in range(len(queue)):
q = queue[i]
if not q.left and not q.right:
return depth
if q.left:
new_queue.append(q.left)
if q.right:
new_queue.append(q.right)
queue = new_queue
depth += 1
return 0 | [
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
088afb93ee215a857db06af9d00d17651895e7ed | 06aaabe8863bcaf9595a4a07accf0283e42f35bf | /src/camserv/newcamserv.py | 955e6c3ea09352e88cddbcab2928c15000453864 | [] | no_license | bilalsadiq/Image-Video-Stabilization | 3961645b9504e66935660c1053c824b6463a50cf | 7e3054f7ae08396c38a819f9c33d5e59decdf6d3 | refs/heads/master | 2021-01-21T06:06:30.104627 | 2017-08-31T00:14:43 | 2017-08-31T00:14:43 | 101,939,392 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py | from picamera import *
from os import *
from time import *
from io import *
import pycurl
import base64
import json
import multiprocessing
from conf import *
DEBUG = True
def helper_thread(url='127.0.0.1'):
# req : Request object
# res : Response buffer / object
cam = PiCamera()
cam.resolution = CAM_RESOLUTION
cam.color_effects = CAM_BLACK_AND_WHITE
try:
req = pycurl.Curl()
req.setopt(req.URL, url)
req.setopt(req.POST, 1)
header = ['Content-Type: application/json','Accept: application/json']
req.setopt(req.HTTPHEADER, header)
stream = BytesIO()
for fr in cam.capture_continuous(stream, format='jpeg'):
try:
stream.truncate()
stream.seek(0)
payload = '{ "ack" : "?", "img" : "' + base64.b64encode(stream.getvalue()) + '" }'
res = BytesIO()
req.setopt(req.POSTFIELDS, payload)
req.setopt(req.WRITEFUNCTION, res.write)
req.perform()
if res:
try:
response = json.loads(res.getvalue())
if response['ack'] == 'bad': # Bad formatting?
if DEBUG:
print '[-] ERROR: The image POST failed!'
DEBUG = False
except Exception as e:
continue
else:
if DEBUG:
print '[-] ERROR: The server could not be reached!'
DEBUG = False
except pycurl.error as e:
continue
finally:
res.close()
except Exception as e:
print '[-] ERROR: ' + e.message
finally:
cam.close()
def main():
print '[?] Attempting to initiate daemon..'
try:
p = multiprocessing.Process( target=helper_thread, args=(HOOK_URL,))
p.daemon = True
p.start()
print '[^] Server creation successful!'
p.join()
except Exception as e:
print '[-] ERROR: The server could not be started!'
if __name__=='__main__':
main()
| [
"admin@BillWiTheSciencePi.local"
] | admin@BillWiTheSciencePi.local |
f32d1d14da6ef2f04aa921ee33f9c6bf783c1d2a | 6fa42c2dd3d2fad482e354495ee15616784425e8 | /farm_management/animals/migrations/0001_initial.py | 0875e45e464886f34342e695400496b8cca486e7 | [
"MIT"
] | permissive | alexanders0/farm-management | ccf74f9a9d99f4d20173e360e6f776288ce636f3 | 53ed821bbbed312848cf331f8f961ef16c59fb99 | refs/heads/main | 2023-07-18T08:51:20.876231 | 2021-09-01T04:22:59 | 2021-09-01T04:22:59 | 384,026,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,886 | py | # Generated by Django 3.1.12 on 2021-08-25 03:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('lands', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Breed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on wich the object was created.', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time on wich the object was last modified.', verbose_name='modified at')),
('name', models.CharField(max_length=100, verbose_name='breed name')),
('description', models.CharField(max_length=200, verbose_name='breed description')),
('purpose', models.CharField(max_length=30, verbose_name='breed purpose')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on wich the object was created.', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time on wich the object was last modified.', verbose_name='modified at')),
('name', models.CharField(max_length=100, verbose_name='group name')),
('description', models.CharField(max_length=200, verbose_name='group description')),
('land', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lands.land')),
('location', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='lands.paddock')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='Animal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on wich the object was created.', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time on wich the object was last modified.', verbose_name='modified at')),
('name', models.CharField(max_length=150, verbose_name='animal name')),
('birth_date', models.DateField()),
('picture', models.ImageField(blank=True, null=True, upload_to='animals/pictures', verbose_name='animal picture')),
('gender', models.CharField(max_length=30, verbose_name='animal gender')),
('weight', models.FloatField(null=True)),
('breed', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='animals.breed')),
('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='animals.group')),
('land', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lands.land')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
]
| [
"alexandersn059@gmail.com"
] | alexandersn059@gmail.com |
44c1925930e893f90665e267105f0de38e06806c | 885a722e3e5814ae4942ac5e8cf8d0091e734b4c | /게임 개발_Python/CodingTest.py | 44a46c74629a33f66008719905f685de00396184 | [] | no_license | ledpear/algorithm | 52f3ea25842eee20b3bbd48e51825b9df4942e03 | 4922c6fe5ca0b98a90dee218b756006e7ba05d82 | refs/heads/master | 2023-06-09T17:47:45.674244 | 2023-06-03T13:47:11 | 2023-06-03T13:47:11 | 133,370,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,493 | py | n = 4
m = 4
pos_x = 1
pos_y = 1
dir = 0 # 0 : 북, 1 : 동, 2 : 남, 3 : 서
game_map = [[1,1,1,1], [1,0,0,1], [1,1,0,1], [1,1,1,1]]
bool_map = [[0,0,0,0], [0,0,0,0], [0,0,0,0], [0,0,0,0]]
bool_map[pos_y][pos_x] = 1
count = 0
score = 1
while True:
dir -= 1
if dir < 0 : dir = 3
bResult = False
if dir == 0 :
if pos_y - 1 >= 0 :
if game_map[pos_y - 1][pos_x] == 0 and bool_map[pos_y - 1][pos_x] == 0 :
pos_y -= 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 1 :
if pos_x + 1 < m :
if game_map[pos_y][pos_x + 1] == 0 and bool_map[pos_y][pos_x + 1] == 0 :
pos_x += 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 2 :
if pos_y + 1 < n :
if game_map[pos_y + 1][pos_x] == 0 and bool_map[pos_y + 1][pos_x] == 0 :
pos_y += 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 3 :
if pos_x - 1 >= 0 :
if game_map[pos_y][pos_x - 1] == 0 and bool_map[pos_y][pos_x - 1] == 0 :
pos_x -= 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
if bResult :
count = 0
else :
count += 1
if count == 4 :
if dir == 0 :
if pos_y + 1 < n :
if game_map[pos_y + 1][pos_x] == 0 :
pos_y += 1
count = 0
else :
break
else :
break
elif dir == 1 :
if pos_x - 1 >= 0 :
if game_map[pos_y][pos_x - 1] == 0 :
pos_x -= 1
count = 0
else :
break
else :
break
elif dir == 2 :
if pos_y - 1 >= 0 :
if game_map[pos_y - 1][pos_x] == 0 :
pos_y -= 1
count = 0
else :
break
else :
break
elif dir == 3 :
if pos_x + 1 < m :
if game_map[pos_y][pos_x + 1] == 0 :
pos_x += 1
count = 0
else :
break
else :
break
print(score) | [
"tjsrb75@gmail.com"
] | tjsrb75@gmail.com |
129bef2d66319480c6709c8a787ece635646e0a3 | b2eca564f48df7d7274467ac877f0e885008c8fe | /src/python_stack/django_projects/user_practice/apps/practice_users/apps.py | a900bfe2252dbd3e325190c7b51ae905dde0f6c6 | [] | no_license | Lckythr33/CodingDojo | 87104a34b1e5a26dd9c3877f6ac3f3d10075acfc | a6e329ea04516fc67c378781a81d32301ffc1fad | refs/heads/master | 2022-02-18T10:53:27.590534 | 2019-08-27T23:17:52 | 2019-08-27T23:17:52 | 183,742,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from django.apps import AppConfig
class PracticeUsersConfig(AppConfig):
name = 'practice_users'
| [
"Zeeshawn1@gmail.com"
] | Zeeshawn1@gmail.com |
c10efe05a237e5f2dc9b3be53519b97c67a9655e | 156ce3f9543b2daa940861541b6d0fbe9bdc3359 | /lab1.py | ece73aea392d91a0e17dbb937c9860c81a990de4 | [] | no_license | cpe202spring2019/lab1-sky-kurth | c8078341af6f21f26bbf4dac59b01f2f168f2991 | 8072e7a6f4af7db5e8b707dcaec8fe2fea905638 | refs/heads/master | 2020-05-05T00:37:48.797682 | 2019-04-12T16:12:14 | 2019-04-12T16:12:14 | 179,581,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,214 | py | def max_list_iter(int_list): # must use iteration not recursion
if int_list == []:
return None
elif int_list == None:
raise ValueError
else:
highest = int_list[0]
for i in int_list:
if i > highest:
highest = i
return highest
def reverse_rec(int_list): # must use recursion
if int_list == None:
raise ValueError
if int_list == [] :
return []
else:
return [int_list[-1]] + reverse_rec(int_list[:len(int_list) - 1])
def bin_search(target, low, high, int_list): # must use recursion
if int_list == None:
raise ValueError
test_index = int(low + (high - low) / 2)
if int_list[test_index] == target:
return test_index
elif high - low == 1:
if int_list[test_index + 1] == target:
return test_index + 1
else:
return None
elif int_list[test_index] > target:
high = test_index
return bin_search(target, low, high, int_list)
elif int_list[int(high / 2)] < target:
low = test_index
return bin_search(target, low, high, int_list)
| [
"noreply@github.com"
] | noreply@github.com |
77e6199d5c50898f9e8c1220fd067f25d7282b9f | 85e1f76e7817fe51a41e97e6a378ec40fb3f2e5e | /PWSGuatemala.py | 1313c3282e1b3c8fae10886eb76b84561edc8162 | [] | no_license | ileammontoya/Proyecto-OIDs | 5b95d637536aa9cbb9e49c0be128c86a0c9385e9 | b40039a4d0939c7ea8e112e9110a0c17d339993a | refs/heads/master | 2021-01-25T12:23:48.425631 | 2018-03-01T19:51:31 | 2018-03-01T19:51:31 | 123,469,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,559 | py | # coding=utf-8
import os
import stat
import openpyxl
wb=openpyxl.load_workbook("QOS_Guatemala.xlsx")
sheet=wb.get_sheet_by_name("Sheet1")
try:
os.remove("Powershell SNMP Script Guatemala.txt")
except OSError:
pass
row,column=2,1
salir = sheet.cell(row=row,column=column).value
while salir != None:
host=sheet["D"+str(row)].value
ip=sheet["E"+str(row)].value
community=sheet["G"+str(row)].value
if str(sheet["F"+str(row)].value)=="Active":
my_file=open("Powershell SNMP Script Guatemala.txt","a")
my_file.write('echo "#Hostname" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.2.1.1.5 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Indice de las interfaces" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.2.1.2.2.1.2 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Indice de IPs e interfaces" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.2.1.4.20.1.2 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Indice de Calidad de Servicio aplicado a cada interfaz" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.1.1.1.4 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Direccion en la cual se esta aplicando la politica" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.1.1.1.3 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Policy maps configurados en el equipo" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.6.1.1.1 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Class maps configurados en el equipo" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.7.1.1.1 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Indice usando Class-maps e Interfaces" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.5.1.1.2 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Indice Parent Classes" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.5.1.1.4 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Valores del Contador 64 bits - Previo a ejecutar Politicas" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.15.1.1.6 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Valores del Gauge32 - Previo a ejecutar Politicas" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.15.1.1.7 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Valores del Contador 64 bits - Despues a ejecutar Politicas" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.15.1.1.10 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Valores del Gauge32 - Despues a ejecutar Politicas" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.15.1.1.11 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Object Type" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.5.1.1.3 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Queueing current depth" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.18.1.1.1 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Queueing max depth" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.18.1.1.2 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#Queueing discards" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write("snmpwalk -v2c -c "+str(community)+" "+str(ip)+" 1.3.6.1.4.1.9.9.166.1.18.1.1.5 | Out-file C:\users\imontoya\Documents\Guatemala\Host_"+str(host)+".txt -Append"+"\n")
my_file.write('echo "" "" "#End" | Out-file C:\users\imontoya\Documents\Guatemala\Host_'+str(host)+'.txt -Append'+"\n")
my_file.write('\n')
my_file.write('\n')
my_file.close()
else:
inactivos=open("Equipos Guatemala Inactivos.txt","a")
inactivos.write(str(host)+" - "+str(ip)+" \n")
inactivos.close()
row+=1
salir = sheet.cell(row=row,column=column).value
print(row,salir)
| [
"ileammontoya@gmail.com"
] | ileammontoya@gmail.com |
ce17feb6b565ca297c89b1aa7dfa5ec05844d87f | a2a7a3d623e83f39bdf9d7b2278772cd1a52a20c | /Stacks, Queues and Deques using LinkedList/stacks_linkedlist.py | d76124f7c78d4d08fbd9f333ad4c28488545b36c | [] | no_license | SauravJalui/data-structures-and-algorithms-in-python | b0eddd7a68bd6092460b04c5746c704389ada8d6 | fdc9192476ebf476aba923e1c07a02d06386ecef | refs/heads/master | 2022-12-15T09:06:34.697057 | 2020-08-17T15:45:59 | 2020-08-17T15:45:59 | 286,195,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | from exceptions import Empty
class LinkedStack:
    """LIFO stack backed by a singly linked list; push/pop/top are O(1)."""

    class Node:
        # Lightweight per-entry container; __slots__ avoids a per-node dict.
        __slots__ = "element", "next"

        def __init__(self, element, next):
            self.element = element
            self.next = next

    def __init__(self):
        self.size = 0
        self.head = None

    def __len__(self):
        """Number of elements currently on the stack."""
        return self.size

    def is_Empty(self):
        """Return True when the stack holds no elements."""
        return not self.size

    def push(self, e):
        """Place *e* on top of the stack (it becomes the new head node)."""
        self.head = self.Node(e, self.head)
        self.size += 1

    def pop(self):
        """Remove and return the top element; raise Empty if there is none."""
        if self.is_Empty():
            raise Empty("Stack is empty")
        node = self.head
        self.head = node.next
        self.size -= 1
        return node.element

    def top(self):
        """Return (without removing) the top element; raise Empty if none."""
        if self.is_Empty():
            raise Empty("Stack is empty")
        return self.head.element

    def display(self):
        """Print the stack from top to bottom, elements separated by '==>'."""
        cursor = self.head
        while cursor:
            print(cursor.element, end="==>")
            cursor = cursor.next
        print()
# Demo: exercise the stack operations, printing the contents after each step.
ls = LinkedStack()
ls.push(10)
ls.push(20)
ls.push(30)
ls.push(40)
ls.display()
print("Popped: ", ls.pop())
ls.display()
ls.push(70)
ls.display()
print("Top element: ", ls.top())
ls.display()
print("Popped: ", ls.pop())
ls.display()
| [
"sauravjalui.007@gmail.com"
] | sauravjalui.007@gmail.com |
1b304b18d44960ff768c90217ce7ba455dec8c93 | 3378d73f5e7c67ddcf0179e3574357e3354c7c11 | /stripe/db/api.py | 11d21c1e2826ff99a1d951f04beb5a8753b50b8e | [
"Apache-2.0"
] | permissive | babarnazmi/stripe | e8cece6f4697d05c4262b25f40e7056bb61349e5 | f98454e7260b5140aaec35d932a78b3ada73e7a4 | refs/heads/master | 2021-01-15T12:41:17.140601 | 2013-10-30T04:25:50 | 2013-10-30T04:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,694 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLAlchemy storage backend."""
from sqlalchemy.orm import exc
from stripe.common import exception
from stripe.db import models
from stripe.openstack.common.db import api
from stripe.openstack.common.db.sqlalchemy import session as db_session
from stripe.openstack.common import log as logging
# Module-level logger and a shortcut to the shared SQLAlchemy session factory.
LOG = logging.getLogger(__name__)
get_session = db_session.get_session
def get_instance():
    """Return a DB API instance."""
    # Map the 'sqlalchemy' backend name onto this very module.
    return api.DBAPI(backend_mapping={'sqlalchemy': 'stripe.db.api'})
def get_backend():
    """The backend is this module itself.

    Returns a fresh Connection facade; presumably invoked through the
    DBAPI backend mapping declared in get_instance() — confirm.
    """
    return Connection()
def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param model: the SQLAlchemy model class to query
    :param args: extra entities/columns forwarded to ``session.query``
    :param session: if present, the session to use
    """
    session = kwargs.get('session') or get_session()
    query = session.query(model, *args)
    return query
class Connection(object):
    """SqlAlchemy connection.

    CRUD facade over the agent / queue / queue-member / user models.
    ``create_*`` return the persisted model, ``delete_*`` raise the matching
    ``*NotFound`` exception when nothing was deleted, ``get_*`` raise it when
    no row matches, and ``list_*`` return plain lists.
    """
    def __init__(self):
        pass
    def create_agent(self, values):
        """Create a new agent."""
        res = self._create_model(model=models.Agent(), values=values)
        return res
    def create_queue(self, values):
        """Create a new queue."""
        res = self._create_model(model=models.Queue(), values=values)
        return res
    def create_queue_member(self, agent_id, queue_id):
        """Create a new queue member."""
        values = {
            'agent_id': agent_id,
            'queue_id': queue_id,
        }
        res = self._create_model(model=models.QueueMember(), values=values)
        return res
    def delete_agent(self, agent_id):
        """Delete an agent."""
        res = self._delete_model(model=models.Agent, id=agent_id)
        if res != 1:
            raise exception.AgentNotFound(agent_id=agent_id)
    def delete_queue(self, queue_id):
        """Delete a queue."""
        res = self._delete_model(model=models.Queue, id=queue_id)
        if res != 1:
            raise exception.QueueNotFound(queue_id=queue_id)
    def delete_queue_member(self, agent_id, queue_id):
        """Delete a queue member."""
        res = self._delete_model(
            model=models.QueueMember, agent_id=agent_id, queue_id=queue_id
        )
        if res != 1:
            # NOTE(review): only agent_id is reported even though the member
            # is keyed by (agent_id, queue_id) — consider including queue_id.
            raise exception.QueueMemberNotFound(
                agent_id=agent_id
            )
    def get_agent(self, agent_id):
        """Retrieve information about the given agent."""
        try:
            res = self._get_model(model=models.Agent, id=agent_id)
        except exc.NoResultFound:
            raise exception.AgentNotFound(agent_id=agent_id)
        return res
    def get_queue(self, queue_id):
        """Retrieve information about the given queue."""
        try:
            res = self._get_model(model=models.Queue, id=queue_id)
        except exc.NoResultFound:
            raise exception.QueueNotFound(queue_id=queue_id)
        return res
    def get_queue_member(self, agent_id, queue_id):
        """Retrieve information about the given queue member."""
        try:
            res = self._get_model(
                model=models.QueueMember, agent_id=agent_id, queue_id=queue_id
            )
        except exc.NoResultFound:
            raise exception.QueueMemberNotFound(
                agent_id=agent_id
            )
        return res
    def get_user(self, user_id):
        """Retrieve information about the given user."""
        try:
            res = self._get_model(model=models.User, id=user_id)
        except exc.NoResultFound:
            raise exception.UserNotFound(user_id=user_id)
        return res
    def list_agents(self):
        """Retrieve a list of agents."""
        res = self._list_model(model=models.Agent)
        return res
    def list_queues(self):
        """Retrieve a list of queues."""
        res = self._list_model(model=models.Queue)
        return res
    def list_queue_members(self):
        """Retrieve a list of queue members."""
        res = self._list_model(model=models.QueueMember)
        return res
    def list_users(self):
        """Retrieve a list of users."""
        res = self._list_model(model=models.User)
        return res
    def _create_model(self, model, values):
        """Apply ``values`` to ``model`` and persist it; return the model."""
        model.update(values)
        model.save()
        return model
    def _delete_model(self, model, **kwargs):
        """Delete rows of ``model`` matching ``kwargs``; return the count."""
        session = get_session()
        with session.begin():
            query = model_query(
                model, session=session
            ).filter_by(**kwargs)
            count = query.delete()
        return count
    def _get_model(self, model, **kwargs):
        """Retrieve the single row of ``model`` matching ``kwargs``.

        Raises ``exc.NoResultFound`` when no row matches.
        """
        query = model_query(model).filter_by(**kwargs)
        res = query.one()
        return res
    def _list_model(self, model):
        """Retrieve a list of the given model."""
        query = model_query(model)
        # query.all() already returns a list; the previous list-comprehension
        # copy was redundant.
        return query.all()
| [
"paul.belanger@polybeacon.com"
] | paul.belanger@polybeacon.com |
25d370edddfa2f96941c9cf229f20493b80f6d89 | 7764bd7530b9ea3f3c31b141ff3ab8173f73b01e | /cart/migrations/0002_items.py | 530d08416dce316610da333aaa50d8d6e56231fe | [] | no_license | smsGitHubsms/foodvloge | 61f6ba99fc572c4620367c5402bb6625ced4663b | 8f2f2786f86dbca1ec12d297b0c8fba8ca976109 | refs/heads/master | 2023-08-21T19:52:56.877338 | 2021-10-19T15:59:02 | 2021-10-19T15:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 3.2.7 on 2021-10-07 12:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: creates the cart line-item
    # table ('items') linking a cart entry to a shop product with a quantity.
    dependencies = [
        ('shop', '0003_alter_categ_options'),
        ('cart', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='items',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quan', models.IntegerField()),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.cartlist')),
                ('prodt', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.products')),
            ],
        ),
    ]
| [
"simonsarakkal@yahoo.com"
] | simonsarakkal@yahoo.com |
c0d8734c640e57bc7339310e1f014f3f748709bb | 8b95a7225a67b6e8ad30b8ab0ef66076858a29e5 | /app/db.py | 87ae41110e3fe4c87ec667bc808b744a168090c4 | [] | no_license | tehhuu/auto_key | e413669b61b7f3f5832b66e753b86c68d16daa1a | 95866259de5781cdde1f010d286c7e42ba99d5ff | refs/heads/master | 2021-04-16T02:05:03.564332 | 2020-06-12T02:42:14 | 2020-06-12T02:42:14 | 252,633,541 | 0 | 0 | null | 2020-04-03T04:38:09 | 2020-04-03T04:38:09 | null | UTF-8 | Python | false | false | 1,446 | py | from sqlalchemy import create_engine, Column, String, Integer, DATETIME
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
import hashlib
# SQLite engine for the app database.
engine = create_engine('sqlite:///shimerundesu.db', connect_args={"check_same_thread": False})  # allow access from threads other than the creating one
Base = declarative_base()
# Hash a string: strip surrounding whitespace, UTF-8 encode, SHA-256,
# then stringify the raw digest bytes (repr-style, e.g. "b'\\x...'").
def hash(password):
    digest = hashlib.sha256(password.strip().encode("utf-8")).digest()
    return str(digest)
# ORM model describing one row of the ``users`` table.
class User(Base):
    __tablename__ = 'users'
    name = Column(String, primary_key=True, unique=True)  # login name (PK)
    password = Column(String)  # hashed string produced by hash() above
    email = Column(String)
    def __repr__(self):
        # Bug fix: the format string had three placeholders but only two
        # arguments, so repr(user) raised IndexError. Supply the email too.
        return "User<{}, {}, {}>".format(self.name, self.password, self.email)
# Create all declared tables (no-op if present) and open a bound session.
Base.metadata.create_all(engine)
SessionMaker = sessionmaker(bind=engine)
session = SessionMaker()
if __name__ == "__main__":
    # Database seeding: run this file directly to create the database
    # and populate it with demo users.
    user1 = User(name="AAA", password=hash("AAA"), email="AAA@gmail.com")
    user2 = User(name="BBB", password=hash("BBB"), email="BBB@gmail.com")
    user3 = User(name="CCC", password=hash("CCC"), email="CCC@gmail.com")
    user4 = User(name="DDD", password=hash("DDD"), email="DDD@gmail.com")
    session.add(user1)
    session.add(user2)
    session.add(user3)
    session.add(user4)
    session.commit()
"volley_neverlose_exile@yahoo.co.jp"
] | volley_neverlose_exile@yahoo.co.jp |
6dc5dd2a19c1465bda49f14227e31bb5d5962418 | 41051c6db91eb2b71f1f77cc11c2be69e0d004a1 | /modules/core/api/__init__.py | 0433cdb493a0e205096f9c37062c143d09db7e0b | [
"MIT"
] | permissive | shacknetisp/fourthevaz | a2a1bf3f44a9620e76fbbc4a1bcd1d18f0cd3219 | c8e4a97e6ba797ff921bf100aeb4d548bd17549e | refs/heads/master | 2021-01-25T08:59:45.372734 | 2016-01-27T22:13:46 | 2016-01-27T22:13:46 | 27,540,924 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | # -*- coding: utf-8 -*-
import configs.module
import wsgiref.simple_server
import select
import json
import bot
from urllib import parse
import irc.fullparse
import irc.splitparse
import os.path
def init(options):
    """Module entry point: (re)create the JSON API WSGI server if configured.

    Returns a configs.module.Module with the API hooks registered.
    """
    m = configs.module.Module(__name__)
    # Drop any server instance left over from a previous init/reload.
    if 'wserver' in options['server'].state:
        del options['server'].state['wserver']
    try:
        if 'apiport' in options['server'].entry:
            options['server'].state[
                'wserver'] = wsgiref.simple_server.make_server(
                '', options['server'].entry['apiport'],
                application(options['server']))
            print(('Opening API server on %d' % options[
                'server'].entry['apiport']))
    except OSError:
        # Typically the port is already in use.
        print(('Unable to open API server on %d' % options[
            'server'].entry['apiport']))
    m.set_help('Access various bot functions from a json API.')
    # Poll the server socket roughly once per second (see timer()).
    m.add_timer_hook(1 * 1000, timer)
    m.add_base_hook('api.action.command', apiactioncommand)
    m.add_base_hook('api.path.interface', apipathinterface)
    return m
class application:
    """Minimal WSGI application that dispatches requests to bot hooks."""
    def __init__(self, server):
        self.server = server
    def __call__(self, environ, start_response):
        # Default JSON reply; hooks mutate these fields in place.
        ret = {
            'status': 'error',
            'message': 'unknown',
        }
        start_response('200 OK',
                       [('content-type', 'text/html;charset=utf-8')])
        path = environ['PATH_INFO'].strip('/')
        q = parse.parse_qs(environ['QUERY_STRING'])
        action = q['action'][0] if 'action' in q else ''
        try:
            if path:
                # Non-empty path: dispatch to an 'api.path.<path>' hook.
                ret['message'] = 'unknown request'
                ret['status'] = 'error'
                self.server.do_base_hook('api.path.%s' % path,
                                         ret, self.server, q, environ)
            else:
                # Root path: dispatch on the ?action= query parameter.
                ret['message'] = 'invalid action'
                ret['status'] = 'error'
                self.server.do_base_hook('api.action.%s' % action,
                                         ret, self.server, q, environ)
            if '_html' in ret:
                # A hook produced a raw HTML page instead of a JSON reply.
                return [ret['_html'].encode('utf-8')]
        except KeyError:
            # NOTE(review): presumably raised by do_base_hook when no hook is
            # registered for the name — the default error reply is returned.
            pass
        return [json.dumps(ret).encode('utf-8')]
def apiactioncommand(ret, server, q, environ):
    """Hook for ?action=command: execute a bot command, capture its output."""
    del ret['message']
    ip = environ['REMOTE_ADDR']
    if 'command' not in q:
        ret['message'] = 'no command'
        ret['status'] = 'error'
    # NOTE(review): execution continues even when 'command' is missing; the
    # q['command'][0] accesses below then raise KeyError, which the WSGI
    # caller swallows — confirm this fall-through is intended.
    if server.type == 'irc':
        # Synthesize a PRIVMSG from the requester's IP and run it through
        # the normal IRC parsing/execution pipeline.
        def process_message(i):
            sp = irc.splitparse.SplitParser(i)
            fp = irc.fullparse.FullParse(
                server, sp, nomore=True)
            return fp.execute(sp.text)
        ret['output'] = process_message(
            ':%s!%s PRIVMSG %s :%s' % (':' + ip, "~api@" + ip,
                                       server.nick,
                                       q['command'][0],
                                       ))
    elif server.type == 'file':
        ret['output'] = server.fp(server, q['command'][0])
    ret['status'] = 'good'
def apipathinterface(ret, server, q, environ):
    """Hook for /interface: serve the bundled HTML interface page."""
    del ret['message']
    ret['_html'] = open(os.path.dirname(__file__) + '/interface.html').read()
    ret['status'] = 'good'
def timer():
    """Poll each server's WSGI socket; handle at most one pending request."""
    for server in bot.servers():
        if 'wserver' not in server.state:
            continue
        wserver = server.state['wserver']
        # Short (10 ms) select so the bot's main loop is not stalled.
        inr, _, _ = select.select([wserver], [], [], 0.01)
        if inr:
            wserver.handle_request()
"shacknetisp@hotmail.com"
] | shacknetisp@hotmail.com |
09d203431dcba5cd1013b02533880f832da05b45 | 78af3c3c22ab6c0a5cb3256e1891842707e7b022 | /utils/preprocessing.py | 244c2c396fb9f67eb3829755d9bfd610796d3aa5 | [
"MIT"
] | permissive | DrFunny/DeepLav-V3-Custom-Dataset | c2a67ec15af02de45fe9bddca1f43c55f294a8f5 | bf86f1acbd9e4b2f63b2de7d2c9a2db1140afcf1 | refs/heads/master | 2021-04-06T15:19:07.052924 | 2018-03-15T11:20:52 | 2018-03-15T11:20:52 | 125,356,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,733 | py | """Utility functions for preprocessing data sets."""
from PIL import Image
import numpy as np
import tensorflow as tf
# Per-channel RGB means (defaults for the mean_image_* helpers below);
# these match the commonly used ImageNet means.
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94

# colour map: RGB value per class id
label_colours = [(0, 0, 255),  # 0=background
                 (255, 0, 0)   # 1=nails
                 ]


def decode_labels(mask, num_images=1, num_classes=21):
    """Decode batch of segmentation masks.

    Args:
      mask: result of inference after taking argmax, shape [n, h, w, 1].
      num_images: number of images to decode from the batch.
      num_classes: number of classes to predict (including background).

    Returns:
      A batch with num_images RGB uint8 images of the same spatial size as
      the input; pixels whose label is >= num_classes stay black.
    """
    n, h, w, c = mask.shape
    assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' \
        % (n, num_images)
    # Vectorized palette lookup replaces the previous per-pixel Python loop
    # over a PIL image, which was O(h*w) interpreter work per image.
    palette = np.asarray(label_colours, dtype=np.uint8)
    outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
    for i in range(num_images):
        labels = mask[i, :, :, 0]
        valid = labels < num_classes
        # Raises IndexError if a valid label has no palette entry, matching
        # the original label_colours[k] list lookup.
        outputs[i][valid] = palette[labels[valid]]
    return outputs
def mean_image_addition(image, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
    """Add a per-channel mean back onto an image.

    Splits the image into its C channels, adds means[i] to channel i, and
    re-joins the channels. Note that the rank of `image` must be known.

    Args:
      image: a tensor of size [height, width, C].
      means: a C-vector of values to add to each channel.

    Returns:
      The shifted image.

    Raises:
      ValueError: if `image` does not have rank 3, or if the number of
        channels in `image` doesn't match the number of values in `means`.
    """
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')
    num_channels = image.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')

    per_channel = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
    shifted = [channel + mean for channel, mean in zip(per_channel, means)]
    return tf.concat(axis=2, values=shifted)
def mean_image_subtraction(image, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
    """Subtract a per-channel mean from an image.

    Splits the image into its C channels, subtracts means[i] from channel i,
    and re-joins the channels. Note that the rank of `image` must be known.

    Args:
      image: a tensor of size [height, width, C].
      means: a C-vector of values to subtract from each channel.

    Returns:
      The centered image.

    Raises:
      ValueError: if `image` does not have rank 3, or if the number of
        channels in `image` doesn't match the number of values in `means`.
    """
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')
    num_channels = image.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')

    per_channel = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
    centered = [channel - mean for channel, mean in zip(per_channel, means)]
    return tf.concat(axis=2, values=centered)
def random_rescale_image_and_label(image, label, min_scale, max_scale):
    """Rescale an image/label pair by a random factor in [min_scale, max_scale).

    Args:
      image: 3-D Tensor of shape `[height, width, channels]`.
      label: 3-D Tensor of shape `[height, width, 1]`.
      min_scale: Min target scale (must be > 0).
      max_scale: Max target scale (must be > min_scale).

    Returns:
      The rescaled image (bilinear) and label (nearest neighbour) tensors.
    """
    if min_scale <= 0:
        raise ValueError('\'min_scale\' must be greater than 0.')
    if max_scale <= 0:
        raise ValueError('\'max_scale\' must be greater than 0.')
    if min_scale >= max_scale:
        raise ValueError('\'max_scale\' must be greater than \'min_scale\'.')

    shape = tf.shape(image)
    orig_height = tf.to_float(shape[0])
    orig_width = tf.to_float(shape[1])
    scale = tf.random_uniform(
        [], minval=min_scale, maxval=max_scale, dtype=tf.float32)
    scaled_height = tf.to_int32(orig_height * scale)
    scaled_width = tf.to_int32(orig_width * scale)
    image = tf.image.resize_images(image, [scaled_height, scaled_width],
                                   method=tf.image.ResizeMethod.BILINEAR)
    # Label classes are integers, so nearest neighbour must be used to avoid
    # interpolating between class ids.
    label = tf.image.resize_images(label, [scaled_height, scaled_width],
                                   method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return image, label
def random_crop_or_pad_image_and_label(image, label, crop_height, crop_width, ignore_label):
    """Crops and/or pads an image to a target width and height.

    Resizes an image to a target width and height by randomly
    cropping the image or padding it evenly with zeros.

    Args:
      image: 3-D Tensor of shape `[height, width, channels]`.
      label: 3-D Tensor of shape `[height, width, 1]`.
      crop_height: The new height.
      crop_width: The new width.
      ignore_label: Label class to be ignored.

    Returns:
      Cropped and/or padded image and label.
      If `images` was 3-D, a 3-D float Tensor of shape
        `[new_height, new_width, channels]`.
    """
    # Shift labels so that ignore_label becomes 0: pad_to_bounding_box pads
    # with zeros, so padded pixels then decode back to ignore_label below.
    label = label - ignore_label # Subtract due to 0 padding.
    label = tf.to_float(label)
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    # Stack image (3 channels) and label (1 channel) so one random_crop
    # takes the same window from both; hence the channel count of 4 below.
    image_and_label = tf.concat([image, label], axis=2)
    image_and_label_pad = tf.image.pad_to_bounding_box(
        image_and_label, 0, 0,
        tf.maximum(crop_height, image_height),
        tf.maximum(crop_width, image_width))
    image_and_label_crop = tf.random_crop(
        image_and_label_pad, [crop_height, crop_width, 4])
    image_crop = image_and_label_crop[:, :, :3]
    label_crop = image_and_label_crop[:, :, 3:]
    # Undo the ignore_label shift and restore integer class ids.
    label_crop += ignore_label
    label_crop = tf.to_int32(label_crop)
    return image_crop, label_crop
def random_flip_left_right_image_and_label(image, label):
    """Randomly mirror an image/label pair horizontally (left to right).

    A single coin flip decides the outcome, so the image and its label are
    always flipped (or left untouched) together.

    Args:
      image: A 3-D tensor of shape `[height, width, channels].`
      label: A 3-D tensor of shape `[height, width, 1].`

    Returns:
      The (possibly flipped) image and label tensors.
    """
    coin = tf.random_uniform([], 0, 1.0)
    do_flip = tf.less(coin, .5)
    flipped_image = tf.cond(do_flip, lambda: tf.reverse(image, [1]), lambda: image)
    flipped_label = tf.cond(do_flip, lambda: tf.reverse(label, [1]), lambda: label)
    return flipped_image, flipped_label
def eval_input_fn(image_filenames, label_filenames=None, batch_size=1):
    """An input function for evaluation and inference.

    Args:
      image_filenames: The file names for the inferred images.
      label_filenames: The file names for the ground truth labels; when None,
        only images are produced (inference mode).
      batch_size: The number of samples per batch. Needs to be 1
        for images of different sizes.

    Returns:
      A tuple of (images, labels) tensors; labels is None when
      label_filenames is None.
    """
    # Reads an image from a file, decodes it into a dense tensor
    def _parse_function(filename, is_label):
        # In label mode `filename` is an (image, label) pair of paths.
        if not is_label:
            image_filename, label_filename = filename, None
        else:
            image_filename, label_filename = filename
        image_string = tf.read_file(image_filename)
        image = tf.image.decode_image(image_string)
        image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))
        image.set_shape([None, None, 3])
        # Apply the same per-channel mean centering used at training time.
        image = mean_image_subtraction(image)
        if not is_label:
            return image
        else:
            label_string = tf.read_file(label_filename)
            label = tf.image.decode_image(label_string)
            label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))
            label.set_shape([None, None, 1])
            return image, label
    if label_filenames is None:
        input_filenames = image_filenames
    else:
        input_filenames = (image_filenames, label_filenames)
    dataset = tf.data.Dataset.from_tensor_slices(input_filenames)
    if label_filenames is None:
        dataset = dataset.map(lambda x: _parse_function(x, False))
    else:
        dataset = dataset.map(lambda x, y: _parse_function((x, y), True))
    dataset = dataset.prefetch(batch_size)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    if label_filenames is None:
        images = iterator.get_next()
        labels = None
    else:
        images, labels = iterator.get_next()
    return images, labels
| [
"mailatkumarravi@gmail.com"
] | mailatkumarravi@gmail.com |
ad6bf91b33b968d54e7db7520ad4160735b51f89 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/cloud/rackspace/rax_mon_notification.py | 6aee351b964b059b494cccdaa5d0ebe4607d31ee | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 5,165 | py | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Monitoring notification example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Email me when something goes wrong.
rax_mon_entity:
credentials: ~/.rax_pub
label: omg
type: email
details:
address: me@mailhost.com
register: the_notification
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification(module, state, label, notification_type, details):
    """Ensure a Cloud Monitoring notification is present or absent.

    :param module: AnsibleModule used to report results / failures.
    :param state: 'present' or 'absent'.
    :param label: friendly name (1-255 chars) identifying the notification.
    :param notification_type: 'webhook', 'email' or 'pagerduty'.
    :param details: type-specific initialisation dict.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False
    notification = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    existing = []
    for n in cm.list_notifications():
        if n.label == label:
            existing.append(n)

    if existing:
        notification = existing[0]

    if state == 'present':
        should_update = False
        should_delete = False
        should_create = False

        if len(existing) > 1:
            module.fail_json(msg='%s existing notifications are labelled %s.' %
                                 (len(existing), label))

        if notification:
            should_delete = (notification_type != notification.type)
            should_update = (details != notification.details)

            if should_update and not should_delete:
                # Bug fix: previously this passed notification.details (the
                # current value) instead of the requested details, making
                # updates silent no-ops.
                notification.update(details=details)
                changed = True

            if should_delete:
                notification.delete()
                # Bug fix: recreate with the new type. Previously a type
                # change deleted the notification without replacing it.
                should_create = True
        else:
            should_create = True

        if should_create:
            notification = cm.create_notification(notification_type,
                                                  label=label, details=details)
            changed = True
    else:
        # state == 'absent': remove every notification with this label.
        for n in existing:
            n.delete()
            changed = True

    if notification:
        notification_dict = {
            "id": notification.id,
            "type": notification.type,
            "label": notification.label,
            "details": notification.details
        }
        module.exit_json(changed=changed, notification=notification_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Parse module arguments and delegate to notification()."""
    # Standard Rackspace argument spec extended with this module's options.
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
            details=dict(required=True, type='dict')
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    state = module.params.get('state')
    label = module.params.get('label')
    notification_type = module.params.get('notification_type')
    details = module.params.get('details')
    # Authenticate against the Rackspace API before doing any work.
    setup_rax_module(module, pyrax)
    notification(module, state, label, notification_type, details)
if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
b466ab33c5241649fa144b9a4ad3446d2303e061 | 5837de803acd450cfee49d22d25adb2faa940687 | /Python/Demo_FC_Regression.py | dc43219ba6d88352bbbe8f83dedeb912c85b1359 | [] | no_license | hbtom/AlgorithmCode | 2c286bece41157cfb5141f106090067ce759e51a | 89ebfaacbc7f5cc7914df5537c2339dd31f1bb51 | refs/heads/master | 2020-03-22T19:47:27.036083 | 2018-04-21T07:11:05 | 2018-04-21T07:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 21:42:17 2018
@author: wenqiwang
"""
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
# load dataset (13 feature columns + 1 target column, whitespace-separated;
# presumably the Boston housing data — confirm against housing.csv)
dataframe = pandas.read_csv("housing.csv", delim_whitespace=True, header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:,0:13]
Y = dataset[:,13]
# define the network: a 13 -> 6 -> 1 fully-connected regression model
def larger_model():
    """Build and compile the Keras regression model (MSE loss, Adam)."""
    # create model
    model = Sequential()
    model.add(Dense(13, input_dim=13, kernel_initializer='normal', activation='relu'))
    model.add(Dense(6, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
# fix random seed for reproducibility
#seed = 7
#numpy.random.seed(seed)
# evaluate model with standardized dataset: scaling is fit inside each CV
# fold via the Pipeline, then the network is trained and scored.
seed = 7
numpy.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(build_fn=larger_model, epochs=50, batch_size=5, verbose=0)))
pipeline = Pipeline(estimators)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(pipeline, X, Y, cv=kfold)
print("Larger: %.2f (%.2f) MSE" % (results.mean(), results.std()))
"wenqiwang@pal-nat186-47-54.itap.purdue.edu"
] | wenqiwang@pal-nat186-47-54.itap.purdue.edu |
3623809ed7baff2ef3553ae5ea56de4d7103565c | 930309163b930559929323647b8d82238724f392 | /abc104_b.py | 71226076067ba3980f151a868e680909d3029fb5 | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from collections import Counter
def judge(s):
    """Return 'AC' if s is a valid AtCoder-style string, else 'WA'.

    Conditions: the first character is 'A'; there is exactly one 'C' and it
    lies between the third character and the second-to-last character
    (i.e. inside s[2:-1]); every other character is lowercase.
    """
    if s[0] != 'A':
        return 'WA'
    if s.count('C') != 1 or s[2:-1].count('C') != 1:
        return 'WA'
    # Drop the leading 'A' and the single 'C'; everything left must be
    # lowercase. Bug fix: the original removed *every* 'A' before the
    # lowercase test, so an extra uppercase 'A' (e.g. "AACder") was
    # wrongly accepted.
    rest = s[1:].replace('C', '')
    if rest != rest.lower():
        return 'WA'
    return 'AC'


if __name__ == '__main__':
    # Same stdin/stdout contract as the original flat script.
    print(judge(input()))
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
0324f6f61c68dce5be870b8415bcc91ede6c9f40 | e0b8c382217ebdba15af924cb1124fa9900a497f | /课堂笔记/day2/字符串格式化.py | f3f0c77516610b686b8f56d5843153492f9b10dc | [] | no_license | Lujinjian-hunan/python_study | f02cf7ce9c57d277c5ef022571bd11db4b9fea41 | 8b9ddcc479d367d146168a941a03a0e45cd92b0e | refs/heads/master | 2022-12-19T03:07:19.426428 | 2020-09-21T10:13:16 | 2020-09-21T10:13:16 | 292,776,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # xxx,欢迎登陆
# xxx, you need to drink 8 glasses of water today
import datetime
name = '张三'
welcome = '张三,欢迎登陆'
today = datetime.datetime.today()
# String formatting:
# 1. Quick and dirty: plain string concatenation with +
# print(name + ',欢迎登陆' + '今天的日期是' + str(today))  # not recommended
# 2. %-placeholders: %d (number) %s (string) %f (float)
# welcome = '%s,欢迎登陆,今天的日期是 %s' % (name,today)
# print(welcome)
# age = 18
# age_str = '你的年龄是 %d' % age
# score = 73.98312
# score_str = '你的成绩是 %.2f' % score
# print(age_str)
# print(score_str)
# 3. Curly-brace style: str.format (positional or named placeholders)
name2 = 'lily'
phone = '1381232523'
grade = 'tmz'
money = 5000
score = 98.133
addr = '北京'
welcome = '{name},欢迎登陆,今天的日期是{today}'.format(today=today,name=name)
welcome2 = '{},欢迎登陆,今天的日期是{}'.format(name,today)
print(welcome)
print(welcome2)
| [
"lujinjianhunan@qq.com"
] | lujinjianhunan@qq.com |
dc667ed4ef9aa8a702b522295db28f58590bd60f | 45bba18abbc10d1e3ae4857c3420aab74612d787 | /simple_func.py | b2b044dfb0f2b646fd697a1a77324c534a9bc6d3 | [] | no_license | shaikzia/Misc_1 | 3e2b295a4c1c8686c523f4426f2af1e84683ceaf | b02bc6667fc2ba581c4a3b08e583db181f6a30e5 | refs/heads/master | 2020-03-25T18:58:14.688221 | 2018-08-08T21:27:27 | 2018-08-08T21:27:27 | 144,057,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | def squares(l):
for i in l:
a = i*i
print(a)
squares([1,2,3,4])
| [
"shaikzia@gmail.com"
] | shaikzia@gmail.com |
1d8e6fe13a7b49a3266bd445672a49dd36d2d8b8 | cfe1cbb271aec8f147dad6433371bcd6090565d1 | /django_EachSetp/quant/urls.py | 49e1550a28cc2af8fa96b7d2230c6befb4bac2f1 | [] | no_license | fulongyang/DjangoRestFrameworkApiDcoment | 64842bf6423a9aca778f37f5f4cb5dd8049085e8 | bafe0467b56c8b5d9916034102db599e842a7540 | refs/heads/master | 2020-05-23T22:57:58.726846 | 2019-05-16T08:49:05 | 2019-05-16T08:49:05 | 186,984,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py |
from django.conf.urls import url,include
from rest_framework import routers
from quant import views
# URL patterns are tried in order. Bug fix: the regexes must be anchored —
# the original unanchored r'quant' also matched 'quantForex', so the second
# route was unreachable.
urlpatterns = [
    url(r'^quant$', views.Quant.as_view()),
    url(r'^quantForex$', views.QuantForex.as_view()),
]
| [
"993294959@qq.com"
] | 993294959@qq.com |
3e96fd444da733bdf83d8b8fa568d8654594ea4d | eab233ab17c91734e90f3b491bf4d550b1ed29cc | /src/setup.py | 7dd9c52ea504cd23a9d53d66bd053a69c935976e | [] | no_license | zxpeter/learnkedro | c591d7528a699de230e5d5080b5c5bc719056b86 | 6ce47c1d7ee67fa7914ebc85dd978626c20a2b5e | refs/heads/master | 2023-01-08T22:59:38.999303 | 2020-01-06T08:18:35 | 2020-01-06T08:18:35 | 232,045,482 | 0 | 0 | null | 2022-12-26T20:49:02 | 2020-01-06T07:14:49 | Python | UTF-8 | Python | false | false | 2,609 | py | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup

# Console-script entry point installed with the package.
entry_point = "example_kedro = example_kedro.run:run_package"

# Collect runtime dependencies from requirements.txt, stripping comments and
# pip options (e.g. "--extra-index-url") that a modified pip.conf may leave
# behind when running kedro build-reqs.
with open("requirements.txt", "r", encoding="utf-8") as f:
    requires = [
        candidate
        for candidate in (line.split("#", 1)[0].strip() for line in f)
        if candidate and not candidate.startswith("--")
    ]

setup(
    name="example_kedro",
    version="0.1",
    packages=find_packages(exclude=["tests"]),
    entry_points={"console_scripts": [entry_point]},
    install_requires=requires,
    extras_require={
        "docs": [
            "sphinx>=1.6.3, <2.0",
            "sphinx_rtd_theme==0.4.1",
            "nbsphinx==0.3.4",
            "nbstripout==0.3.3",
            "recommonmark==0.5.0",
            "sphinx-autodoc-typehints==1.6.0",
            "sphinx_copybutton==0.2.5",
            "jupyter_client>=5.1.0, <6.0",
            "tornado>=4.2, <6.0",
            "ipykernel>=4.8.1, <5.0",
        ]
    },
)
| [
"peter_zhao@mckinsey.com"
] | peter_zhao@mckinsey.com |
42812734dd1695ef1dc396cf6f70624ba0ec51a5 | 859cfe506a6ca07e86119d861d0a43eed4a0ac6f | /lesson_9_tasks/task_9.1.py | eae0bd879bab7cfc6953d20873cec2e9f045d529 | [] | no_license | Elena-brv/python | b8fb8a3dce34e2fd2c88bab49f6b6bed670600c4 | 4840febdec48c6aa31b8279449baa6e9780165ea | refs/heads/master | 2022-09-18T08:28:11.103256 | 2020-06-04T10:20:55 | 2020-06-04T10:20:55 | 259,435,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | class Rectangle:
    def __init__(self, side1, side2):
        # Lengths of the rectangle's two sides.
        self.side1 = side1
        self.side2 = side2
def square(self):
return self.side1 * self.side2
def perimeter(self):
return 2 * (self.side1 + self.side2)
# Demo: build a rectangle and report its measurements.
rectangle = Rectangle(10, 20)
print(f'side1 = {rectangle.side1}, side2 = {rectangle.side2}')
square, perimeter = rectangle.square(), rectangle.perimeter()
print(f'Square of rectangle = {square}, perimeter = {perimeter}')
| [
"noreply@github.com"
] | noreply@github.com |
2a7e4b5860ab2ccae8cf2c5725f8ef0fce84e41a | dcf529f6d2e2e5d6bd206f5d18e636bb74531429 | /scratch/dx7_constants.py | 9b4fd7a223ab121e9aa177fd12c82ee9eaa20bb1 | [
"MIT"
] | permissive | jGambit/NeuralDX7 | f25ac70d3c75d81abf554aa447e2a29773ef41bf | 327844cea18a6dfe35e0dc8f5de0832343487366 | refs/heads/master | 2022-12-12T09:11:09.144554 | 2020-09-01T01:05:27 | 2020-09-01T01:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,228 | py | from pathlib import Path
import bitstruct
# Root directory for saved artifacts (Google Drive mount path as used on Colab).
ARTIFACTS_ROOT = Path('/content/gdrive/My Drive/audio/artifacts').expanduser()
def take(take_from, n):
    """Yield up to `n` items from the iterator `take_from`.

    Uses itertools.islice so an iterator shorter than `n` simply stops.
    The original `next()` loop would, under PEP 479, turn the underlying
    StopIteration into a RuntimeError inside this generator.
    """
    from itertools import islice
    yield from islice(take_from, n)
# A DX7 voice has 6 operators; a bulk sysex dump carries 32 voices.
N_OSC = 6
N_VOICES = 32
def checksum(data):
    """Return the 7-bit sysex checksum: (128 - sum(data)) mod 128.

    The original expression applied a redundant `% 128` after `& 127`
    (which already reduces modulo 128) and relied on non-obvious operator
    precedence; both are cleaned up here with identical results.
    """
    return (128 - sum(data)) & 127
# Valid value range for each per-voice (global) DX7 parameter: pitch-EG
# rates/levels (PR/PL), algorithm, LFO settings, transpose and the 10
# voice-name characters.
GLOBAL_VALID_RANGES = {
    'PR1': range(0, 99+1),
    'PR2': range(0, 99+1),
    'PR3': range(0, 99+1),
    'PR4': range(0, 99+1),
    'PL1': range(0, 99+1),
    'PL2': range(0, 99+1),
    'PL3': range(0, 99+1),
    'PL4': range(0, 99+1),
    'ALG': range(0, 31+1),
    'OKS': range(0, 1+1),
    'FB': range(0, 7+1),
    'LFS': range(0, 99+1),
    'LFD': range(0, 99+1),
    'LPMD': range(0, 99+1),
    'LAMD': range(0, 99+1),
    'LPMS': range(0, 7+1),
    'LFW': range(0, 5+1),
    'LKS': range(0, 1+1),
    'TRNSP': range(0, 48+1),
    'NAME CHAR 1': range(128),
    'NAME CHAR 2': range(128),
    'NAME CHAR 3': range(128),
    'NAME CHAR 4': range(128),
    'NAME CHAR 5': range(128),
    'NAME CHAR 6': range(128),
    'NAME CHAR 7': range(128),
    'NAME CHAR 8': range(128),
    'NAME CHAR 9': range(128),
    'NAME CHAR 10': range(128),
}
# Valid value range for each per-operator parameter: EG rates/levels,
# keyboard scaling, detune, sensitivities, output level and frequency fields.
OSCILLATOR_VALID_RANGES = {
    'R1': range(0, 99+1),
    'R2': range(0, 99+1),
    'R3': range(0, 99+1),
    'R4': range(0, 99+1),
    'L1': range(0, 99+1),
    'L2': range(0, 99+1),
    'L3': range(0, 99+1),
    'L4': range(0, 99+1),
    'BP': range(0, 99+1),
    'LD': range(0, 99+1),
    'RD': range(0, 99+1),
    'RC': range(0, 3+1),
    'LC': range(0, 3+1),
    'DET': range(0, 14+1),
    'RS': range(0, 7+1),
    'KVS': range(0, 7+1),
    'AMS': range(0, 3+1),
    'OL': range(0, 99+1),
    'FC': range(0, 31+1),
    'M': range(0, 1+1),
    'FF': range(0, 99+1),
}
# Whole-voice ranges: one copy of the operator parameters per operator
# (keys prefixed "<operator index>_") plus the global parameters.
VOICE_PARAMETER_RANGES = {f'{i}_{key}': value for key, value in OSCILLATOR_VALID_RANGES.items() for i in range(N_OSC)}
VOICE_PARAMETER_RANGES.update(GLOBAL_VALID_RANGES)
def verify(actual, ranges):
    """Return True iff every value in `actual` lies inside its allowed range.

    Raises AssertionError when the two dicts do not share the same key set.
    """
    assert set(actual.keys()) == set(ranges.keys()), 'Params dont match'
    return all(actual[key] in ranges[key] for key in actual)
# Symbolic names for each field of a DX7 32-voice bulk-dump sysex message.
HEADER_KEYS = [
    'ID',
    'Sub-status',
    'format number',
    'byte count',
    'byte count',
]
GENERAL_KEYS = [
    'PR1',
    'PR2',
    'PR3',
    'PR4',
    'PL1',
    'PL2',
    'PL3',
    'PL4',
    'ALG',
    'OKS',
    'FB',
    'LFS',
    'LFD',
    'LPMD',
    'LAMD',
    'LPMS',
    'LFW',
    'LKS',
    'TRNSP',
    'NAME CHAR 1',
    'NAME CHAR 2',
    'NAME CHAR 3',
    'NAME CHAR 4',
    'NAME CHAR 5',
    'NAME CHAR 6',
    'NAME CHAR 7',
    'NAME CHAR 8',
    'NAME CHAR 9',
    'NAME CHAR 10',
]
OSC_KEYS = [
    'R1',
    'R2',
    'R3',
    'R4',
    'L1',
    'L2',
    'L3',
    'L4',
    'BP',
    'LD',
    'RD',
    'RC',
    'LC',
    'DET',
    'RS',
    'KVS',
    'AMS',
    'OL',
    'FC',
    'M',
    'FF',
]
FOOTER_KEYS = ['checksum']
# Per-voice key order: all operator keys (prefixed with the operator index),
# then the global voice keys.
VOICE_KEYS = [f'{i}_{key}' for i in range(6) for key in OSC_KEYS] + \
             GENERAL_KEYS
KEYS = HEADER_KEYS + \
       list(VOICE_KEYS * N_VOICES) + \
       FOOTER_KEYS
# bitstruct format strings: 'pN' = N padding bits, 'uN' = N-bit unsigned field.
header_bytes = [
    'p1u7', # ID # (i=67; Yamaha)
    'p1u7', # Sub-status (s=0) & channel number (n=0; ch 1)
    'p1u7', # format number (f=9; 32 voices)
    'p1u7', # byte count MS byte
    'p1u7', # byte count LS byte (b=4096; 32 voices)
]
general_parameter_bytes = [
    'p1u7', # PR1
    'p1u7', # PR2
    'p1u7', # PR3
    'p1u7', # PR4
    'p1u7', # PL1
    'p1u7', # PL2
    'p1u7', # PL3
    'p1u7', # PL4
    'p3u5', # ALG
    'p4u1u3', # OKS| FB
    'p1u7', # LFS
    'p1u7', # LFD
    'p1u7', # LPMD
    'p1u7', # LAMD
    'p1u3u3u1', # LPMS | LFW |LKS
    'p1u7', # TRNSP
    'p1u7', # NAME CHAR 1
    'p1u7', # NAME CHAR 2
    'p1u7', # NAME CHAR 3
    'p1u7', # NAME CHAR 4
    'p1u7', # NAME CHAR 5
    'p1u7', # NAME CHAR 6
    'p1u7', # NAME CHAR 7
    'p1u7', # NAME CHAR 8
    'p1u7', # NAME CHAR 9
    'p1u7', # NAME CHAR 10
]
osc_parameter_bytes = [
    'p1u7', # R1
    'p1u7', # R2
    'p1u7', # R3
    'p1u7', # R4
    'p1u7', # L1
    'p1u7', # L2
    'p1u7', # L3
    'p1u7', # L4
    'p1u7', # BP
    'p1u7', # LD
    'p1u7', # RD
    'p4u2u2', # RC | LC
    'p1u4u3', # DET | RS
    'p3u3u2', # KVS | AMS
    'p1u7', # OL
    'p2u5u1', # FC | M
    'p1u7' # FF
]
voice_bytes = (osc_parameter_bytes * N_OSC) + general_parameter_bytes
tail_bytes = [
    'p1u7', # checksum
]
# NOTE(review): full_string hard-codes 6 operators instead of N_OSC and
# covers the header plus a single voice -- presumably intentional; confirm.
full_string = ''.join(header_bytes + osc_parameter_bytes * 6 + general_parameter_bytes)
dx7_struct = bitstruct.compile(full_string)
voice_struct = bitstruct.compile(''.join(voice_bytes), names=VOICE_KEYS)
header_struct = bitstruct.compile(''.join(header_bytes))
if __name__=="__main__":
    print(VOICE_KEYS)
"nintoracaudio@gmail.com"
] | nintoracaudio@gmail.com |
62c9e199951a20e568b4669c6dacaa73e3b4b916 | 4e0a038b851a7e8eec78309f5eb56884275ab529 | /PGA_attacks.py | b3ca7af98af39546a14a1776a6853ee06291d078 | [
"Apache-2.0"
] | permissive | xuxinzhang/PGA-attack | 383adb04e41798269668cdf007e65f7518a19cec | 4181745df6c48350ea9f3bc7f6643fa3b22f171e | refs/heads/master | 2022-12-17T21:04:30.388267 | 2020-09-15T08:55:16 | 2020-09-15T08:55:16 | 290,934,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,514 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 19 15:22:04 2020
@author: zxx
"""
import random
import time
import copy
import numpy as np
from numpy.random import RandomState
from dataset import load_movielens_ratings
from dataset import build_user_item_matrix
from ALS_optimize import ALS
from ALS_optimize_origin import ALS_origin
from evaluation import predict
from evaluation import RMSE
from compute_grad import compute_grad_PGA
#from attack_evaluation import availability_rmse
def random_mal_ratings(mal_user,n_item,mal_item,seed = None):
# random generator malicious users data
assert mal_item < n_item
mal_ratings = []
for u in range(mal_user):
mal_user_idx = u
mal_item_idx = random.sample(range(n_item), mal_item)
for i in range(mal_item):
mal_movie_idx = mal_item_idx[i]
RandomState(seed).rand()/0.2
mal_rating = int(5 * RandomState(seed).rand()) + 1
mal_ratings.append([mal_user_idx, mal_movie_idx, mal_rating])
return np.array(mal_ratings)
#数据类型转换的函数
def arraytorating(malarray, mal_user, n_item):
malrating = []
for u in range(mal_user):
for i in range(n_item):
if malarray[u,i] != 0 :
malrating.append([u, i, malarray[u,i]])
return np.array(malrating)
############################################################################################################
#train origin model
def optimize_model_origin(converge, n_user, n_item, n_feature, train, mean_rating_, lamda_u, lamda_v, user_features_origin_, item_features_origin_):
print("Start training model without data poisoning attacks!")
last_rmse = None
n_iters = 100
for iteration in range(n_iters):
t1 = time.time()
user_features_origin_, item_features_origin_ = ALS_origin(n_user, n_item, n_feature, train, mean_rating_, lamda_u, lamda_v, user_features_origin_, item_features_origin_)
train_preds = predict(train.take([0, 1], axis=1), user_features_origin_, item_features_origin_)
train_rmse = RMSE(train_preds, train.take(2, axis=1) - 3)
t2 = time.time()
print("The %d th iteration \t time: %ds \t RMSE: %f " % (iteration + 1, t2 - t1, train_rmse))
# stop when converge
if last_rmse and abs(train_rmse - last_rmse) < converge:
break
else:
last_rmse = train_rmse
return last_rmse
#train added attack data model
def optimize_model(converge, n_user, n_item, n_feature, mal_user, train, mean_rating_, mal_mean_rating_, mal_ratings, lamda_u, lamda_v, \
user_features_, mal_user_features_, item_features_):
print("Start training model with data poisoning attacks!")
last_rmse = None
n_iters = 100
for iteration in range(n_iters):
t1 = time.time()
user_features_, mal_user_features_, item_features_ = ALS(n_user, n_item, n_feature, mal_user, \
train, mean_rating_, mal_mean_rating_, mal_ratings, lamda_u, lamda_v, \
user_features_, mal_user_features_, item_features_)
train_preds = predict(train.take([0, 1], axis=1), user_features_, item_features_)
train_rmse = RMSE(train_preds, train.take(2, axis=1) - 3)
t2 = time.time()
print("The %d th iteration \t time: %ds \t RMSE: %f " % (iteration + 1, t2 - t1, train_rmse))
# stop when converge
if last_rmse and abs(train_rmse - last_rmse) < converge:
break
else:
last_rmse = train_rmse
return last_rmse
def main_PGA(data_size,attack_size = 0.05,fill_size = 0.05,target_item = 22):
'''
parameters:
lamda_u: the regularization parameter of user
lamda_v: the regularization parameter of item
attack_size: the proportion of malicious users
mal_item: the items of malicious users rating
n_iter: number of iteration
converge: the least RMSE between two iterations
train_pct: the proportion of train dataset
'''
n_feature = 64
lamda_u = 5e-2
lamda_v = 5e-2
converge = 1e-5
if data_size == '100K':
ratings_file = 'ratings_ml.csv'
ratings = load_movielens_ratings(ratings_file)
if data_size == '1M':
ratings = np.load('ratings_1m.npy')
#断言评分的最大值为5,最小值为1
max_rating = max(ratings[:, 2])
min_rating = min(ratings[:, 2])
assert max_rating == 5
assert min_rating == 1
train = ratings
n_user = max(train[:, 0]) + 1
n_item = max(train[:, 1]) + 1
mal_user = int(attack_size * n_user)
# mal_user = 2
# mal_user = 47
mal_item = int(fill_size * n_item)
# add malicious users data
mal_ratings = random_mal_ratings(mal_user,n_item,mal_item)
#initialize the matrix U U~ and V
seed = None
user_features_ = 0.1 * RandomState(seed).rand(n_user, n_feature)
mal_user_features_ = 0.1 * RandomState(seed).rand(mal_user, n_feature)
item_features_ = 0.1 * RandomState(seed).rand(n_item, n_feature)
mean_rating_ = np.mean(train.take(2, axis=1))
mal_mean_rating_ = np.mean(mal_ratings.take(2, axis=1))
user_features_origin_ = 0.1 * RandomState(seed).rand(n_user, n_feature)
item_features_origin_ = 0.1 * RandomState(seed).rand(n_item, n_feature)
#using the algorithm of PGA to optimize the utility function
'''
m_iters: number of iteration in PGA
s_t: step size
Lamda: the contraint of vector
'''
m_iters = 10
s_t = 0.2 * np.ones([m_iters])
last_rmse = None
last_rmse = optimize_model_origin(converge, n_user, n_item, n_feature, train, mean_rating_, \
lamda_u, lamda_v, user_features_origin_, item_features_origin_)
print(last_rmse)
mal_data = build_user_item_matrix(mal_user,n_item,mal_ratings).toarray()
mal_data_index_dic = {} #这个好像是为了只更新有值的部分
for i in range(mal_user):
mal_data_index_dic[i] = np.where(mal_data[i,:] != 0)
last_rmse = optimize_model(converge, n_user, n_item, n_feature, mal_user, train, \
mean_rating_, mal_mean_rating_, mal_ratings, lamda_u, \
lamda_v, user_features_, mal_user_features_, item_features_)
for t in range(m_iters):
t1 = time.time()
# grad_total = np.zeros([mal_user, n_item])
## w_j0 = 2, u1 = 0.5, u2 = 0.5
# for i in range(mal_user):
# print('Computing the %dth malicious user' %i)
# mal_use_index = mal_data_index_dic[i][0]
# for j in range(mal_use_index.shape[0]):
# # print(j)
# mal_item = mal_use_index[j]
#
# grad_total[i, mal_item] = 0.2
grad_total = compute_grad_PGA(mal_data_index_dic, n_user, n_item, mal_user, mal_ratings, train, user_features_, mal_user_features_, \
item_features_, lamda_v, n_feature, user_features_origin_, item_features_origin_, target_item)
# grad_total = 0.01*np.ones([mal_user,n_item])
# grad_total = np.load('grad.npy')
temp = copy.deepcopy(mal_data)
mal_data_new = mal_data + grad_total * s_t[t]
mal_data_new[mal_data_new > 5] = 5
mal_data_new[mal_data_new < 1] = 1
mal_data = np.multiply(mal_data_new,1 * (temp != 0 ))
np.save('attack_PGA_05_801_%d.npy'%t,(mal_data + 0.5).astype(np.int32))
print('PGA saved %d'%t)
# mal_data[mal_data > 5] = 5
# mal_data[mal_data < 0] = 0
mal_ratings = arraytorating(mal_data, mal_user, n_item).astype(np.int32)
mal_mean_rating_ = np.mean(mal_ratings.take(2, axis=1))
# rmse = RMSE(mal_data, temp)
rmse = optimize_model(converge, n_user, n_item, n_feature, mal_user, train, \
mean_rating_, mal_mean_rating_, mal_ratings, lamda_u, \
lamda_v, user_features_, mal_user_features_, item_features_)
t2 = time.time()
print("The %d th iteration \t time: %ds \t RMSE: %f " % (t + 1, t2 - t1, rmse))
if last_rmse and abs(rmse - last_rmse) < converge:
break
else:
last_rmse = rmse
np.save('attack_PGA_05.npy',(mal_data + 0.5).astype(np.int32))
print('PGA saved')
return (mal_data + 0.5).astype(np.int32)
#test = np.load('attack_PGA_05.npy')
#data_size = '100K'
data_size = '1M'
#rmse_av, rmse_seen = availability_rmse(train, mal_data, converge, lamda_u, lamda_v)
#mal_PGA = main_PGA(data_size)
###########################################################################################################
##########################################################################
#from attack_evaluation import attack_df_rmse
#
#
#mal_data_df = mal_data.astype(np.int32) + 3
#attack_df = np.concatenate((train_matrix,mal_data_df),axis = 0)
#
#def attack_r(attack_df):
# attack_rating = []
# for u in range(attack_df.shape[0]):
# for i in range(attack_df.shape[1]):
# if attack_df[u,i] != 0 :
# attack_rating.append([u, i, attack_df[u,i]])
# attack_rating = np.array(attack_rating).astype(np.int32)
# return attack_rating
#
#attack_rating = attack_r(attack_df)
#attack_df_rmse(attack_rating,attack_df.shape[0],attack_df.shape[1])
| [
"43082328+xuxinzhang@users.noreply.github.com"
] | 43082328+xuxinzhang@users.noreply.github.com |
55e1d31dd2e470dfcd920512e254a5464d23d426 | dde719db687e7625e280ac69ddbf3d1b82dab67a | /flask-app/venv/bin/easy_install-2.7 | a71f6d955414d3dd51d89c1d75115c6b30d12580 | [] | no_license | kevin-warshaw/cloud-computing-pa2 | ccb884ea45b74429a1b2bc914491fdc71aa036db | c9f827f80277afb3ca554f075f6f440fd78cf4d6 | refs/heads/master | 2021-04-30T00:57:32.721006 | 2018-02-14T19:59:38 | 2018-02-14T19:59:38 | 121,469,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | 7 | #!/Users/kevinwarshaw1/Documents/Cloud/cloud-computing-pa2/flask-app/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"31418628+kevin-warshaw@users.noreply.github.com"
] | 31418628+kevin-warshaw@users.noreply.github.com |
356ef6a9a413417230d7a00ec03943f5b55e71fa | 54fe259ac9c0a65b39d662ae9cc30d5e0a7acba0 | /IX/Client.py | 3b1fed11b78c8c5f7920551aa11a4ddfaaea43ce | [] | no_license | kumanmontt/python- | 805894e0e02bd64edfbbcbafb469a7f73deca05e | e40ff42398b2ae63dba1301949b834754e05f75a | refs/heads/master | 2021-06-25T07:06:09.520154 | 2020-10-23T05:15:50 | 2020-10-23T05:15:50 | 146,395,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# encoding: utf-8
#客户端调用,用于查看API返回结果
from OkcoinSpotAPI import OKCoinSpot
from OkcoinFutureAPI import OKCoinFuture
#初始化apikey,secretkey,url
apikey = 'XXXX'
secretkey = 'XXXXX'
okcoinRESTURL = 'www.okcoin.com' #请求注意:国内账号需要 修改为 www.okcoin.cn
#现货API
okcoinSpot = OKCoinSpot(okcoinRESTURL,apikey,secretkey)
#期货API
okcoinFuture = OKCoinFuture(okcoinRESTURL,apikey,secretkey)
print (u' 现货行情 ')
print (okcoinSpot.ticker('btc_usd'))
print (u' 现货深度 ')
print (okcoinSpot.depth('btc_usd'))
#print (u' 现货历史交易信息 ')
#print (okcoinSpot.trades())
#print (u' 用户现货账户信息 ')
#print (okcoinSpot.userinfo())
#print (u' 现货下单 ')
#print (okcoinSpot.trade('ltc_usd','buy','0.1','0.2'))
#print (u' 现货批量下单 ')
#print (okcoinSpot.batchTrade('ltc_usd','buy','[{price:0.1,amount:0.2},{price:0.1,amount:0.2}]'))
#print (u' 现货取消订单 ')
#print (okcoinSpot.cancelOrder('ltc_usd','18243073'))
#print (u' 现货订单信息查询 ')
#print (okcoinSpot.orderinfo('ltc_usd','18243644'))
#print (u' 现货批量订单信息查询 ')
#print (okcoinSpot.ordersinfo('ltc_usd','18243800,18243801,18243644','0'))
#print (u' 现货历史订单信息查询 ')
#print (okcoinSpot.orderHistory('ltc_usd','0','1','2'))
#print (u' 期货行情信息')
#print (okcoinFuture.future_ticker('ltc_usd','this_week'))
#print (u' 期货市场深度信息')
#print (okcoinFuture.future_depth('btc_usd','this_week','6'))
#print (u'期货交易记录信息')
#print (okcoinFuture.future_trades('ltc_usd','this_week'))
#print (u'期货指数信息')
#print (okcoinFuture.future_index('ltc_usd'))
#print (u'美元人民币汇率')
#print (okcoinFuture.exchange_rate())
#print (u'获取预估交割价')
#print (okcoinFuture.future_estimated_price('ltc_usd'))
#print (u'获取全仓账户信息')
#print (okcoinFuture.future_userinfo())
#print (u'获取全仓持仓信息')
#print (okcoinFuture.future_position('ltc_usd','this_week'))
#print (u'期货下单')
#print (okcoinFuture.future_trade('ltc_usd','this_week','0.1','1','1','0','20'))
#print (u'期货批量下单')
#print (okcoinFuture.future_batchTrade('ltc_usd','this_week','[{price:0.1,amount:1,type:1,match_price:0},{price:0.1,amount:3,type:1,match_price:0}]','20'))
#print (u'期货取消订单')
#print (okcoinFuture.future_cancel('ltc_usd','this_week','47231499'))
#print (u'期货获取订单信息')
#print (okcoinFuture.future_orderinfo('ltc_usd','this_week','47231812','0','1','2'))
#print (u'期货逐仓账户信息')
#print (okcoinFuture.future_userinfo_4fix())
#print (u'期货逐仓持仓信息')
#print (okcoinFuture.future_position_4fix('ltc_usd','this_week',1))
| [
"noreply@github.com"
] | noreply@github.com |
9e64d3a61a0e00be82339404cdc6aa623e89ff36 | 9d02f04a38b23f4cc4cdeb5bf0ec2f8d0aa4e1bc | /oop2/cheatdice.py | 9e08d2f4a2521a977ebbba0cf74b5ddc6b5cc17c | [] | no_license | cronan-sde/mycode | 943679260e1c13ff944d776215a0b9fe754c1b46 | 184ca434acd876ab794d07f259266310a20805cc | refs/heads/main | 2023-02-12T04:23:52.724582 | 2021-01-11T17:52:48 | 2021-01-11T17:52:48 | 326,742,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from random import randint
class Player:
def __init__(self):
self.dice = []
def roll(self):
self.dice = [] # clears current dice
for i in range(3):
self.dice.append(randint(1,6))
def get_dice(self):
return self.dice
class Cheat_Swapper(Player):
def cheat(self):
self.dice[-1] = 6
class Cheat_Loaded_Dice(Player):
def cheat(self):
i = 0
while i < len(self.dice):
if self.dice[i] < 6:
self.dice[i] += 1
i += 1
#add my own cheat class
class Cheat_Swapper_V2(Player):
def cheat(self):
for num in range(len(self.dice)):
self.dice[num] = 6
| [
"codycronberger@gmail.com"
] | codycronberger@gmail.com |
772a6b05963c1796c9a2f54b96ab884eee44995f | 067020d4bd39b6a2df300492c09b6cc65915ab71 | /engineerx/posts/modules/initialize.py | a4d80863fe566411bd4139a90152dae2e145ce37 | [] | no_license | HsnVahedi/engineerx-backend | 2e6d43079d94311f60089d052c278e2cbbfec76b | 018257fc53e2588aec2dd159922275d544147e18 | refs/heads/main | 2023-04-30T22:21:25.873313 | 2021-05-15T22:00:37 | 2021-05-15T22:00:37 | 336,623,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from posts.models import PostsPage
from home.models import HomePage
def create_posts_page(owner):
if PostsPage.objects.exists():
return
posts_page = PostsPage(title='Posts', owner=owner)
home_page = HomePage.objects.first()
home_page.add_child(instance=posts_page)
posts_page = PostsPage.objects.get(slug=posts_page.slug)
posts_page.save()
posts_page.save_revision().publish()
| [
"mcs.hsn.vahedi@gmail.com"
] | mcs.hsn.vahedi@gmail.com |
baf88f5c4ecfe9d00884bb0828984a5cfbb6f89b | cf62f7a7f9e13205fe83957fb7bfcf1b097bf481 | /docs/auto_rebuild.py | e0280183897075857ab708b4fc55a809538c4be9 | [
"Apache-2.0"
] | permissive | biothings/mygene.info | 09bf19f481c066789a4ad02a0d2880f31dae28f6 | fe1bbdd81bc29b412ca4288d3af38e47c0602ab7 | refs/heads/master | 2023-08-22T21:34:43.540840 | 2023-08-08T23:25:15 | 2023-08-08T23:25:18 | 54,933,630 | 89 | 20 | NOASSERTION | 2023-07-18T23:53:49 | 2016-03-29T00:36:49 | Python | UTF-8 | Python | false | false | 876 | py | import os.path
import subprocess
import tornado.autoreload
import tornado.ioloop
#an alternative:
#watchmedo shell-command --pattern="*.rst;*.py" --recursive --command="make html" .
included_ext = ['.py', '.rst', '.css', '.html']
def build():
subprocess.call('make html'.split())
#restart dev server
subprocess.call('touch ../src/index.py'.split())
def watch_rst(arg, dirname, fnames):
for fn in fnames:
for ext in included_ext:
if fn.endswith(ext):
f_path = os.path.join(dirname, fn)
tornado.autoreload.watch(f_path)
#print f_path, os.path.exists(f_path)
def main():
tornado.autoreload.add_reload_hook(build)
os.path.walk('.', watch_rst, None)
loop = tornado.ioloop.IOLoop.instance()
tornado.autoreload.start(loop)
loop.start()
if __name__ == '__main__':
main() | [
"anewgene@yahoo.com"
] | anewgene@yahoo.com |
a0abbc1ed0bab74222442b06db0a1214f2cf0b8a | a44d853d6a7354129d7fdfcf0f43e4f9a9106015 | /tests/mesh_utils_test.py | 8e2b29f7f37cc3baabd584c9ba35ddee05fc4abe | [
"Apache-2.0"
] | permissive | matthewfeickert/jax | 4f6b9ba2a96e1521f776886a08be38dd229f1402 | b0d96bd42440231cc7e98c61f52106f46578fca4 | refs/heads/main | 2021-12-10T06:03:36.919415 | 2021-12-09T06:04:13 | 2021-12-09T06:04:46 | 436,520,694 | 0 | 0 | Apache-2.0 | 2021-12-09T07:23:30 | 2021-12-09T07:23:29 | null | UTF-8 | Python | false | false | 6,407 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mesh utils."""
import dataclasses
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from jax import test_util
from jax.experimental import mesh_utils
@dataclasses.dataclass
class MockTpuDevice:
  """Mock TPU device for testing."""
  platform: str  # Always 'tpu' in these tests.
  device_kind: str  # e.g. 'TPU v3' or 'TPU v4'.
  process_index: int  # Index of the host process owning this device.
  coords: Sequence[int]  # (x, y, z) chip position in the physical torus.
  core_on_chip: int  # Core index within the chip (0 or 1).
def mock_devices(x, y, z, dev_kind, two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 8x8, 4x4x4."""
  devices = []
  process_index = 0
  # device_id is tracked but never stored on the devices -- presumably kept
  # for parity with the real backend's output; confirm before removing.
  device_id = 0
  # One process (host) per 2x2 block of chips within each z-plane.
  for k in range(z):
    for j in range(0, y, 2):
      for i in range(0, x, 2):
        # Local 2x2 subgrid of chips, with 2 cores per chip.
        host_devices = [
            MockTpuDevice('tpu', dev_kind, process_index, (i, j, k), 0),
            MockTpuDevice('tpu', dev_kind, process_index, (i, j, k), 1),
            MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j, k), 0),
            MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j, k), 1),
            MockTpuDevice('tpu', dev_kind, process_index, (i, j + 1, k), 0),
            MockTpuDevice('tpu', dev_kind, process_index, (i, j + 1, k), 1),
            MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j + 1, k), 0),
            MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j + 1, k), 1),
        ]
        if two_cores_per_chip:
          # Only include core_on_chip = 0.
          host_devices = host_devices[::2]
        devices.extend(host_devices)
        device_id += len(host_devices)
        process_index += 1
  return devices
# Fixed TPU topologies used by the parameterized tests below.
def mock_8x8_devices():
  """Hard-coded reproduction of jax.devices() output on 8x8."""
  return mock_devices(8, 8, 1, 'TPU v3', False)
def mock_2x2x1_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 2x2x1."""
  return mock_devices(2, 2, 1, 'TPU v4', two_cores_per_chip)
def mock_2x2x4_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 2x2x4."""
  return mock_devices(2, 2, 4, 'TPU v4', two_cores_per_chip)
def mock_4x4x4_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 4x4x4."""
  return mock_devices(4, 4, 4, 'TPU v4', two_cores_per_chip)
def mock_4x4x8_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 4x4x8."""
  return mock_devices(4, 4, 8, 'TPU v4', two_cores_per_chip)
def mock_8x8x8_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 8x8x8."""
  return mock_devices(8, 8, 8, 'TPU v4', two_cores_per_chip)
def mock_4x8x8_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 4x8x8."""
  return mock_devices(4, 8, 8, 'TPU v4', two_cores_per_chip)
def mock_4x8x16_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 4x8x16."""
  return mock_devices(4, 8, 16, 'TPU v4', two_cores_per_chip)
def mock_8x8x16_devices(two_cores_per_chip):
  """Hard-coded reproduction of jax.devices() output on 8x8x16."""
  return mock_devices(8, 8, 16, 'TPU v4', two_cores_per_chip)
class PartitioningTest(test_util.JaxTestCase):
  """Tests for mesh_utils' private TPU device-mesh helpers."""
  # (topology, two_cores_per_chip) -> expected (x, y, z, cores) bounds.
  @parameterized.named_parameters(
      ('2x2x1_t', mock_2x2x1_devices, True, (2, 2, 1, 1)),
      ('2x2x1_f', mock_2x2x1_devices, False, (2, 2, 1, 2)),
      ('8x8x16_t', mock_8x8x16_devices, True, (8, 8, 16, 1)),
      ('8x8x16_f', mock_8x8x16_devices, False, (8, 8, 16, 2)),
  )
  def test_bounds_from_last_device(self, devices, two_cores_per_chip,
                                   expected_bounds):
    self.assertEqual(
        mesh_utils._bounds_from_last_device(devices(two_cores_per_chip)[-1]),
        expected_bounds)
  # Normalization must place the chip with physical coords (i, j, k) at
  # array index (i, j, k).
  @parameterized.named_parameters(
      ('4x4x4', mock_4x4x4_devices, (4, 4, 4)),
      ('4x4x8', mock_4x4x8_devices, (4, 4, 8)),
      ('8x8x8', mock_8x8x8_devices, (8, 8, 8)),
      ('8x8x16', mock_8x8x16_devices, (8, 8, 16)),
  )
  def test_jax_devices_order_normalized(self, devices, expected_shape):
    jax_local_devices_from_process_0 = mock_2x2x1_devices(True)
    jax_devices = devices(True)
    normalized = mesh_utils._jax_devices_order_normalized(
        jax_local_devices_from_process_0, jax_devices)
    self.assertEqual(normalized.shape, expected_shape)
    x, y, z = expected_shape
    # major_to_minor: x, y, z
    for i in range(x):
      for j in range(y):
        for k in range(z):
          self.assertEqual(normalized[i, j, k].coords, (i, j, k))
  # (topology, logical mesh shape) -> expected assignment of physical axes
  # to each logical mesh axis.
  @parameterized.named_parameters(
      ('2x2x1', mock_2x2x1_devices, [1, 1, 4], ((), (2,), (0, 1))),
      ('2x2x4', mock_2x2x4_devices, [1, 4, 4], ((), (2,), (0, 1))),
      ('4x4x4', mock_4x4x4_devices, [1, 16, 4], ((), (1, 2), (0,))),
      ('4x4x8a', mock_4x4x8_devices, [1, 16, 8], ((), (0, 1), (2,))),
      ('4x4x8b', mock_4x4x8_devices, [1, 8, 16], ((), (2,), (0, 1))),
      ('4x4x8c', mock_4x4x8_devices, [16, 8, 1], ((0, 1), (2,), ())),
      ('4x8x8', mock_4x8x8_devices, [1, 32, 8], ((), (0, 2), (1,))),
      ('8x8x8', mock_8x8x8_devices, [1, 64, 8], ((), (1, 2), (0,))),
      ('8x8x16', mock_8x8x16_devices, [1, 64, 16], ((), (0, 1), (2,))),
  )
  def test_create_device_mesh_for_tpu_v4(self, devices, mesh_shape,
                                         expected_assignment):
    jax_local_devices_from_process_0 = mock_2x2x1_devices(True)
    jax_devices = devices(True)
    physical_mesh = mesh_utils._jax_devices_order_normalized(
        jax_local_devices_from_process_0, jax_devices)
    _, assignment = mesh_utils._create_device_mesh_for_tpu_v4(
        physical_mesh, mesh_shape)
    self.assertEqual(assignment, expected_assignment)
# Run the absl test runner when executed directly.
if __name__ == '__main__':
  absltest.main()
| [
"no-reply@google.com"
] | no-reply@google.com |
4a73745e4771aed4bae4a3a47744562e049ae160 | f7b0e3c6cdf08058066b1b514e796cda4a09a80e | /lfd/WIDERFACE_evaluation/evaluation.py | 73a7429add4798da1c00b4bcd0967ff56f176766 | [] | no_license | scott-mao/LFD-A-Light-and-Fast-Detector | beeb4a9915a426281ee6aff3d4be069f3647dbd4 | 4fd82d091fc75b6a070deba90887fa6b6a0dc90e | refs/heads/master | 2023-02-22T13:22:51.656687 | 2021-01-25T12:42:14 | 2021-01-25T12:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | # -*- coding: utf-8 -*-
import os
import math
from ..data_pipeline.augmentation import *
__all__ = ['SIO_evaluation']
def SIO_evaluation(model,
                   val_image_root,
                   results_save_root='.',
                   classification_threshold=0.5,
                   nms_threshold=0.3,
                   ):
    """Run single-image inference over a WIDERFACE-style validation tree and
    write one detection file per image in the WIDERFACE submission format.

    Each output .txt contains: image stem, number of boxes, one dummy
    near-zero-score box, then one 'x y w h score' line per detection --
    TODO confirm the bbox element layout against predict_for_single_image.
    """
    assert os.path.exists(val_image_root)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(results_save_root, exist_ok=True)
    counter = 0
    for parent, dir_names, file_names in os.walk(val_image_root):
        for file_name in file_names:
            if not file_name.lower().endswith(('.jpg', '.jpeg')):
                continue
            results = model.predict_for_single_image(
                image=os.path.join(parent, file_name),
                aug_pipeline=simple_widerface_val_pipeline,
                classification_threshold=classification_threshold,
                nms_threshold=nms_threshold
            )
            # WIDERFACE groups results by event == the image's parent folder
            # (basename is portable, unlike splitting on '/').
            event_name = os.path.basename(parent)
            event_dir = os.path.join(results_save_root, event_name)
            os.makedirs(event_dir, exist_ok=True)
            # 'with' guarantees the file is closed even if a write raises
            # (the original open()/close() pair leaked on exceptions).
            result_path = os.path.join(event_dir, file_name.split('.')[0] + '.txt')
            with open(result_path, 'w') as fout:
                fout.write(file_name.split('.')[0] + '\n')
                # +1 accounts for the dummy box written on the next line.
                fout.write(str(len(results) + 1) + '\n')
                fout.write('0 0 0 0 0.001\n')
                for bbox in results:
                    # floor/ceil so the integer box fully covers the detection;
                    # clamp the score (bbox[1]) at 1.
                    fout.write('%d %d %d %d %.03f' % (math.floor(bbox[2]), math.floor(bbox[3]), math.ceil(bbox[4]), math.ceil(bbox[5]), bbox[1] if bbox[1] <= 1 else 1) + '\n')
            counter += 1
            print('[%5d] %s is processed.' % (counter, file_name))
| [
"scu0743111019"
] | scu0743111019 |
d3366b8875c54405497810ad860a6ad92779b450 | 2265c393b8396292b79fdbcdd08727be24c2337a | /tbonlineproject/relatedcontent/models.py | 2795965bfa7595e7eab4cec3e5338a95be54a301 | [
"MIT"
] | permissive | nathangeffen/tbonline-2 | 4275b2f970170f01f62e01ade008ab5cd1aee0d5 | 0d5869197e66a0057fa07cb99f21dde7f5b47c30 | refs/heads/master | 2023-01-07T08:43:35.261568 | 2019-03-31T15:54:16 | 2019-03-31T15:54:16 | 30,840,752 | 0 | 0 | MIT | 2022-12-26T20:18:09 | 2015-02-15T20:24:49 | Python | UTF-8 | Python | false | false | 3,347 | py | from django.db import models
# Create your models here.
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from enhancedtext.fields import EnhancedTextField
# (stored two-digit code, human-readable label) choices for RelatedContent.category.
TYPES_OF_RELATED_CONTENT = (
    ('00', _('Related articles')),
    ('05', _('Further Reading')),
    ('10', _('See also')),
    ('15', _('Source')),
    ('20', _('Reference'))
)
class Webpage(models.Model):
    """Represents manually maintained links to external web pages for display,
    say, on the front page of a website.
    """
    title = models.CharField(max_length=200)
    url = models.CharField(max_length=200,
                           verbose_name=_('URL'))
    byline = models.CharField(blank=True, max_length=200,
                    help_text=_('The institution or organisation '
                                'that produces this website. There is no '
                                'problem with leaving this blank.'))
    date = models.DateField(blank=True, null=True,
                    help_text=_('Sometimes it is useful to include the '
                                'date a blog was written. But mostly this '
                                'field will be left blank.'))
    # Raw attributes injected into the rendered HTML <a> tag (e.g. title=...).
    html_A_tag_options = models.CharField(max_length=200, blank=True,
                    help_text=_('You can put link, title and other '
                                'HTML A tag attributes here. '
                                'Leave blank if you are unsure.'))
    # "\W" default appears to be an EnhancedTextField format marker --
    # presumably selects the default markup type; confirm in enhancedtext.fields.
    description = EnhancedTextField(blank=True, default="\W")
    date_last_edited = models.DateTimeField(auto_now=True, editable=False)
    def __unicode__(self):
        return self.title
    class Meta:
        # Oldest-edited first (ascending last-edit time).
        ordering = ['date_last_edited',]
        verbose_name = _('webpage')
        verbose_name_plural = _('webpages')
class RelatedContent(models.Model):
    '''Model for representing additional reading links that can be attached
    to articles.
    '''
    # Generic relation: (content_type, object_id) identify the object
    # (e.g. an article) this link is attached to.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    webpage = models.ForeignKey(Webpage,
                                verbose_name=_('link'))
    # One of TYPES_OF_RELATED_CONTENT; '05' is 'Further Reading'.
    category = models.CharField(max_length=2,
                                choices=TYPES_OF_RELATED_CONTENT,
                                default='05')
    # Manual sort key within the ordering declared in Meta.
    position = models.PositiveIntegerField(default=0, blank=True, null=True)

    @staticmethod
    def get_related_content(model_instance=None, content_type=None, object_id=None):
        '''Returns all instances on this model which point to either the given model_instance
        or the model instance specified by content_type and object_id.

        Either pass model_instance or content_type and object_id. Don't pass both.
        If model_instance is given it wins: content_type/object_id are derived
        from it and any explicitly passed values are overwritten.
        '''
        if model_instance:
            content_type = ContentType.objects.get_for_model(model_instance)
            object_id = model_instance.pk
        return RelatedContent.objects.filter(content_type=content_type, object_id=object_id)

    def __unicode__(self):
        return unicode(self.content_type) + u' - ' + unicode(self.webpage)

    class Meta:
        verbose_name = _("related content")
        verbose_name_plural = _("related content")
        ordering = ('content_type', 'object_id', 'category', 'position',)
| [
"nathangeffen@gmail.com"
] | nathangeffen@gmail.com |
0c4592163732241c956a602193eb7cee2f1e960e | c9299c10a175a8d925839adc58bbc7f86d4650f5 | /weartracker/settings.py | e2afabeef497b577ab383b4ac93e3fc2862d6f72 | [] | no_license | bartwroblewski/strava_gear_wear_tracker | e2e082a04cfdd226b24f59d64f1a0651224eba68 | e12e3cd559d096668525ce2e0bd6047d3d508c4c | refs/heads/master | 2023-09-04T13:58:42.909991 | 2023-08-20T18:52:06 | 2023-08-20T18:52:06 | 296,386,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,003 | py | """
Django settings for weartracker project.
Generated by 'django-admin startproject' using Django 3.0.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# Strava credentials
CLIENT_ID = os.environ['STRAVA_CLIENT_ID']
CLIENT_SECRET = os.environ['STRAVA_CLIENT_SECRET']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = True
DOMAIN = 'stravageartracker.herokuapp.com'#'localhost'#'f918bae6f5b0.ngrok.io'#
ALLOWED_HOSTS = [DOMAIN]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tracker',
'frontend',
'rest_framework',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weartracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weartracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
#STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
REACT_APP_DIR = os.path.join(BASE_DIR, 'frontend')
STATICFILES_DIRS = [
os.path.join(REACT_APP_DIR, 'static', 'frontend'),
]
COMPRESS_ENABLED = os.environ.get('COMPRESS_ENABLED', False)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'geartracker0@gmail.com'
# SECURITY: this app password is committed to source control. It should be
# read from the environment (like SECRET_KEY above, e.g.
# os.environ['EMAIL_HOST_PASSWORD']) and the exposed credential revoked.
EMAIL_HOST_PASSWORD = 'zshpzozgyugleqpn' #past the key or password app here
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'default from email'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ('%(asctime)s [%(process)d] [%(levelname)s] ' +
'pathname=%(pathname)s lineno=%(lineno)s ' +
'funcname=%(funcName)s %(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'testlogger': {
'handlers': ['console'],
'level': 'INFO',
}
}
}
django_heroku.settings(locals()) | [
"barti.wroblewski@gmail.com"
] | barti.wroblewski@gmail.com |
3369acd1cb1d638a9339030ec77a12c51053f8d3 | fb031eefb0fa54b742559dfd1dfaab28434c40ed | /hy-data-analysis-with-python-2020/part01-e18_acronyms/src/acronyms.py | 0bdaeef35a8b471ad27c8ed8b374462d7ccf278f | [] | no_license | nroovers/data-analysis-python-course | d2ad0aeb00b1106c1b7b86880c9d0a39677f59f1 | 692f020ad1c3da6855bf1a684a7148a71e766952 | refs/heads/main | 2023-03-05T09:39:50.547189 | 2021-02-21T14:28:52 | 2021-02-21T14:28:52 | 338,845,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!/usr/bin/env python3
def acronyms(s):
    """Collect the all-uppercase tokens of *s*, trimmed of '.,()' characters.

    A whitespace-separated token counts as an acronym when str.isupper()
    is true for it, i.e. it contains at least one cased character and
    every cased character is uppercase (digits/punctuation are ignored
    by the test, which is why stripping happens after the check).
    """
    found = []
    for token in s.split():
        if token.isupper():
            found.append(token.strip('.,()'))
    return found
def main():
    """Demo entry point: print the acronyms found in a sample GDPR notice."""
    sample = """For the purposes of the EU General Data Protection Regulation (GDPR), the controller of your personal information is International Business Machines Corporation (IBM Corp.), 1 New Orchard Road, Armonk, New York, United States, unless indicated otherwise. Where IBM Corp. or a subsidiary it controls (not established in the European Economic Area (EEA)) is required to appoint a legal representative in the EEA, the representative for all such cases is IBM United Kingdom Limited, PO Box 41, North Harbour, Portsmouth, Hampshire, United Kingdom PO6 3AU."""
    print(acronyms(sample))


if __name__ == "__main__":
    main()
| [
"nroovers@gmail.com"
] | nroovers@gmail.com |
2f1a55b3da82432efa2afe619c76cef6cdaf8814 | c431b7c44aea656c3388e4d18c0fed48250facfe | /euler_47.py | e16164b824a40c363d445ff483c42fc78a21ae98 | [] | no_license | dfturn/Project-Euler | 09d462cdf0d928fc8ba4f2138c7ea43418d6a94a | 46f9e35a345bdb2296d31604e97ee7b10a414378 | refs/heads/master | 2020-04-02T13:47:17.094953 | 2014-12-18T20:01:44 | 2014-12-18T20:01:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | import os
import pickle
import time
primes_file = 'euler_35_primes_to_million.txt'
primes = []
if os.path.exists( primes_file ):
with open(primes_file, 'rb') as f:
primes = pickle.load(f)
def primeFactors(n):
    """Return the set of distinct prime factors of n (empty set for n < 2).

    Self-contained trial division: divide out 2 first, then odd candidate
    divisors up to sqrt(n); whatever cofactor remains above 1 is itself
    prime.

    Fixes two defects of the previous implementation, which walked the
    module-level pickled ``primes`` list:
      * it raised IndexError whenever the cached list was missing or too
        short for n's smallest remaining factor;
      * it looped forever for n == 0.
    Results are unchanged for every n the old code handled.
    """
    factors = set()
    if n < 2:
        return factors
    while n % 2 == 0:
        factors.add(2)
        n //= 2
    d = 3
    while d * d <= n:
        while n % d == 0:
            factors.add(d)
            n //= d
        d += 2
    if n > 1:
        factors.add(n)  # leftover cofactor is prime
    return factors
def work():
    """Project Euler 47: find the first run of four consecutive integers
    that each have exactly four distinct prime factors; print the first
    number of the run and the factor set of its last member."""
    streak = 0
    wanted = 4  # distinct prime factors required per number
    for candidate in xrange(200, 1000000):
        factors = primeFactors(candidate)
        if len(factors) == wanted:
            streak += 1
        else:
            streak = 0
        if streak == 4:
            print(candidate - 3)
            print(factors)
            break


work()
"dfturn@gmail.com"
] | dfturn@gmail.com |
9007c8f9d8308e19c701db355531585e8a40045a | 6e23fbb1fea030984bd696875290b828e42e0c37 | /Oops/Student.py | 97ff4f6f74452299f2c2c9f85f3340f0937bde0a | [] | no_license | pratapreddyavula1/Python-Files | 04014c0c4a77f77e35931ac230b8697895c236de | e2404877993ca98cb47d396e3720221b30cb27ae | refs/heads/main | 2023-08-02T20:11:43.242427 | 2021-10-06T04:46:42 | 2021-10-06T04:46:42 | 414,070,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | from Human import human
class Student(human):
    """Interactive student record built on the `human` base class: prompts
    for identity and four subject marks, derives total/average/grade, and
    prints a small report."""

    def StudntDetails(self):
        """Collect the inherited `human` fields, then name, id, class and
        the four subject marks; finishes by computing the totals."""
        self.input()  # inherited: gather the common `human` fields first
        self.sname = input("Enter Student name:")
        self.sid = int(input("Enter Student Id:"))
        self.cls = input("Enter Student Class:")
        self.maths = int(input("Enter Maths Marks:"))
        self.social = int(input("Enter Social Marks:"))
        self.hindi = int(input("Enter Hindi Marks:"))
        self.english = int(input("Enter English Marks:"))
        self.Total()

    def Total(self):
        """Derive total, average and letter grade from the four marks,
        then print the report via display()."""
        self.total = self.maths + self.social + self.hindi + self.english
        self.avgs = self.total / 4
        if self.avgs >= 90:
            self.Grade = "A+"
        elif self.avgs >= 75:
            self.Grade = "A"
        elif self.avgs >= 45:
            self.Grade = "B"
        elif self.avgs >= 35:
            self.Grade = "c"  # lowercase 'c', kept exactly as the original
        else:
            self.Grade = "F"
        self.display()

    def display(self):
        """Print the collected record, one field per blank-line-separated row."""
        print("**********************************")
        print("Student Name:", self.sname)
        print()
        print("Student Id:", self.sid)
        print()
        print("Student Class:", self.cls)
        print()
        print("Student Total:", self.total)
        print()
        print("Student Average:", self.avgs)
        print()
        print("Student Grade:", self.Grade)
| [
"noreply@github.com"
] | noreply@github.com |
3eb61005fad14bca9cf5fd376de4dce1fcd4bddd | e8881b5eeab336273bb521b4db40783170f479dc | /vagrant/restaurant/finalProject.py | d1527fc612fdced0dafed11727f946e81cab02a9 | [] | no_license | RustyHoff/fullstack-nanodegree-vm | 9e0772c13b102986239aa1222b6d13c436d110ff | a826a652dc9fc3642c1151db571286748db42bcf | refs/heads/master | 2020-04-08T09:33:04.226681 | 2017-02-26T04:23:32 | 2017-02-26T04:23:32 | 50,117,590 | 0 | 0 | null | 2016-01-21T15:44:51 | 2016-01-21T15:44:51 | Python | UTF-8 | Python | false | false | 6,100 | py | from flask import Flask, render_template, url_for, request, redirect, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
app = Flask(__name__)
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/restaurants/')
def showRestaurants():
    """List every restaurant (also serves the site root)."""
    all_restaurants = session.query(Restaurant).all()
    return render_template('restaurants.html', restaurants=all_restaurants)
@app.route('/restaurant/new/', methods=['GET', 'POST'])
def newRestaurant():
    """GET: show the creation form. POST: persist the new restaurant and
    redirect back to the listing."""
    if request.method != 'POST':
        return render_template('newRestaurant.html')
    restaurant = Restaurant(name=request.form['name'])
    session.add(restaurant)
    session.commit()
    return redirect(url_for('showRestaurants'))
@app.route('/restaurant/<int:restaurant_id>/menu/new/', methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
    """GET: show the form for a new menu item. POST: create the item under
    the given restaurant and return to that restaurant's menu."""
    if request.method != 'POST':
        return render_template('newMenuItem.html', restaurant_id=restaurant_id)
    item = MenuItem(name=request.form['name'],
                    course=request.form['course'],
                    description=request.form['description'],
                    price=request.form['price'],
                    restaurant_id=restaurant_id)
    session.add(item)
    session.commit()
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
@app.route('/restaurant/<int:restaurant_id>/edit/', methods=['GET', 'POST'])
def editRestaurant(restaurant_id):
    """GET: show the edit form. POST: rename the restaurant (a blank name
    is ignored, nothing is committed) and return to the listing."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if request.method != 'POST':
        return render_template('editRestaurant.html', restaurant=restaurant)
    new_name = request.form['name']
    if new_name:
        restaurant.name = new_name
        session.add(restaurant)
        session.commit()
    return redirect(url_for('showRestaurants'))
@app.route('/restaurant/<int:restaurant_id>/delete/', methods=['GET', 'POST'])
def deleteRestaurant(restaurant_id):
    """GET: ask for confirmation. POST: remove the restaurant together with
    every menu item that belongs to it."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if request.method != 'POST':
        return render_template('deleteRestaurant.html',
                               restaurant=restaurant,
                               restaurant_id=restaurant.id)
    session.delete(restaurant)
    # delete dependent menu items by hand — presumably the schema has no
    # DB-level cascade; confirm against database_setup
    for item in session.query(MenuItem).filter_by(restaurant_id=restaurant_id):
        session.delete(item)
    session.commit()
    return redirect(url_for('showRestaurants'))
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
    """Render one restaurant's menu page."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    menu_items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id)
    return render_template('menu.html', restaurant=restaurant, items=menu_items)
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit/', methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
    """GET: show the edit form for a menu item. POST: apply every non-blank
    form field to the item and return to the restaurant's menu."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if request.method != 'POST':
        return render_template('editMenuItem.html', item=item, restaurant=restaurant)
    # blank fields leave the existing value untouched, as before
    for field in ('name', 'price', 'description', 'course'):
        value = request.form[field]
        if value:
            setattr(item, field, value)
    session.add(item)
    session.commit()
    return redirect(url_for('showMenu', restaurant_id=restaurant.id))
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete', methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
    """GET: confirmation page. POST: delete the item and return to the menu."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if request.method != 'POST':
        return render_template('deleteMenuItem.html', item=item, restaurant=restaurant)
    session.delete(item)
    session.commit()
    return redirect(url_for('showMenu', restaurant_id=restaurant.id))
### JSON API Endpoints ###
@app.route('/restaurant/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
    """JSON list of one restaurant's menu items."""
    # the result is unused; presumably kept as an existence check
    # (.one() raises for an unknown id) — confirm before removing
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    menu_items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
    return jsonify(MenuItems=[item.serialize for item in menu_items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
    """JSON representation of a single menu item."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    return jsonify(MenuItem=item.serialize)
@app.route('/restaurants/JSON')
def restaurantsJSON():
    """JSON list of all restaurants."""
    all_restaurants = session.query(Restaurant).all()
    return jsonify(Restaurant=[r.serialize for r in all_restaurants])
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger while
    # the app binds to 0.0.0.0 (all interfaces) — acceptable for local
    # development only; do not run this configuration on a reachable host.
    app.debug = True
    app.run(host = '0.0.0.0', port = 5000)
| [
"RustyHoff@users.noreply.github.com"
] | RustyHoff@users.noreply.github.com |
dbaa1b5ced49279a5ec1cba62afccfee5770b178 | e3fa9006d65186fab6194b6d3f9b3e096d79f4bc | /api/const.py | 7295a58f36a55e989f1b9c6dca389e47b2dc0e52 | [] | no_license | yancai/report | 714bee225b45471d69968290886a3c8d59c726f9 | 9dddde864b9698943c3d0845994560f2e828a4c1 | refs/heads/master | 2023-02-21T06:33:39.401779 | 2020-02-12T13:54:50 | 2020-02-12T13:54:50 | 166,948,160 | 0 | 0 | null | 2023-02-15T21:20:34 | 2019-01-22T07:28:12 | HTML | UTF-8 | Python | false | false | 288 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
# Shared dictionary-key constants used across the report API.
KEY_USER_ID = "user_id"
KEY_USER_NAME = "user_name"
KEY_DATE_STR = "date_str"
KEY_YESTERDAY = "yesterday"
KEY_TODAY = "today"
KEY_DOMAIN = "domain"

# Major interpreter version as a string ("2" or "3").
# Use sys.version_info rather than comparing the lexicographic sys.version
# string: '"3.10..." >= "3"' happens to work today but is fragile (and would
# break outright for a hypothetical "10.x" version string).
PY_VERSION = "3" if sys.version_info[0] >= 3 else "2"

if __name__ == "__main__":
    pass
| [
"yancai915@gmail.com"
] | yancai915@gmail.com |
10e3e838a5a6c0e937e453765f9a61bb9f30cbaa | 572107468a93152774849373d09cb1ecb3b60998 | /members/migrations/0001_initial.py | 237afb6a3316ceec9e1999400ad26e9db96f2a10 | [] | no_license | parkhongbeen/instagram-restudy | 794d9962d37aec56961cee4c145de6dc291d3d40 | 5587d23163e6242e5deb5dbe1051caae179c6eb4 | refs/heads/master | 2020-12-09T18:12:10.783801 | 2020-01-12T11:16:01 | 2020-01-12T11:16:01 | 233,380,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,861 | py | # Generated by Django 2.2.9 on 2020-01-10 05:13
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"pack122@naver.com"
] | pack122@naver.com |
350a01c77e36e638520d42ca6328484de8659c59 | c301998433dcbc304e37e557f34a9050ca2929e2 | /src/model.py | 30122c4c224728d059e24f4e2c49a5af224bde11 | [] | no_license | y9dai/emosta_proto | 0ac6c7dc8ea7bcc3990c2ec9cc15ebfe02cb549f | 3769bf9053e43fa2561097a7fd0b201e9157b362 | refs/heads/master | 2020-06-25T04:40:58.344498 | 2019-07-28T15:18:54 | 2019-07-28T15:18:54 | 199,204,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,918 | py | from __future__ import division
import os
# import sys
# sys.path.append('../')
import time
from time import time
import tensorflow as tf
import numpy as np
from tqdm import *
import copy
import logging
import json
from collections import OrderedDict
from colorama import init
from colorama import Fore, Back, Style
class SeqGAN():
    def __init__(self, sess, batch_size):
        """Build the SeqGAN caption/story-generator graph (TensorFlow 1.x).

        Args:
            sess: tf.Session owned by the caller; stored for later use.
            batch_size: fixed batch size baked into every placeholder.
        """
        ## graph
        self.sess = sess
        self.batch_size = batch_size
        self.rnn_cell = 'gru'  # encoder/decoder cell type: 'gru' or 'lstm'
        self.G_hidden_size = 512
        # ## dataset
        self.vocab_size = 26867
        # presumably three concatenated 4096-d CNN feature vectors — TODO confirm
        self.image_feat_dim = 4096*3
        self.max_words = 70 # contain the <S> and </S>
        self.lstm_steps = self.max_words
        # vocabulary: word->id map, its inverse, and the special-token ids
        vocab_file = './models/word2id_5.json'
        self.word2ix = json.load(open(vocab_file, 'r'))
        self.ix2word = {value: key for key,value in self.word2ix.items()}
        self.START = self.word2ix['<S>']
        self.END = self.word2ix['</S>']
        self.UNK = self.word2ix['<UNK>']
        ## placeholder
        # image feat
        self.image_feat = tf.placeholder(tf.float32, [self.batch_size, self.image_feat_dim])
        # previously sampled word ids, fed back in by build_loss to recover their log-probs
        self.sample_words_for_loss = tf.placeholder(tf.int32, [self.batch_size, self.max_words])
        # rollout_reward, flattened steps*batch to match build_loss's stack order
        self.discounted_reward = tf.placeholder(tf.float32, [self.max_words*self.batch_size])
        ############################## Generator #############################
        # multinomial sampler (training) and argmax decoder (testing); the
        # second call reuses the variables created by the first
        sample_words_list = self.generator(name="G", reuse=False)
        sample_words_list_argmax = self.generator_test(name="G", reuse=True)
        sample_words = tf.stack(sample_words_list)
        self.sample_words = tf.transpose(sample_words, [1,0]) # B,S
        sample_words_argmax = tf.stack(sample_words_list_argmax)
        self.sample_words_argmax = tf.transpose(sample_words_argmax, [1,0]) # B,S
        ############################## Record parameters #####################
        # collect every variable whose name contains 'G' for loading/optimizing
        load_G_params = tf.global_variables()
        params = tf.trainable_variables()
        self.load_G_params_dict = {}
        self.G_params = []
        self.G_params_dict = {}
        for param in load_G_params:
            if 'G' in param.name:
                self.load_G_params_dict.update({param.name: param})
        for param in params:
            if 'G' in param.name:
                self.G_params.append(param)
                self.G_params_dict.update({param.name: param})
        logging.info(Fore.GREEN + 'Build graph complete!')
def batch_norm(self, x, mode='train', name=None):
return tf.contrib.layers.batch_norm(inputs=x,
decay=0.95,
center=True,
scale=True,
is_training=(mode=='train'),
updates_collections=None,
scope=(name+'batch_norm'))
    def build_loss(self, name="generator", reuse=True):
        '''
        Policy-gradient (REINFORCE) loss for the generator.

        Re-runs the decoder in teacher-forced mode over the previously
        sampled caption ``self.sample_words_for_loss`` to recover the
        log-probability of each sampled word, weights it by the externally
        supplied ``self.discounted_reward`` (flattened steps*batch, matching
        the stack/reshape order at the bottom), masks every position after
        the first </S>, and normalizes by the number of unmasked positions.

        ``reuse`` defaults to True because the variables were already
        created by generator()/generator_test() in __init__.
        '''
        random_uniform_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
        with tf.variable_scope(name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            # ----- shared parameters (identical names/shapes to generator()) -----
            with tf.device("/cpu:0"), tf.variable_scope("word"):
                # name: "generator/word"
                word_emb_W = tf.get_variable("word_emb_W", [self.vocab_size, self.G_hidden_size], tf.float32, random_uniform_init)
            with tf.variable_scope("image_feat"):
                # name: "generator/image_feat"
                image_feat_W = tf.get_variable("image_feat_W", [self.image_feat_dim, self.G_hidden_size], tf.float32, random_uniform_init)
                image_feat_b = tf.get_variable("image_feat_b", [self.G_hidden_size], tf.float32, random_uniform_init)
            with tf.variable_scope("output"):
                # name: "generator/output"
                output_W = tf.get_variable("output_W", [self.G_hidden_size, self.vocab_size], tf.float32, random_uniform_init)
                output_b = tf.get_variable("output_b", [self.vocab_size], tf.float32, random_uniform_init)
            with tf.variable_scope("lstm_encoder"):
                if self.rnn_cell == 'lstm':
                    encoder = tf.nn.rnn_cell.LSTMCell(self.G_hidden_size, state_is_tuple=True)
                elif self.rnn_cell == 'gru':
                    encoder = tf.nn.rnn_cell.GRUCell(self.G_hidden_size)
            with tf.variable_scope("lstm_decoder"):
                # WONT BE CREATED HERE — cell variables are created lazily on
                # first call (under the "lstm" scope in the loop below)
                if self.rnn_cell == 'lstm':
                    decoder = tf.nn.rnn_cell.LSTMCell(self.G_hidden_size, state_is_tuple=True)
                elif self.rnn_cell == 'gru':
                    decoder = tf.nn.rnn_cell.GRUCell(self.G_hidden_size)
            #============================= dropout ===================================================================
            # NOTE(review): self.rnn_drop is never assigned in this file's
            # __init__ — confirm it is set elsewhere before build_loss runs,
            # otherwise this attribute access raises AttributeError.
            if self.rnn_drop > 0:
                logging.debug(Fore.CYAN + 'using dropout in rnn!')
                encoder = tf.nn.rnn_cell.DropoutWrapper(encoder, input_keep_prob=1.0-self.rnn_drop, output_keep_prob=1.0)
                decoder = tf.nn.rnn_cell.DropoutWrapper(decoder, input_keep_prob=1.0, output_keep_prob=1.0-self.rnn_drop)
            #============================= encoder ===================================================================
            state = encoder.zero_state(self.batch_size, tf.float32)
            with tf.variable_scope("image_feat") as scope:
                # batch-norm in 'test' mode: frozen statistics for the loss pass
                image_feat = self.batch_norm(self.image_feat[:,:], mode='test', name='')
                image_feat_emb = tf.matmul(image_feat, image_feat_W) + image_feat_b # B,H
                lstm_input = image_feat_emb
            with tf.variable_scope("lstm_encoder") as scope:
                _, state = encoder(lstm_input, state)
                encoder_state = state
            #============================= decoder ===================================================================
            start_token = tf.constant(self.START, tf.int32, [self.batch_size])
            mask = tf.constant(True, "bool", [self.batch_size])  # True until </S> has been emitted
            log_probs_action_picked_list = []
            sample_mask_list = []
            ## modified to run story model
            state = encoder_state
            for j in range(self.lstm_steps):
                with tf.device("/cpu:0"):
                    if j == 0:
                        decoder_input = tf.nn.embedding_lookup(word_emb_W, start_token)
                    else:
                        # teacher-force the word that was sampled at step j-1
                        decoder_input = tf.nn.embedding_lookup(word_emb_W, self.sample_words_for_loss[:,j-1])
                with tf.variable_scope("lstm"):
                    if not j == 0:
                        tf.get_variable_scope().reuse_variables()
                    output, state = decoder(decoder_input, state)
                logits = tf.matmul(output, output_W) + output_b
                log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits), 1e-20, 1.0)) # B,Vocab_size # clip to prevent log(0)
                # check if the end of the sentence
                # mask_step -> "current" step, predict has "1 delay"
                sample_mask_list.append(tf.to_float(mask))
                # flat index of each row's sampled word inside the B*V log-prob matrix
                action_picked = tf.range(self.batch_size) * self.vocab_size + tf.to_int32(self.sample_words_for_loss[:,j]) # B
                log_probs_action_picked = tf.multiply(tf.gather(tf.reshape(log_probs, [-1]), action_picked), tf.to_float(mask)) # propabilities for picked actions
                log_probs_action_picked_list.append(log_probs_action_picked)
                prev_mask = mask
                mask_judge = tf.not_equal(self.sample_words_for_loss[:,j], self.END) # B # bool: not yet at </S>
                mask = tf.logical_and(prev_mask, mask_judge)
            sample_mask_list = tf.stack(sample_mask_list) # S,B
            sample_mask_list = tf.transpose(sample_mask_list, [1,0]) # B,S
            log_probs_action_picked_list = tf.stack(log_probs_action_picked_list) # S,B
            log_probs_action_picked_list = tf.reshape(log_probs_action_picked_list, [-1]) # S*B
            # REINFORCE objective: reward-weighted negative log-likelihood,
            # averaged over the unmasked (pre-</S>) positions
            loss = -1 * tf.reduce_sum(log_probs_action_picked_list * self.discounted_reward)/ \
                   tf.reduce_sum(sample_mask_list)
            return loss
    def generator(self, name="generator", reuse=False):
        '''
        Caption sampler: encodes the image feature with a single RNN step,
        then decodes for lstm_steps steps, drawing each word from the
        softmax distribution with tf.multinomial (Monte-Carlo sampling) and
        feeding it back as the next input.

        Returns a Python list of lstm_steps tensors, each of shape [batch],
        holding sampled word ids.
        '''
        random_uniform_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
        with tf.variable_scope(name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            with tf.device("/cpu:0"), tf.variable_scope("word"):
                # name: "generator/word"
                word_emb_W = tf.get_variable("word_emb_W", [self.vocab_size, self.G_hidden_size], tf.float32, random_uniform_init)
            with tf.variable_scope("image_feat"):
                # name: "generator/image_feat"
                image_feat_W = tf.get_variable("image_feat_W", [self.image_feat_dim, self.G_hidden_size], tf.float32, random_uniform_init)
                image_feat_b = tf.get_variable("image_feat_b", [self.G_hidden_size], tf.float32, random_uniform_init)
            with tf.variable_scope("output"):
                # name: "generator/output"
                output_W = tf.get_variable("output_W", [self.G_hidden_size, self.vocab_size], tf.float32, random_uniform_init)
                output_b = tf.get_variable("output_b", [self.vocab_size], tf.float32, random_uniform_init)
            with tf.variable_scope("lstm_encoder"):
                if self.rnn_cell == 'lstm':
                    encoder = tf.nn.rnn_cell.LSTMCell(self.G_hidden_size, state_is_tuple=True)
                elif self.rnn_cell == 'gru':
                    encoder = tf.nn.rnn_cell.GRUCell(self.G_hidden_size)
            with tf.variable_scope("lstm_decoder"):
                # WONT BE CREATED HERE — the decoder's variables are created
                # lazily on first call under the "lstm" scope in the loop
                # below; confirm this scope layout is intentional
                if self.rnn_cell == 'lstm':
                    decoder = tf.nn.rnn_cell.LSTMCell(self.G_hidden_size, state_is_tuple=True)
                elif self.rnn_cell == 'gru':
                    decoder = tf.nn.rnn_cell.GRUCell(self.G_hidden_size)
            #============================= encoder ===================================================================
            state = encoder.zero_state(self.batch_size, tf.float32)
            with tf.variable_scope("image_feat") as scope:
                # batch-norm in 'train' mode here (contrast: 'test' in build_loss)
                image_feat = self.batch_norm(self.image_feat[:,:], mode='train', name='')
                image_feat_emb = tf.matmul(image_feat, image_feat_W) + image_feat_b # B,H
                lstm_input = image_feat_emb
            with tf.variable_scope("lstm_encoder") as scope:
                _, state = encoder(lstm_input, state)
                encoder_state = state
            #============================= decoder ===================================================================
            start_token = tf.constant(self.START, tf.int32, [self.batch_size])
            mask = tf.constant(True, "bool", [self.batch_size])  # NOTE(review): unused in this method
            sample_words = []
            state = encoder_state
            for j in range(self.lstm_steps):
                with tf.device("/cpu:0"):
                    if j == 0:
                        decoder_input = tf.nn.embedding_lookup(word_emb_W, start_token)
                    else:
                        # feed back the word sampled at the previous step
                        decoder_input = tf.nn.embedding_lookup(word_emb_W, sample_word)
                with tf.variable_scope("lstm"):
                    if not j == 0:
                        tf.get_variable_scope().reuse_variables()
                    output, state = decoder(decoder_input, state)
                logits = tf.matmul(output, output_W) + output_b
                log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits), 1e-20, 1.0)) # B,Vocab_size # add 1e-8 to prevent log(0)
                # sample once from the multinomial distribution
                # Montecarlo sampling
                sample_word = tf.reshape(tf.multinomial(log_probs, 1), [self.batch_size]) # 1 means sample once
                sample_words.append(sample_word)
            return sample_words
def generator_test(self, name="generator", reuse=True):
'''
Caption generator. Generate words with max probabilities.
'''
random_uniform_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
with tf.device("/cpu:0"), tf.variable_scope("word"):
# name: "gnerator/word"
word_emb_W = tf.get_variable("word_emb_W", [self.vocab_size, self.G_hidden_size], tf.float32, random_uniform_init)
with tf.variable_scope("image_feat"):
# name: "generator/image_feat"
image_feat_W = tf.get_variable("image_feat_W", [self.image_feat_dim, self.G_hidden_size], tf.float32, random_uniform_init)
image_feat_b = tf.get_variable("image_feat_b", [self.G_hidden_size], tf.float32, random_uniform_init)
with tf.variable_scope("output"):
# name: "generator/output"
output_W = tf.get_variable("output_W", [self.G_hidden_size, self.vocab_size], tf.float32, random_uniform_init)
output_b = tf.get_variable("output_b", [self.vocab_size], tf.float32, random_uniform_init)
with tf.variable_scope("lstm_encoder"):
if self.rnn_cell == 'lstm':
encoder = tf.nn.rnn_cell.LSTMCell(self.G_hidden_size, state_is_tuple=True)
elif self.rnn_cell == 'gru':
encoder = tf.nn.rnn_cell.GRUCell(self.G_hidden_size)
with tf.variable_scope("lstm_decoder"):
# WONT BE CREATED HERE
if self.rnn_cell == 'lstm':
decoder = tf.nn.rnn_cell.LSTMCell(self.G_hidden_size, state_is_tuple=True)
elif self.rnn_cell == 'gru':
decoder = tf.nn.rnn_cell.GRUCell(self.G_hidden_size)
#============================= encoder ===================================================================
state = encoder.zero_state(self.batch_size, tf.float32)
with tf.variable_scope("image_feat") as scope:
image_feat = self.batch_norm(self.image_feat[:,:], mode='test', name='')
image_feat_emb = tf.matmul(image_feat, image_feat_W) + image_feat_b # B,H
lstm_input = image_feat_emb
with tf.variable_scope("lstm_encoder") as scope:
_, state = encoder(lstm_input, state)
encoder_state = state
#============================= decoder ===================================================================
start_token = tf.constant(self.START, tf.int32, [self.batch_size])
sample_words = []
mask = tf.constant(True, "bool", [self.batch_size])
state = encoder_state
for j in range(self.lstm_steps):
with tf.device("/cpu:0"):
if j == 0:
decoder_input = tf.nn.embedding_lookup(word_emb_W, start_token)
else:
decoder_input = tf.nn.embedding_lookup(word_emb_W, sample_word)
with tf.variable_scope("lstm"):
if not j == 0:
tf.get_variable_scope().reuse_variables()
output, state = decoder(decoder_input, state)
logits = tf.matmul(output, output_W) + output_b
probs = tf.nn.softmax(logits)
log_probs = tf.log(probs + 1e-8) # B,Vocab_size # add 1e-8 to prevent log(0)
# sample the word with highest probability
# remove <UNK>
left = tf.slice(probs, [0,0], [-1,self.UNK])
right = tf.slice(probs, [0,self.UNK+1], [-1,self.vocab_size-self.UNK-1])
zeros = tf.zeros([self.batch_size, 1])+1e-20
probs_no_unk = tf.concat([left,zeros,right], 1)
sample_word = tf.reshape(tf.argmax(probs_no_unk, 1), [self.batch_size])
sample_words.append(sample_word)
return sample_words
def load(self, checkpoint_dir):
print(' [*] Reading checkpoints ...')
model_dir = '%s' % self.dataset_name
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
def decode(self, word_lists, type='string', wo_start=False, rm_end=False):
'''
Join the discrete words in word lists to compose sentences.
Process batch_size*max_words generated stories.
'''
if type == 'string':
processed_sentences = []
for word_list in word_lists:
sentence = []
for word_ix in word_list[0:self.max_words]:
sentence.append(self.ix2word[word_ix])
if '</S>' in sentence:
punctuation = np.argmax(np.array(sentence) == '</S>') + 1
sentence = sentence[:punctuation]
sentence = ' '.join(sentence)
sentence = sentence.replace('<S> ', '')
sentence = sentence.replace(' </S>', '')
sentence = sentence.replace(' \n ', '\n')
if sentence.strip() == '':
continue
processed_sentences.append(sentence)
return processed_sentences
def load_params(self, ckpt_file):
tf.global_variables_initializer().run()
logging.info(Fore.GREEN + 'Init model from %s ...' % ckpt_file)
G_saver = tf.train.Saver(self.load_G_params_dict)
G_saver.restore(self.sess, ckpt_file)
def test_one_image(self, image_feature):
feed_dict = {self.image_feat: image_feature}
generated_words = self.sess.run(self.sample_words_argmax, feed_dict)
generated_sentences = self.decode(generated_words, type='string', wo_start=True, rm_end=True)
return generated_sentences
| [
"y9nagie@gmail.com"
] | y9nagie@gmail.com |
404d772e9f913c90fd54e1ed82b4691f76b47fc4 | 66213c48da0b752dc6c350789935fe2b2b9ef5ca | /abc/115/d_.py | cb8036d381327e5ffd4f0470a37d3047600699e7 | [] | no_license | taketakeyyy/atcoder | 28c58ae52606ba85852687f9e726581ab2539b91 | a57067be27b27db3fee008cbcfe639f5309103cc | refs/heads/master | 2023-09-04T16:53:55.172945 | 2023-09-04T07:25:59 | 2023-09-04T07:25:59 | 123,848,306 | 0 | 0 | null | 2019-04-21T07:39:45 | 2018-03-05T01:37:20 | Python | UTF-8 | Python | false | false | 1,047 | py | # -*- coding:utf-8 -*-
import sys
def solve():
N, X = list(map(int, sys.stdin.readline().split()))
As = [1] # レベルiバーガーの厚さ(層の総数)(必ず奇数)
Ps = [1] # レベルiバーガーのパティの総数
for i in range(N):
As.append(As[i]*2 + 3) # レベルが1上がると、総数は2倍+3になる
Ps.append(Ps[i]*2 + 1) # レベルが1上がると、パティの数は2倍+1になる
# dp[i][x] := レベルiバーガーの下からx層に含まれているパティの総数
dp = [[0]*(X+1) for _ in range(2)]
dp[0][0] = 0
for i in range(1, X+1):
dp[0][i] = 1
# 漸化式を解く
for i in range(1, 2):
median = (As[i]+1)//2
for x in range(X+1):
if x < median:
dp[i&1][x] = dp[i-1][x-1]
elif x == median:
dp[i][x] = Ps[i-1] + 1
else:
dp[i][x] = Ps[i-1] + 1 + dp[i-1][x-median]
print(dp[N][X])
if __name__ == "__main__":
solve() | [
"taketakeyyy@gmail.com"
] | taketakeyyy@gmail.com |
cb4bb7e32196ee556a22411d73b8991a209a6a98 | 7f0bcc27cc74cbe901e4a31bbff71e299a2fa681 | /apps/HelloWorld/index.py | 8cf62a5a5da17b1b4831f0f16502267b7fc5ac15 | [] | no_license | JiangEndian/learngit | 4682826149472f942a6219b8d8b3cb7c276635e5 | e479b7efcc97c12120bd284259e8da0407c17a53 | refs/heads/master | 2021-07-18T20:48:28.063374 | 2019-05-11T11:16:02 | 2019-05-11T11:16:02 | 90,952,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,417 | py | from django.http import HttpResponseRedirect
from django.shortcuts import render
import os
from MyPython3 import readffile
def index(request):
restudy_info = {}
restudy_info['alt1'] = '进度中alt1'
restudy_info['alt1_common'] = 'alt1_common'
restudy_info['alt2'] = '进度中alt2'
restudy_info['alt2_common'] = 'alt2_common'
restudy_info['alt3'] = '进度中alt3'
restudy_info['alt3_common'] = 'alt3_common'
restudy_info['alt4'] = '进度中alt4'
restudy_info['alt4_common'] = 'alt4_common'
restudy_info['plan_endian'] = readffile('plan_endian/everydaytodolist.txt')
if os.path.exists('new_gs/4web_restudy/已复习') and not os.path.exists('new_gs/4web_restudy/common_info'):
restudy_info['alt1'] = 'alt1已复习'
restudy_info['alt1_common'] = ''
elif not os.path.exists('new_gs/4web_restudy/common_info'):
restudy_info['alt1'] = 'alt1'
restudy_info['alt1_common'] = ''
if os.path.exists('language_voice_diction_korean/4web_restudy/已复习') and not os.path.exists('language_voice_diction_korean/4web_restudy/common_info'):
restudy_info['alt2'] = 'alt2已复习'
restudy_info['alt2_common'] = ''
elif not os.path.exists('language_voice_diction_korean/4web_restudy/common_info'):
restudy_info['alt2'] = 'alt2'
restudy_info['alt2_common'] = ''
if os.path.exists('language_voice_diction_english/4web_restudy/已复习') and not os.path.exists('language_voice_diction_english/4web_restudy/common_info'):
restudy_info['alt3'] = 'alt3已复习'
restudy_info['alt3_common'] = ''
elif not os.path.exists('language_voice_diction_english/4web_restudy/common_info'):
restudy_info['alt3'] = 'alt3'
restudy_info['alt3_common'] = ''
if os.path.exists('language_voice_diction_hebrew/4web_restudy/已复习') and not os.path.exists('language_voice_diction_hebrew/4web_restudy/common_info'):
restudy_info['alt4'] = 'alt4已复习'
restudy_info['alt4_common'] = ''
elif not os.path.exists('language_voice_diction_hebrew/4web_restudy/common_info'):
restudy_info['alt4'] = 'alt4'
restudy_info['alt4_common'] = ''
#restudy_info['alt1_common'] = ''
#restudy_info['alt2_common'] = ''
#restudy_info['alt3_common'] = ''
#restudy_info['alt4_common'] = ''
return render(request, 'index.html', restudy_info)
| [
"c_cstudy@126.com"
] | c_cstudy@126.com |
94b74c62d61153ec24530c54e93729e90ef64ff6 | 12eb408cd0062242e73bca3b8d5ba3f6da933536 | /kelpy/helpdocs/concepts.py | 9e44f8538af7a38defe00a497d436b7c9d762b79 | [] | no_license | pdarragh/Kelpy | 23475bbb0fcd7b7b9b7b1962c8dcd761a83ebddd | 9b650cf19e88e84e6c593cf1895622754d5badc2 | refs/heads/master | 2021-01-10T05:30:03.357740 | 2015-11-10T07:43:47 | 2015-11-10T07:43:47 | 43,455,391 | 1 | 0 | null | 2015-11-10T07:42:33 | 2015-09-30T19:52:23 | Python | UTF-8 | Python | false | false | 933 | py | functions = '''\
FUNCTIONS
Kelpy is what is called a "Polish prefix" style of language; this means that the
function operators go before the arguments. For example, if we want to add the
numbers 2 and 3, we would do:
{+ 2 3}
It may seem funny at first, but you'll get used to it!
'''
math = '''\
MATH
There is plenty of support for math with numbers in Kelpy! The following
operators are all fully implemented for numbers:
+ add
- subtract
* multiply
/ divide
% modulo
Any of these can be used with any number of arguments. The operation will be
applied to the arguments in order from left to right.
'''
comparison = '''\
COMPARISON
Logical comparisons can be very useful, so of course Kelpy supports it. These
comparison operations will return boolean values:
== equality
!= inequality
< less than
> greater than
<= less than or equal
>= greater than or equal
'''
| [
"pierce.darragh@gmail.com"
] | pierce.darragh@gmail.com |
b80dd1b91c7d81639a5a8557df86e7c03032b77a | 27e47da8c81126f350fcaa738e219956109ed28d | /testing/academia/asistencia/migrations/0010_remove_examen_codigo.py | 21e9ca70a58b151016695c969b26ec57dfff460f | [] | no_license | tinyhosmarcos/Municipiov1 | 4fdba73bb0b6e949e4ea54902dd83d9b36a4af15 | b3c330e7d97a74ecdf308d32e9060758d8c98036 | refs/heads/master | 2020-06-04T18:53:39.515619 | 2019-07-17T17:26:54 | 2019-07-17T17:26:54 | 192,153,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-07-11 17:05
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('asistencia', '0009_auto_20190710_1022'),
]
operations = [
migrations.RemoveField(
model_name='examen',
name='codigo',
),
]
| [
"yhostin.ollachica@ucsp.edu.pe"
] | yhostin.ollachica@ucsp.edu.pe |
94fd535d5d4ae608d36b447cb468abaea327ef70 | 6a04f02ad3fc13bf9ccb99a6ef80051e1e812e6d | /python/csv_importer/mab.py | 6db0df713fa40d980489727d7d8c0b04cd375383 | [] | no_license | quickly3/game-search-engine2 | 63c616076bbd5cb24cfa2424f82c1da7388183cd | 5f36c2848ae3dcc9001412d1d47d81d5230df25d | refs/heads/master | 2023-05-26T04:29:09.607937 | 2023-05-15T03:18:56 | 2023-05-15T03:18:56 | 207,987,027 | 5 | 1 | null | 2021-03-24T13:22:15 | 2019-09-12T07:07:57 | Python | UTF-8 | Python | false | false | 364 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# plt.close('all')
ts = pd.Series(np.random.randn(1000),
index=pd.date_range('1/1/2000', periods=1000))
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
columns=['A', 'B', 'C', 'D'])
df = df.cumsum()
df.plot()
plt.legend(loc='best')
plt.show()
| [
"hongbin@inceptionpad.com"
] | hongbin@inceptionpad.com |
fec2c28c4d26638fe33baaa168e359525a577797 | 919e2c8eb34dab79f45b44a547d18add3c40d274 | /mac/shop/migrations/0003_contact.py | 3ce9e373ae9b317320e7cc9b410019366bbf5df3 | [] | no_license | Amitojasa/Ecommerce-website | 663d57c0a39ccae40473b176ebbcae24428704cb | 695050ce2e693b3f7a9cf5f445cafacb2dd411bc | refs/heads/master | 2020-04-25T16:53:05.003379 | 2019-04-25T14:15:55 | 2019-04-25T14:15:55 | 172,927,680 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # Generated by Django 2.1.5 on 2019-04-15 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20190227_1833'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('msg_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('email', models.CharField(default='', max_length=70)),
('phone', models.CharField(default='', max_length=70)),
('desc', models.CharField(default='', max_length=500)),
],
),
]
| [
"amitojvmc@gmail.com"
] | amitojvmc@gmail.com |
0ea487eefddf2b691bbd4615be6c28583189c22e | 02c394db353d996038c9bedbeaf91bb080c12ca2 | /dsm/epaxos/replica/config.py | fbfd36f11f0765a18dcf07d8a0b82a49e91101b1 | [
"MIT"
] | permissive | Limber0117/python-epaxos | 0633752cffaca65c0d8b9c3aecf9c8bc6ca70f3e | e68bab50e7df32770103196c91d8708863691579 | refs/heads/master | 2021-08-23T22:31:47.283682 | 2017-12-06T22:16:21 | 2017-12-06T22:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | from collections import defaultdict
from typing import Any, List
class ReplicaState:
def __init__(
self,
# channel: Any,
epoch: int,
replica_id: int,
quorum_fast: List[int],
quorum_full: List[int],
live: bool = True,
timeout: int = 3,
jiffies: int = 33,
timeout_range: int = 3,
checkpoint_each: int = 10,
):
# self.channel = channel
self.epoch = epoch
self.replica_id = replica_id
self.quorum_fast = quorum_fast
self.quorum_full = quorum_full
self.live = live
self.timeout = timeout
self.ticks = 0
self.jiffies = jiffies
self.seconds_per_tick = 1. / self.jiffies
self.packet_counts = defaultdict(int)
self.timeout_range = timeout_range
self.total_sleep = 0
self.total_exec = 0
self.total_timeouts = 0
self.total_recv = 0
self.checkpoint_each = checkpoint_each
def tick(self):
self.ticks += 1 | [
"acizov@gmail.com"
] | acizov@gmail.com |
e92fa6494201e72f4e1489df50a36bdc9f5b0999 | 0c1d817f89737dd711bef41a10c67049ed0d4226 | /.buildozer/android/platform/build-armeabi-v7a/build/venv/bin/pip3 | 0e9731ae41ab1c0444a0f72e71983424c79cf9ae | [] | no_license | joshl229/barwatchApp | 4831e8d23a6b987c55543bc1fb826db18ddb84c2 | 2cc18f00d1ba36769b750f14f338b8cf4b756af3 | refs/heads/master | 2023-04-09T07:15:08.818483 | 2021-04-27T00:01:28 | 2021-04-27T00:01:28 | 359,698,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | #!/home/josh/PycharmProjects/watchband/.buildozer/android/platform/build-armeabi-v7a/build/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"joey32165@gmail.com"
] | joey32165@gmail.com | |
17039cbc48b3050ec5caf1d2e6a113ebfe47cd88 | 193cc6bd3d04669b5d79dfb608d40a79dd50a482 | /models/db.py | 10cd25de372f74052ac52238fb9f469269066966 | [
"LicenseRef-scancode-public-domain"
] | permissive | johnpayne/sandbox | d4ad1a3b65ba11be0f1eccdaf4910b21f6cafef1 | a133c6e444e48bbc506ab93183d64876ce1286ca | refs/heads/master | 2020-05-21T00:57:28.872129 | 2013-01-01T08:33:00 | 2013-01-01T08:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,498 | py | # -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
if not request.env.web2py_runtime_gae:
## if NOT running on Google App Engine use SQLite or other DB
#db = DAL('sqlite://storage.sqlite')
# db = DAL('mssql://sa:asdfasdf3@jprt/Maranatha_Admin')
db = DAL('mysql://root:fatter!45@localhost/ipeerlend')
db._adapter.maxcharlength = 65535
else:
## connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL('google:datastore')
## store sessions and tickets there
session.connect(request, response, db = db)
## or store session in Memcache, Redis, etc.
## from gluon.contrib.memdb import MEMDB
## from google.appengine.api.memcache import Client
## session.connect(request, response, db = MEMDB(Client()))
## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import Auth, Crud, Service, PluginManager, prettydate
auth = Auth(db)
crud, service, plugins = Crud(db), Service(), PluginManager()
## create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)
## configure email
mail=auth.settings.mailer
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = 'you@gmail.com'
mail.settings.login = 'username:password'
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
from gluon.contrib.login_methods.rpx_account import use_janrain
use_janrain(auth,filename='private/janrain.key')
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
db.define_table('contact',
Field('name'),
Field('phone'))
db.define_table('marketplace6',
Field('CreationDate', 'datetime'),
Field('GroupsCountToDate', 'decimal(28,10)'),
Field('HistoricalInterestRatesTable', 'string', length=8000),
Field('LoansClosedCountToDate', 'decimal(28,10)'),
Field('MemberBorrowersCountToDate', 'decimal(28,10)'),
Field('MemberGroupLeadersCountToDate', 'decimal(28,10)'),
Field('MemberLendersCountToDate', 'decimal(28,10)'),
Field('MemberRegistrationsCountToDate', 'decimal(28,10)'),
Field('PrincipalClosedAmountToDate', 'decimal(28,10)'), migrate=False)
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
mail.settings.server = settings.email_server
mail.settings.sender = settings.email_sender
mail.settings.login = settings.email_login
| [
"John@JPRT.(none)"
] | John@JPRT.(none) |
695ad2daca44d87d73bbf3d80631f93a5a7d68dd | 954851edc93cda5023b6802ea5c15c292a1f922c | /youtube_search/webapp/views.py | e0cc3a29e7a696670ab10d62325087ab2ff78f35 | [] | no_license | bhavnbgv/youtube_repo | b0e8f7a1d732c66be002b78def662c6b63e32126 | dea24c7cd85f9c574795518cfe2a14d5031c3894 | refs/heads/master | 2021-01-12T12:17:27.198361 | 2016-11-02T10:09:23 | 2016-11-02T10:09:23 | 72,411,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,304 | py | from django.shortcuts import render
from rest_framework.decorators import api_view
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from apiclient.discovery import build
from apiclient.errors import HttpError
from globalconfig import API_KEY,API_NAME,API_VERSION
from django.http.response import HttpResponse
from webapp.models import YoutubeVideo
import json
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from webapp.serializers import YoutubeVideoSerializer
# Create your views here.
@api_view(['GET','POST'])
def VideoList(request):
if request.method=='POST':
data = request.POST
print data,"data******\n"
query = data.get('searchkeyword')
channel = data.get('channelid')
page_token = data.get('pagetoken')
print page_token,"page token"
print query,channel
if query == "":
query=None
if channel=="":
channel=None
if page_token=="":
page_token=None
print query,channel,page_token
youtube = build(API_NAME,API_VERSION,developerKey=API_KEY)
print youtube,"youtube********\n"
try:
search_response = youtube.search().list(
q=query,
part="id,snippet",
channelId=channel,
pageToken=page_token,
maxResults=10).execute()
except Exception, ex:
msg = json.loads(ex.content)
ret_msg = msg['error']['message']
return HttpResponse(json.dumps({"videos_list":ret_msg}), content_type="application/json")
print search_response,"search response********\n"
next_page = search_response.get('nextPageToken')
prev_page = search_response.get('prevPageToken')
if next_page == None:
next_page=""
if prev_page == None:
prev_page=""
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append("%s ---- %s " % (search_result["id"]["videoId"],search_result["snippet"]["title"]))
videos.append("<br>")
if not YoutubeVideo.objects.filter(video_id=search_result["id"]["videoId"]).exists():
yt = YoutubeVideo(video_id=search_result["id"]["videoId"],video_title=search_result["snippet"]["title"],video_description=search_result["snippet"]["description"])
yt.save()
print videos
return HttpResponse(json.dumps({"videos_list":videos,"next_page" : next_page, "prev_page" : prev_page}), content_type="application/json")
return render_to_response('videolist.html',context_instance=RequestContext(request))
@api_view(['GET', 'POST'])
def RestApiList(request):
videos = YoutubeVideo.objects.all()
serializer = YoutubeVideoSerializer(videos, many=True)
if request.method == 'POST':
serializer = YoutubeVideoSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def VideoDetail(request, pk):
try:
videos = YoutubeVideo.objects.get(pk=pk)
except Exception, ex:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = YoutubeVideoSerializer(videos)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = YoutubeVideoSerializer(videos, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
videos.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| [
"bhargava@dewhive.com"
] | bhargava@dewhive.com |
9758f45b0501f1a0f30cf652bd100cfc97b8c8a3 | ceaec06edefb609af597a0151eb567443b32264c | /psyclab/neural/field_2d.py | df96503958cc1d47db9358860454d12a72ae9cd5 | [
"MIT"
] | permissive | venachescu/psyclab | a3fc6b0599ff927aedfdebcd0a1fd624ed033233 | 40dadd9ffa3b1761aecd1109441d0652dedb91ef | refs/heads/master | 2020-06-29T19:27:20.658501 | 2019-08-05T07:20:18 | 2019-08-05T07:20:18 | 200,603,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,356 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Dynamic Neural Field model
based on code and examples from Nicolas P. Rougier and Eric J. Nichols
References
----------
[1] Rougier, N. P. (2005). Dynamic neural field with local inhibition.
Biological Cybernetics, 94(3), 169–179. http://doi.org/10.1007/s00422-005-0034-8
"""
import numpy as np
from numpy.fft import fft2, ifft2, fftshift, ifftshift
class NeuralField(object):
def __init__(self, neurons=512, time_step=0.01):
self._n = neurons
self._size = 30.0
self._gamma = 1.0
self._axon_speed = 500.0
self._values = np.zeros((self._n, self._n))
self._input = np.zeros((self._n, self._n))
self._voltage = np.zeros((self._n, self._n))
self._steps = 0
self._elapsed = 0.0
self._time_step = time_step
self._activity = np.zeros((1, self._n, self._n))
def initialize(self, size):
a, b = np.meshgrid(
np.arange(-size / 2.0, size / 2.0, size / float(self._n)),
np.arange(-size / 2.0, size / 2.0, size / float(self._n)))
x = np.sqrt(a**2 + b**2)
self._values = x
self._noise_kernel = np.exp(-(a**2/32.0 + b**2/32.0))/(np.pi*32.0) * 0.1 * np.sqrt(self._time_step)
self._kernel = -4 * np.exp(-x / 3) / (18 * np.pi)
self._kerneli = self.precompute_kernel(self._n, size, self._time_step, self._axon_speed)
self._nrings = len(self._kerneli)
self._activity = np.stack([fft2(self.S),] * self._nrings)
# self._rate = fftshift(fft2(ifftshift(self.S))).real
def precompute_kernel(self, n, size, time_step, axon_speed):
radius = np.sqrt((n / 2.0)**2 + (n / 2.0)**2)
self._synaptic = size ** 2 / float(n ** 2)
# width of a ring in # of grid intervals
width = max(1.0, self._axon_speed * time_step * n / size)
n_rings = 1 + int(radius / width)
def disc(step, n):
def distance(x, y):
return np.sqrt((x - n // 2)**2 + (y - n // 2)**2)
D = np.fromfunction(distance, (n, n))
return np.where(D < (step * width), True, False).astype(np.float32)
# Generate 1+int(d/r) rings
disc1 = disc(1, n)
L = [disc1 * self.K]
for i in range(1, n_rings):
disc2 = disc(i + 1, n)
L.append(((disc2 - disc1) * self.K))
disc1 = disc2
# Precompute Fourier transform for each kernel ring since they're
# only used in the Fourier domain
Ki = np.zeros((n_rings, n, n)) # self.Ki is our kernel in layers in Fourier space
for i in range(n_rings):
Ki[i, :, :] = np.real(fftshift(fft2(ifftshift(L[i]))))
return Ki
def step(self, dt=None):
dt = dt or self._time_step
self._steps += int(round(dt / self._time_step))
self._elapsed += dt
L = np.sum([k * u for k, u in zip(self.Ki, self.U)], axis=0)
L = self._synaptic * (fftshift(ifft2(ifftshift(L)))).real
e = np.random.normal(0, 1.0, (self._n, self._n)) * self._noise_kernel
dV = dt / self._gamma * (-self.V + L + self.I) + e
self.V += dV
self._activity = np.roll(self._activity, 1, axis=0)
self._activity[0] = fft2(self.S)
# self.U = fftshift(fft2(ifftshift(self.S)))
def firing_rate(self, V=None):
if V is None:
V = self.V
S0 = 1.0 # S: maximum frequency
alpha = 10000.0 # α: steepness at the threshold
theta = 0.005 # θ: firing threshold
return S0 / (1.0 + np.exp(-1*alpha*(V-theta)))
@property
def V(self):
return self._voltage
@V.setter
def V(self, value):
self._voltage = value
@property
def I(self):
return self._input
@I.setter
def I(self, value):
self._input = value
@property
def K(self):
return self._kernel
@property
def Ki(self):
return self._kerneli
@property
def S(self):
return self.firing_rate(self.V)
@property
def U(self):
return self._rate
@U.setter
def U(self, value):
self._rate = [value] + self._rate[:-1]
@property
def active_neurons(self):
""" Indices of neurons with activity above threshold """
indices, = np.where(self.output > self.threshold)
if not len(indices):
return
return indices
@property
def activity_center(self):
""" Index of the neuron at the center of highest activity density """
indices = self.active_neurons
if indices is None:
return int(np.argmax(self.activity))
return int(np.median(indices))
@property
def parameters(self):
""" Dictionary of all numerical parameters for the neural field """
names, _ = zip(*getmembers(self.__class__, predicate=lambda m: isinstance(m, (int, float))))
return dict((name, getattr(self, name)) for name in names)
if __name__ == "__main__":
'''This is the input from external source, I.
You can delete/add/change variables but you must initialize an I that uses x.'''
# Gamma = 20.0
# sigma = 5.65685425
# I = Gamma * (np.exp(-1 * x**2 / sigma**2) / (sigma**2 * np.pi))
nf = NeuralField()
nf.initialize(64)
| [
"vinceenachescu@gmail.com"
] | vinceenachescu@gmail.com |
ee99160a507f18d502ef1b8e1695b0e8369b54d8 | 8049ba531ea34f07b065a11dd1c9a5d68a00580f | /app/models.py | bac68d9dcf6103505c200b55cdaa3262065c452d | [] | no_license | aoisoratoumi/django-booking | 94b29020c2390bd51d0d1a8451e3be08a9062793 | a178c5f2d05bffe629fc828e7dc307f517718f37 | refs/heads/master | 2022-09-13T22:43:26.308133 | 2020-05-29T23:57:22 | 2020-05-29T23:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | from django.db import models
from django.utils import timezone
from accounts.models import CustomUser
class Store(models.Model):
name = models.CharField('店舗', max_length=100)
address = models.CharField('住所', max_length=100, null=True, blank=True)
tel = models.CharField('電話番号', max_length=100, null=True, blank=True)
description = models.TextField('説明', default="", blank=True)
image = models.ImageField(upload_to='images', verbose_name='イメージ画像', null=True, blank=True)
def __str__(self):
return self.name
class Staff(models.Model):
user = models.OneToOneField(CustomUser, verbose_name='スタッフ', on_delete=models.CASCADE)
store = models.ForeignKey(Store, verbose_name='店舗', on_delete=models.CASCADE)
def __str__(self):
return f'{self.store}:{self.user}'
class Booking(models.Model):
staff = models.ForeignKey(Staff, verbose_name='スタッフ', on_delete=models.CASCADE)
first_name = models.CharField('姓', max_length=100, null=True, blank=True)
last_name = models.CharField('名', max_length=100, null=True, blank=True)
tel = models.CharField('電話番号', max_length=100, null=True, blank=True)
remarks = models.TextField('備考', default="", blank=True)
start = models.DateTimeField('開始時間', default=timezone.now)
end = models.DateTimeField('終了時間', default=timezone.now)
def __str__(self):
start = timezone.localtime(self.start).strftime('%Y/%m/%d %H:%M')
end = timezone.localtime(self.end).strftime('%Y/%m/%d %H:%M')
return f'{self.first_name}{self.last_name} {start} ~ {end} {self.staff}'
| [
"harukun2002@gmail.com"
] | harukun2002@gmail.com |
c67778a3990c58050c5c41719c59cbccdbb16e3a | 04ab3ae12e737669ac1d3cb83219939ff4a92bcd | /src/feature_eval.py | 2869cbb09a3b4cde064af578f2efc17643e7d019 | [
"MIT"
] | permissive | abhimishra91/hackathon-framework | 8e78afabf9a1cb6d82a0e6a049888c7c8ff78e20 | 77e8c50addfc79a42cec42b9f615751219d25a0b | refs/heads/master | 2023-02-18T02:45:29.752685 | 2021-01-21T06:04:59 | 2021-01-21T06:04:59 | 267,203,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | import matplotlib.pyplot as plt
import seaborn as sns
class FeatEvaluation:
def __init__(self, df, target_col: str = None):
"""
:param df: Dataframe which will be analysed
:param target_col: String of the colummn name that is the target for this analysis in the dataframe
"""
self.df = df
self.target = target_col
def stat_desc(self, col):
if self.df[col].dtype == "O":
return "Categorical Data"
else:
return self.df[col].describe().loc[["min", "max"]]
def feature_report(self):
print("Feature Report Generated for all the columns in the Dataframe")
for col in self.df.columns:
print("\n")
print(f"Feature Report for Column: {col}")
print("~~~~~~==================~~~~~~")
print(str(self.stat_desc(col)))
print(f"No of Unique Values: {self.df[col].nunique()}")
print(f"No of Values in the column: {self.df[col].value_counts()}")
return
def feature_plot(self):
for col in self.df.columns:
print("Plotting the Distribution for: {0}".format(col))
if self.df[col].dtype == "O":
plt.figure(figsize=(16, 9))
sns.boxplot(x=col, y=self.target, data=self.df)
plt.show()
else:
plt.figure(figsize=(16, 9))
sns.distplot(self.df[col].values)
plt.show()
return
def corelation_plot(self):
corr = self.df.corr()
plt.figure(figsize=(16, 9))
sns.heatmap(
corr,
annot=True,
vmin=-1,
vmax=1,
center=0,
cmap="coolwarm",
linewidths=1.5,
linecolor="black",
)
plt.show()
return
# if __name__ == "__main__":
# import config
# import pandas as pd
# RAW_TRAIN_DATA = config.RAW_DATA
# TEST_DATA = config.TEST_DATA
#
# train_df = pd.read_csv(RAW_TRAIN_DATA)
# test_df = pd.read_csv(TEST_DATA)
# test_df['target'] = -99999
# eval = FeatEvaluation(train_df, 'price')
# print(eval.feature_report())
| [
"abhimishra.91@gmail.com"
] | abhimishra.91@gmail.com |
67625ed8122fc11c906ad83907a8303cc83d77b9 | fb28906c1f0347ffe50193f6c2bad2d4b490fa9c | /budger/directory/migrations/0018_ksoemployee_is_developer.py | 035b40d574b286ea6259fb47f17f3cee1ebb2261 | [] | no_license | pavkozlov/budger-server | 20c695309c34a0451d25b83ab8583b14f0d21c0c | 7a98c1789414c83625bda1e5b29cbe5587c3cd6a | refs/heads/master | 2020-12-17T06:35:10.550905 | 2020-01-13T13:27:42 | 2020-01-13T13:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.2.6 on 2019-12-04 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('directory', '0017_auto_20191203_1640'),
]
operations = [
migrations.AddField(
model_name='ksoemployee',
name='is_developer',
field=models.BooleanField(db_index=True, default=False),
),
]
| [
"it.pavelkozlov@gmail.com"
] | it.pavelkozlov@gmail.com |
d8a96074d01b6931939f6103a740df34e6604acf | 0fbfb318227a6c9047f1aee6ef499c18b0db041a | /indexer.py | 5236279f981be5c61ded703855d32580c25671a6 | [] | no_license | arasraj/Search-Engine | ee6861d6a5457c200a382a9d4d8fc42933eb2c86 | 879d91fe1fb3b89eaf4a7edea5abece05495f414 | refs/heads/master | 2021-01-18T18:25:18.528533 | 2011-03-14T07:20:22 | 2011-03-14T07:20:22 | 1,419,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,899 | py | import os
import codecs
import stemmer
import re
import math
import logging
import numpy as np
import cPickle as pickle
class Indexer():
logger = ''
#can move most of these outside init
def __init__(self):
self.regex = self.regex_compile()
self.g_docterm = {}
self.stopwords = open('stopwords.txt', 'r').read().split()
self.pstemmer = stemmer.PorterStemmer()
self.df = {}
self.termlist = []
self.g_matrix = []
self.term_index = {}
self.doc_index = {}
self.log_init()
#python caches regexs in which same pattern is used
#so compiling here is not necessary unless I swap in
#patterns a lot
def regex_compile(self):
words = re.compile(r'\W*')
return words
def doc_list(self, dir):
docs = os.listdir(dir)
return [doc for doc in docs]
#def sim(self, query):
# q_terms = query.split()
# q = [self.pstemmer(term, 0, len(term)-1) for term in q_terms]
# for i in range(len(self.doc_index)):
# np.dot(q, self.g_matrix[i])
def index(self):
self.log_init()
docs = self.doc_list('sanitized')
length = len(docs)
count = 0
for doc in docs:
fin = codecs.open('sanitized/'+doc, 'r', 'utf-8')
sanitized_doc = [word[:-1] for word in fin]
fin.close()
length -= 1
print length
self.tf_per_doc(sanitized_doc, doc)
self.populate_termlist(sanitized_doc)
#self.doc_index[doc] = count
#count +=1
unique_terms = self.dupless_terms(self.termlist)
self.termlist = unique_terms
self.term_indexer()
self.create_matrix(len(docs))
self.populate_matrix(docs)
self.tfidf()
self.persist_lists(self.g_matrix, self.term_index, self.df)
#print self.g_matrix[self.doc_index['b.txt']][self.term_index['cat']]
def persist_lists(self, matrix, term_index, df):
#numpy has own pickling builtin so use it
matrix.dump('pickle/matrix.pkl')
term_pickle = open('pickle/term.pkl', 'wb')
df_pickle = open('pickle/df.pkl', 'wb')
indextodoc = open('pickle/indextodoc.pkl', 'wb')
index_to_doc = {}
for item in self.doc_index.items():
index_to_doc[item[1][1]] = item[0]
pickle.dump(term_index, term_pickle)
pickle.dump(df, df_pickle)
pickle.dump(index_to_doc, indextodoc)
term_pickle.close()
indextodoc.close()
df_pickle.close()
#add col and rows as parameters?
def tfidf(self):
count=0
for doc in self.doc_index:
for term in self.term_index:
tf = float(self.g_matrix[self.doc_index[doc][1]][self.term_index[term]])
if tf > 0.0:
df = float(self.df[term])
idf = math.log((len(self.doc_index) / df), 2)
_tfidf = tf*idf
self.g_matrix[self.doc_index[doc][1]][self.term_index[term]] = _tfidf
count += 1
print count
#not gaurunteed any order here because of hashtables
def populate_matrix(self, docs):
for doc_key in self.g_docterm.keys():
term_dict = self.g_docterm[doc_key]
for term in term_dict.keys():
doc_index = self.doc_index[doc_key][1]
term_index = self.term_index[term]
self.g_matrix[doc_index][term_index] = term_dict[term]
#populate df values
if term_dict[term] > 0:
self.df[term] += 1
def term_indexer(self):
count = 0
for term in self.termlist:
self.term_index[term] = count
count += 1
#init df
self.df[term] = 0
#print term, count
def create_matrix(self, size):
num_terms = len(self.termlist)
num_docs = len(self.doc_index)
#print 'num of terms %s' % str(num_terms)
#useing numpy ndarray instead
self.g_matrix = np.zeros((num_docs, num_terms), dtype=np.float)
#for i in range(size):
# tmp = [0] * num_terms
# self.g_matrix.append(tmp)
def sanitize(self, dir):
docs = self.doc_list(dir)
length = len(docs)
count = 0
for doc in docs:
length -= 1
print length
#us os.join here
try:
fin = codecs.open('index2/'+doc, 'r', 'utf-8')
fout = codecs.open('sanitized/'+doc, 'w', 'utf-8')
except:
self.logging.error('Error with file %s' % doc)
return False
contents = fin.read()
m = re.search(r'\*\*\s(.*)\s\*\*', contents)
self.doc_index[doc] = [m.group(1), count]
count += 1
#fin.seek(0)
#split on nonalphanumerics
tmp = [word.lower() for word in self.regex.split(contents) if word != '']
stopwordless = self.remove_stopwords(tmp)
stemmed = [self.pstemmer.stem(word, 0, len(word)-1) for word in stopwordless]
for stem in stemmed:
fout.write(stem)
fout.write('\n')
fout.flush()
fin.close()
fout.close()
doclinks_index = open('pickle/doclinks_index.pkl', 'wb')
pickle.dump(self.doc_index, doclinks_index, -1)
doclinks_index.close()
def remove_stopwords(self, doc):
return [word for word in doc if word not in self.stopwords]
def tf_per_doc(self, words, doc):
tf = {}
#prevword = ''
for word in words:
if tf.get(word):
tf[word] += 1
else:
tf[word] = 1
#requires sorting list
#if word == prevword:
# break
#if word in self.df:
# self.df[word] +=1
#else:
# self.df[word] = 1
#prevword = word
self.g_docterm[doc] = tf
def populate_termlist(self, terms):
#why not just create term index here too
for term in terms:
self.termlist.append(term)
def dupless_terms(self, terms):
return list(set(terms))
def log_init(self):
self.logger = logging.getLogger('se')
handler = logging.FileHandler('log/indexer.log')
self.logger.addHandler(handler)
self.logger.setLevel(logging.WARNING)
if __name__ == '__main__':
indexer = Indexer()
indexer.sanitize('index2')
indexer.index()
| [
"arasraj@gmail.om"
] | arasraj@gmail.om |
b9b63957a2b26c1eb556dbadc270f595d83987d7 | 8c1f61332b33d527bf00ee37375dbc7b4a48e5a8 | /convert_jpg_tif.py | 3d5d619e8043744a865da7526fae22364a2a9f6a | [] | no_license | william-wen/daisy_intelligence_2020 | 987cb7f02008046ca5ceadaff887accd318b8da0 | c5348c9c23ef0ea87814f32db5a4e8246252827c | refs/heads/master | 2022-12-10T02:05:59.728664 | 2020-01-26T13:45:40 | 2020-01-26T13:45:40 | 236,195,861 | 0 | 1 | null | 2022-12-08T03:30:09 | 2020-01-25T16:27:38 | Jupyter Notebook | UTF-8 | Python | false | false | 110 | py | from PIL import Image
im = Image.open('flyer_images/week_52_page_4.jpg')
im.save('test.tif') # or 'test.tif'
| [
"lauradang.2000@gmail.com"
] | lauradang.2000@gmail.com |
f55510e0cc367aad9ebfda9b2a6faa0435ae1473 | 2119953dd04916fa2adf3f42a487f3f9754d1f66 | /modules/sandbox/docker/geo-web-viz/app.py | 9034f8727870c8bdee5a64203363aecd3f7ec266 | [
"MIT"
] | permissive | sarahwertz/sepal | 91d12e3317cd07ad4c99469d5b6211d74013b330 | efbbc33ac99db332fc13f9dfd4c777a8d2c1b41e | refs/heads/master | 2020-06-11T07:42:08.835556 | 2019-05-27T14:21:28 | 2019-05-27T14:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | import json
import logging
import traceback
import sys
from flask import Flask, Blueprint, request, Response
import config
import layers
import raster
import render
from config import to_file
app = Flask(__name__)
http = Blueprint(__name__, __name__)
session_state = {'layer_by_id': {}, 'index_by_id': {}, 'renderers': {}}
@http.errorhandler(Exception)
def handle_invalid_usage(error):
print(error)
print_stacktrace()
return "Internal Error", 500
@http.route('/layers', methods=['GET'])
def list_layers():
return json_response(layers.list_layers(state()))
@http.route('/layers/order', methods=['POST'])
def order_layers():
layers.reorder(json.loads(request.values['order']), state())
return json_response({'status': 'OK'})
@http.route('/raster/info', methods=['GET'])
def raster_info():
raster_file = to_file(request.values['path'])
return json_response(
{
'bandCount': raster.band_count(raster_file),
'nodata': raster.read_nodata(raster_file)
}
)
@http.route('/raster/band/<band_index>', methods=['GET'])
def band_info(band_index):
nodata = request.values.get('nodata', None)
if nodata:
nodata = float(nodata)
return json_response(
raster.band_info(
raster_file=to_file(request.values['path']),
band_index=int(band_index),
nodata=nodata)
)
@http.route('/raster/save', methods=['POST'])
def save_raster():
layer = json.loads(request.values['layer'])
bounds = layers.save_raster(layer, state())
return json_response({'bounds': bounds})
@http.route('/shape/save', methods=['POST'])
def save_shape():
layer = json.loads(request.values['layer'])
bounds = layers.save_shape(layer, state())
return json_response({'bounds': bounds})
@http.route('/layers/<layer_id>', methods=['DELETE'])
def remove_raster(layer_id):
layers.remove_layer(layer_id, state())
return json_response({'status': 'OK'})
@http.route('/layers/features/<lat>/<lng>')
def attributes(lat, lng):
return json_response(layers.features(float(lat), float(lng), state()))
@http.route('/layer/<layer_id>/<z>/<x>/<y>.<fmt>')
def render_tile(layer_id, z, x, y, fmt):
return Response(
render.render_tile(layer_id, int(z), int(x), int(y), str(fmt), renderers()),
mimetype=('image/%s' % fmt)
)
def state():
return session_state
def renderers():
return state().get('renderers', {})
def json_response(data):
return Response(json.dumps(data), mimetype='application/json')
def print_stacktrace():
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.register_blueprint(http)
app.secret_key = config.session_key
app.run(
host='0.0.0.0',
port=config.server_port,
threaded=True,
debug=config.debug_mode
)
| [
"daniel.wiell@fao.org"
] | daniel.wiell@fao.org |
732ccf2811be54afbd199a94e72658e129c6f81b | 8e09c9562173cb40fe26912fcdb1d4c6c08897d7 | /tfx/components/evaluator/component_test.py | 52d28474308225045d848dd0c656642a98ec0934 | [
"Apache-2.0"
] | permissive | robertlugg/tfx | 6a0050f6f1876ba5d53e45fd0d80acac2441187d | 49778c502bb6668ed8230877407fe40ae3a99a06 | refs/heads/master | 2020-07-27T17:00:47.355938 | 2019-09-16T23:00:02 | 2019-09-16T23:00:32 | 209,164,014 | 0 | 0 | Apache-2.0 | 2019-09-17T21:58:47 | 2019-09-17T21:58:46 | null | UTF-8 | Python | false | false | 1,928 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.evaluator.component."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfx.components.evaluator import component
from tfx.proto import evaluator_pb2
from tfx.types import channel_utils
from tfx.types import standard_artifacts
class ComponentTest(tf.test.TestCase):
def testConstruct(self):
examples = standard_artifacts.Examples()
model_exports = standard_artifacts.Model()
evaluator = component.Evaluator(
examples=channel_utils.as_channel([examples]),
model_exports=channel_utils.as_channel([model_exports]))
self.assertEqual('ModelEvalPath', evaluator.outputs.output.type_name)
def testConstructWithSliceSpec(self):
examples = standard_artifacts.Examples()
model_exports = standard_artifacts.Model()
evaluator = component.Evaluator(
examples=channel_utils.as_channel([examples]),
model_exports=channel_utils.as_channel([model_exports]),
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['trip_start_hour'])
]))
self.assertEqual('ModelEvalPath', evaluator.outputs.output.type_name)
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
69f331d31eac620a94880598fd00096f78ff9c33 | f673f322e75062f5d5a667f07eb4bc6c39c79c01 | /Python/day 1/fro.py | 1de9819813b0a47499eb24f1628d58178ecab41f | [] | no_license | 5p4r70n/ICT-Training | fbf986fbc72af2855670f5facfb18f129cbce72f | 5ceeb78420ad785078e547059b5960177afdb27f | refs/heads/master | 2020-04-16T22:45:47.237356 | 2019-02-06T11:18:55 | 2019-02-06T11:18:55 | 165,982,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | digits=[0,2,5]
for i in range(len(digits)):
print(i," ",digits[i])
else:
print(" no items left")
print(range(len(digits)))
print(len(digits))
| [
"Administrator@Asiet-Lab.ASIETDomain.com"
] | Administrator@Asiet-Lab.ASIETDomain.com |
efc55a073b926991fd43116f9fdd132aabaee02c | 55a4573cdeb116b20a625a398af04337f180d598 | /instrument/ifmessage.py | a4c5e4261d01cb8b46c95bd26d5bfe772ae5403e | [
"Unlicense"
] | permissive | NOAA-PMEL/omega-trh-daq | f506e4c968b7942dccb6cf012c377c3719a04143 | 98a18c62130af36d43c2882659e65321c3a98529 | refs/heads/master | 2020-04-02T11:10:18.632072 | 2019-07-23T15:33:39 | 2019-07-23T15:33:39 | 154,374,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 9 09:33:42 2018
@author: derek
"""
class InterfaceMessage():
imFirstIndex = 0
imLastIndex = 1
imAllIndex = 2
# interface STATES
imWaitingToConnect = 'WaitingToConnect'
imConnected = 'Connected'
imDisconnected = 'Disconnected'
imStopped = 'Stopped'
def __init__(self):
self.input_ready = False
self.input = []
self.output_ready = False
self.output = []
self.connection_status = False
self.state = self.imStopped
def add_input(self, msg):
self.input.append(msg)
self.input_ready = True
def has_input(self):
if (len(self.input) > 0):
return True
return False
def get_input(self, index=None, clear_buffer=False):
msg = []
if (index is None or index == InterfaceMessage.imFirstIndex):
msg.append(self.input.pop(0))
elif (index == InterfaceMessage.imLastIndex):
msg.append(self.input.pop())
elif (index == InterfaceMessage.imAllIndex):
clear_buffer = True
msg = self.input
else:
# throw exception?
pass
if (clear_buffer):
self.input = []
return msg
def add_output(self, msg):
self.output.append(msg)
self.output_ready = True
def has_output(self):
if (len(self.output) > 0):
return True
return False
def get_output(self, index=None, clear_buffer=True):
msg = []
if (index is None or index == InterfaceMessage.imFirstIndex):
msg.append(self.output.pop(0))
elif (index == InterfaceMessage.imLastIndex):
msg.append(self.output.pop())
elif (index == InterfaceMessage.imAllIndex):
clear_buffer = True
msg = self.output
else:
# throw exception?
pass
if (clear_buffer):
self.output = []
# print(self.output)
return msg
| [
"derek.coffman@noaa.gov"
] | derek.coffman@noaa.gov |
f3df773b77584d6d4547fddceed4599e7690fcdf | f486d2a385261c144f825a82b604a28446dc07ea | /qgis/scripts/streets_in_neighborhood.py | 55305e11d2252ac3b037eef43cd85c36c3da3229 | [] | no_license | kotarohara/GISUtilities | 3aed00426ffdba214c3932cddf561d6f6f5bf61d | bfc82cdc75720b178fe0cbea65d96b4168ae9f88 | refs/heads/master | 2021-01-10T07:56:44.845066 | 2016-02-26T23:34:01 | 2016-02-26T23:34:01 | 52,618,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,752 | py | # http://gis.stackexchange.com/questions/26257/how-can-i-iterate-over-map-layers-in-qgis-python
# layers = iface.legendInterface().layers()
# QgsMapLayerRegistry
# http://qgis.org/api/classQgsMapLayerRegistry.html
street_layer_name = "street_edge"
neighborhood_layer_name = "neighborhood"
street_layer = QgsMapLayerRegistry.instance().mapLayersByName(street_layer_name)[0]
neighborhood_layer = QgsMapLayerRegistry.instance().mapLayersByName(neighborhood_layer_name)[0]
# Using vector layer
# http://docs.qgis.org/testing/en/docs/pyqgis_developer_cookbook/vector.html
# Qgis Geometry
# http://qgis.org/api/classQgsGeometry.html
street_count = {}
distance = {}
for neighborhood in neighborhood_layer.getFeatures():
neighborhood_id = neighborhood.attributes()[1]
street_count[neighborhood_id] = 0
distance[neighborhood_id] = 0
for street in street_layer.getFeatures():
if neighborhood.geometry().intersects(street.geometry()):
distance[neighborhood_id] += street.geometry().length()
break
print distance
"""
print street_count
{0: 284, 1: 244, 2: 83, 3: 130, 4: 45, 5: 81, 6: 91, 7: 99, 8: 161, 9: 156, 10: 102, 11: 275, 12: 88, 13: 112, 14: 34, 15: 128, 16: 137, 17: 326, 18: 102, 19: 147, 20: 158, 21: 77, 22: 179, 23: 152, 24: 78, 25: 94, 26: 76, 27: 87, 28: 258, 29: 64, 30: 77, 31: 243, 32: 78, 33: 127, 34: 134, 35: 76, 36: 78, 37: 132, 38: 70, 39: 122, 40: 57, 41: 247, 42: 46, 43: 68, 44: 61, 45: 85, 46: 160, 47: 183, 48: 52, 49: 61, 50: 69, 51: 58, 52: 35, 53: 82, 54: 138, 55: 124, 56: 26, 57: 97, 58: 60, 59: 108, 60: 52, 61: 135, 62: 66, 63: 84, 64: 33, 65: 60, 66: 61, 67: 41, 68: 38, 69: 35, 70: 50, 71: 44, 72: 163, 73: 59, 74: 121, 75: 44, 76: 53, 77: 49, 78: 124, 79: 40, 80: 94, 81: 163, 82: 90, 83: 120, 84: 23, 85: 90, 86: 92, 87: 56, 88: 41, 89: 82, 90: 87, 91: 91, 92: 64, 93: 79, 94: 60, 95: 115, 96: 12, 97: 92, 98: 105, 99: 74, 100: 77, 101: 61, 102: 61, 103: 38, 104: 53, 105: 95, 106: 75, 107: 124, 108: 70, 109: 61, 110: 33, 111: 97, 112: 54, 113: 105, 114: 75, 115: 80, 116: 45, 117: 184, 118: 75, 119: 120, 120: 127, 121: 36, 122: 70, 123: 72, 124: 61, 125: 428, 126: 89, 127: 78, 128: 153, 129: 50, 130: 57, 131: 62, 132: 49, 133: 113, 134: 85, 135: 85, 136: 110, 137: 56, 138: 83, 139: 65, 140: 81, 141: 39, 142: 47, 143: 65, 144: 109, 145: 73, 146: 114, 147: 94, 148: 70, 149: 103, 150: 87, 151: 44, 152: 50, 153: 84, 154: 140, 155: 54, 156: 106, 157: 236, 158: 110, 159: 103, 160: 26, 161: 163, 162: 31, 163: 80, 164: 134, 165: 33, 166: 44, 167: 320, 168: 58, 169: 86, 170: 50, 171: 148, 172: 77, 173: 57, 174: 84, 175: 17, 176: 26, 177: 79, 178: 95, 179: 41, 180: 34, 181: 86, 182: 56, 183: 56, 184: 67, 185: 54, 186: 38, 187: 30, 188: 37, 189: 45, 190: 29, 191: 55}
"""
| [
"koe.bluebear@gmail.com"
] | koe.bluebear@gmail.com |
14867c67fd1d822563fe8ecb1841dce728a316df | 1c801375ead766790f5c097081a1bbbc6a593a9e | /baseSpider/算法/随机获取1000此列表元素并统计次数.py | f9d47c387d183b937269c6fbd47b14e83dfe9a35 | [] | no_license | llf-1996/python3Spider | 5803d1f42b660c7c2643bbc31f17126ac06e7ceb | 4621db8c7383940f8e60754d6640406101141095 | refs/heads/master | 2023-06-01T04:31:27.555140 | 2020-12-13T09:38:19 | 2020-12-13T09:38:19 | 156,145,515 | 2 | 3 | null | 2023-05-23T00:12:59 | 2018-11-05T01:48:46 | Python | UTF-8 | Python | false | false | 527 | py | '''
随机获取一个字符串列表中的字符串,求获取一千次的情况下,各字符串被随机到的次数。
'''
__author__ = 'llf'
import random
from collections import Counter
c = Counter()
ll = ['a', 'b']
for i in range(1000):
a = random.choice(ll)
c[a] = c[a] + 1
print('结果:', type(c), dir(c), c)
'''
<class 'collections.Counter'>
[
'clear', 'copy', 'elements', 'fromkeys', 'get', 'items',
'keys', 'most_common', 'pop', 'popitem', 'setdefault',
'subtract', 'update', 'values'
]
'''
| [
"2367746876@qq.com"
] | 2367746876@qq.com |
c85df11a5f6a16aeda599f79646d5b2ba2d9546d | 7874e77c8c44d568ec24da782f311cd811d43347 | /rango/admin.py | 0ee6b760f8956e053fd92556d1943a424e62b9c2 | [] | no_license | 668Jerry/leisure | 27399b0bc41bf0bb3dc687d1e444a7aa94dd92d9 | ea678d852d4930cd8db9d249d0c5d6be36c9dbb0 | refs/heads/master | 2021-01-25T03:40:21.851039 | 2014-08-02T09:00:49 | 2014-08-02T09:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from django.contrib import admin
# Register your models here.
from django.contrib import admin
from rango.models import Category, Page, UserProfile
admin.site.register(Category)
admin.site.register(Page)
admin.site.register(UserProfile)
| [
"668Jerry@gmail.com"
] | 668Jerry@gmail.com |
3d9db26167c279a19af9f8aece67edc185736ec1 | 495531870c08ea3495bb45393b05f907366f052e | /x7-src/dashboard/steer/steer/dashboards/engine/images_and_snapshots/images/urls.py | c5aa8ebb24e073dc5629b46e39aea61201b738c5 | [
"Apache-2.0"
] | permissive | wendy-king/x7_venv | 5fcb326cf3ecaa26d3b839af743b027d23af29e0 | d8266c1dc474935c54126ce36d1a6410a7e452f5 | refs/heads/master | 2021-01-01T06:33:24.605851 | 2012-01-19T15:54:44 | 2012-01-19T15:54:44 | 3,209,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
VIEWS_MOD = 'steer.dashboards.engine.images_and_snapshots.images.views'
urlpatterns = patterns(VIEWS_MOD,
url(r'^$', 'index', name='index'),
url(r'^(?P<image_id>[^/]+)/launch/$', 'launch', name='launch'),
url(r'^(?P<image_id>[^/]+)/update/$', 'update', name='update'))
| [
"king_wendy@sina.com"
] | king_wendy@sina.com |
324b31607026c0aadfacbabf4ea55e14843ba22c | 0c534e37b572f7f7173ed876393ad3898fc1925a | /pagerank_busqueda.py | 97666a3b4f7661c1f48b26f9a3d11d622a762d40 | [] | no_license | Alenicben/Concepto-Big-Data | 8656002d3fd0676783be2ba83a4bfcaa3de56fa9 | c1dfdc57cca75cc13b245fa94c0842948b521e7a | refs/heads/master | 2020-05-19T08:41:40.449160 | 2019-05-04T17:57:03 | 2019-05-04T17:57:03 | 184,926,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,985 | py | # -*- coding: utf-8 -*-
import json
from bs4 import BeautifulSoup
from w3lib.html import remove_tags
output = ''
def calculo_ranking(output):
list_dict = []
with open('output_indexado.json', 'r') as f:
output = json.loads(json.dumps(json.loads(f.read())))
for url_json in output:
list_dict.append(json.loads(json.dumps(url_json)))
for dicty in list_dict:
dicty.values()[0]["ranking"] = 0.156496876
return list_dict
list_dict = calculo_ranking(output)
def realizar_busqueda(clave, list_dict):
palabras_clave = clave.split()
relevancia = {}
for dicty in list_dict:
palabras_dict = dicty.values()[0]["palabras"]
contador_ocurrencias = 0
for k, v in palabras_dict.items():
if k in palabras_clave:
contador_ocurrencias += v
relevancia[dicty.keys()[0]] = contador_ocurrencias
print('Resultados de la busqueda: ')
contador_resultado = 0
for key in sorted(relevancia.iterkeys()):
print('Resultado: ' + str(contador_resultado))
print(" ")
for dicty in list_dict:
if dicty.keys()[0] == key:
print("Titulo pagina: " + u''.join(dicty.values()[0]["titulo"]).encode('utf-8'))
print(" ")
soup = BeautifulSoup(open('crawled/' + str(key).split("/")[-1] + ".html"), "html.parser", from_encoding='utf-8')
print(str(soup('p')[0]))
print(' ')
print("%s: %s" % ("URL: ", key))
print(" ")
print("Relevancia busqueda: %s" % (str(len(relevancia) - contador_resultado)))
contador_resultado = contador_resultado + 1
print(" ")
print("Ranking Pagina: " + str(dicty.values()[0]["ranking"]))
print(" ")
print(" ")
clave = str(raw_input("Ingresa las palabras que quieres buscar: "))
if len(clave) > 2:
realizar_busqueda(clave, list_dict)
else:
print("Debe digitar al menos 3 caracteres")
| [
"alexisbenzoni@gmail.com"
] | alexisbenzoni@gmail.com |
c9db991be5e0c674b693615146c99f833eb961b0 | 390f61c52288d5d1536c6c3caf67b49a8efd181b | /pdestradbucket/env/bin/pasteurize | af27328d410c7a82b537ae12383e611966f8d2d1 | [] | no_license | pdestrad/risk | 62801000f03ae0becc05db9dbe0a45c94e678288 | 7a71dad0fc8cad9a94f2870fb6a2e67a5e4e0a74 | refs/heads/master | 2022-11-29T22:44:37.107479 | 2018-02-15T05:25:14 | 2018-02-15T05:25:14 | 121,591,761 | 0 | 0 | null | 2022-11-23T19:02:06 | 2018-02-15T04:41:20 | Python | UTF-8 | Python | false | false | 428 | #!/Users/paulestrada/Desktop/pdestradbucket/env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.16.0','console_scripts','pasteurize'
__requires__ = 'future==0.16.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.16.0', 'console_scripts', 'pasteurize')()
)
| [
"paulestrada@Pauls-MacBook-Pro.local"
] | paulestrada@Pauls-MacBook-Pro.local | |
76445db28a181faa6a4d954ac7ea455bd577beb8 | 8da3ee3f2d088a6486b551cc59e1af5ebaa7e9b6 | /api/qp-api-flask/resources/user_login.py | 33be8727119cc3dfdcc8a69617427d35a5263dea | [] | no_license | sai-pothuri/SkillUp-Team-03 | 62f1694f11f80ee781df4d04b356f306b29384b8 | cb6896df5d86c930fd32c569a924b5ab3b725768 | refs/heads/master | 2022-11-17T20:52:23.218203 | 2020-07-16T08:51:45 | 2020-07-16T08:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | from flask_restful import Resource, reqparse
from db import query
from flask_jwt_extended import create_access_token, jwt_required
from werkzeug.security import safe_str_cmp
# this parameter is given globally in this module so that the userdb is changed all over the module if
# changed at one place (here). userdb is set so that while testing locally,
# the local database could have userdb different from 'User'.
# In the database employed for this utility, userdb is 'User'
userdb = 'User'
#User class is used create a User object and also use class methods to
#execute queries and return a User object for it
class User():
def __init__(self, uname, password, rno, branch_name, sem_no):
self.uname = uname
self.password = password
#testing features
self.rno = rno
self.branch_name = branch_name
self.sem_no = sem_no
@classmethod
def getUserByUname(cls,uname):
result=query(f"""SELECT uname,password,rno,branch_name,sem_no FROM users WHERE uname='{uname}'""",
return_json=False,
connect_db=userdb)
if len(result)>0: return User(result[0]['uname'],result[0]['password'],
result[0]['rno'],result[0]['branch_name'],result[0]['sem_no'])
return None
# This resource is defined for the user to login.
class UserLogin(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('uname', type=str, required=True,
help="uname cannot be left blank!")
parser.add_argument('password', type=str, required=True,
help="password cannot be left blank!")
data=parser.parse_args()
user=User.getUserByUname(data['uname'])
if User and safe_str_cmp(user.password,data['password']):
access_token=create_access_token(identity=user.uname,expires_delta=False)
return {'access_token':access_token,
'uname':user.uname,
'rno':user.rno,
'branch_name':user.branch_name,
'sem_no':user.sem_no},200
return {"message":"Invalid Credentials!"}, 401 | [
"noreply@github.com"
] | noreply@github.com |
2258e8477ffdf6a43e11b566c6bf2f1acc4df2c7 | 49bc2cb5e7141d581fa6c1f7cbd5b7341f95b6da | /carpool_env.py | bef6144fd8c2c3c98b8e1be8ec362c61cf565fe2 | [
"MIT"
] | permissive | giabarbou/qlearning_carpool | 20a294f5e4b52e3c64c330f9d06a5eac52d3bd22 | f3459a4e1952b16ede231f5cfbab7192955a759d | refs/heads/master | 2022-11-05T23:53:07.234932 | 2020-06-26T15:52:40 | 2020-06-26T15:52:40 | 273,688,689 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,082 | py | import numpy as np
class CarpoolEnv:
def __init__(self, distances, start=0, goal=0, capacity=4, delay=0.0):
"""
Args:
distances (ndarray): the distance matrix
start (int): driver's initial location index in the distance matrix
goal (int): goal's index in the distance matrix
capacity (int): how many passengers the car fits
delay (float): maximum delay (in seconds or meters based on distance matrix units)
reward_mult (float): factor that is multiplied with reward to give it greater significance
"""
self.dist = distances
self.n_states = distances.shape[0]
self.n_actions = distances.shape[1]
self.start = start
self.goal = goal
self.d_thr = self.dist[self.start][self.goal] + delay # set distance threshold
self.capacity = capacity
self.state = self.start
self.action = self.start
self.done = False
self.passengers = 0
self.dist_covered = 0.0
self.cumulated_reward = 0.0
self.valid_actions = [a for a in range(self.n_actions)
if a != self.start and a != self.goal]
self.route = [self.start]
def reset(self):
self.state = self.start
self.action = self.start
self.done = False
self.passengers = 0
self.dist_covered = 0.0
self.cumulated_reward = 0.0
self.valid_actions = [a for a in range(self.n_actions)
if a != self.start and a != self.goal]
self.route = [self.start]
return self.state, self.valid_actions
def sample(self):
action = np.random.choice(self.valid_actions)
return action
def step(self, action):
if self.done:
return None, 0.0, True, None
self.valid_actions.remove(action)
self.action = action
reward = 0.0
# does selecting the point lead to surpassing the distance threshold
# or not?
if self.dist_covered + self.dist[self.state][self.action] + \
self.dist[self.action][self.goal] <= self.d_thr:
reward += 1.0 - self.dist[self.state][self.action] / self.d_thr
self.passengers += 1
self.dist_covered += self.dist[self.state][self.action]
self.route.append(action)
self.state = self.action
else:
self.done = True
# check if car is full or there are no more actions to take
if self.passengers >= self.capacity or not self.valid_actions:
self.done = True
if self.done:
reward += 1.0 - self.dist[self.state][self.goal] / self.d_thr
self.dist_covered += self.dist[self.state][self.goal]
self.route.append(self.goal)
self.state = None
self.cumulated_reward += reward
return self.state, reward, self.done, self.valid_actions
| [
"noreply@github.com"
] | noreply@github.com |
22f50ab9a1d1f3a8476e439ac7064950872f5c1d | 380c4ce1704a6052a435da0ab340f2cca4339b8c | /scrapersite/celery.py | 4b7bd06767c20ff0c5ad7546ed9a73142d60437e | [
"MIT",
"Apache-2.0"
] | permissive | lordoftheflies/gargantula-scrapersite | 68c10b813df404ad26bcb8c4ad6471fa82040ea2 | 0abcb82bf30540ac5cd57d5ec9178e692a1a2ca6 | refs/heads/master | 2020-03-25T07:18:18.084391 | 2018-09-25T16:59:59 | 2018-09-25T16:59:59 | 143,553,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | from __future__ import absolute_import
import os
import dotenv
from celery import Celery
from django.conf import settings as django_settings

# Load environment variables from the user-level ~/.gargantula file.
env_file = os.path.join(str(os.path.expanduser('~')), '.gargantula')
dotenv.load_dotenv(dotenv_path=env_file)

# Map the ENVIRONMENT variable onto a django-configurations class name;
# anything unrecognised falls back to development.
environment = os.getenv('ENVIRONMENT')
settings = {
    'STAGING': 'staging',
    'PRODUCTION': 'production',
}.get(environment, 'development')

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrapersite.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', settings.title())

# django-configurations must be set up after the env vars above are in
# place, hence the late import.
import configurations
configurations.setup()

app = Celery('scrapersite')

# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: django_settings.INSTALLED_APPS)


@app.task(bind=True)
def debug_task(self):
    """Print the current task request; handy for checking worker wiring."""
    print('Request: {0!r}'.format(self.request))
| [
"laszlo.hegedus@cherubits.hu"
] | laszlo.hegedus@cherubits.hu |
2351627cba429794c787f1b8b52c0bf5472cd577 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/mchjos007/question2.py | 95a03b09fd80edb8ce36c1da69e53ec942c2d03e | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | filein = open (input("Enter the input filename:\n"), "r")
# Word-wrap utility: re-flows each paragraph of the input file so that no
# output line reaches the requested width, then writes the result to an
# output file.  (``filein`` is opened from a user-supplied name just above.)
lines = filein.readlines()
filein.close()

fileOut = open(input("Enter the output filename:\n"),"w")
# NOTE(review): eval() on raw user input is unsafe; int() would suffice here.
width = eval(input("Enter the line width:\n"))

finalFormattedString=""      # accumulated, re-wrapped output text
linecount= 0                 # 1-based index of the input line being processed
currentlineinprogress = ""   # output line being filled, not yet flushed

for currentline in lines:
    wordcount=0
    linecount += 1
    currentlinearray = currentline.split(" ")
    if(currentline != "\n"):
        for word in currentlinearray:
            wordcount+=1
            # The very last word of the file is flushed immediately
            # rather than buffered into currentlineinprogress.
            if linecount == len(lines) and wordcount == len(currentlinearray):
                if len(currentlineinprogress) + len(word) >= width:
                    # Word would overflow: break the line before it.
                    finalFormattedString += currentlineinprogress +"\n" + word
                    currentlineinprogress = ""
                else:
                    finalFormattedString += currentlineinprogress +" " + word
            else:
                # Strip the newline that split(" ") leaves attached to
                # the last word of each input line.
                if word[-1] == "\n":
                    word = word[:-1]
                if len(currentlineinprogress) + len(word) >= width:
                    # Current output line is full: flush it.
                    finalFormattedString += currentlineinprogress +"\n"
                    currentlineinprogress = ""
                if currentlineinprogress != "":
                    currentlineinprogress+= " "
                currentlineinprogress += word
    else:
        # Blank input line = paragraph break: flush the pending line and
        # keep an empty line in the output.
        finalFormattedString += currentlineinprogress + "\n\n"
        currentlineinprogress = ""

print(finalFormattedString, file = fileOut)
fileOut.close() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
1c200b87ea72282a7b3e002066c80f83ecee2a12 | 45d6097f72ccf8e0a1300f7f3f4d0b93d7b2f513 | /src/chat_generator.py | 5105f104adea09fdf13f50de3e689147fd0f7e45 | [] | no_license | zayd/chat | d66834964811afb2afd5bc4a9a97e6b0ed44a9ec | 3c7d900b5d4b348ecdbdc7b124e39a45e55cc941 | refs/heads/master | 2021-06-10T02:58:43.461180 | 2016-12-02T02:19:20 | 2016-12-02T02:19:20 | 61,749,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | """ Interface between server app and ranking back-end. Do pre/post processing here """
from chat_ranker import ChatRanker
import config
import cPickle as pickle
class ChatGenerator(object):
    """Interface between server app and ranking back-end.

    Wraps :class:`ChatRanker` and converts its raw top-k tuples into the
    dict shape the frontend expects. Pre/post processing lives here.
    """

    def __init__(self, corpus_path=config.CORPUS_PATH, mode='deploy'):
        self.mode = mode
        self.ranker = ChatRanker(corpus_path=corpus_path, mode=mode)
        # The corpus pickle holds (source_train, target_train,
        # source_test, target_test); only the target texts are kept.
        with open(corpus_path) as f:
            _, target_train, _, self.target_test = pickle.load(f)
        # In 'final' mode the training targets are appended so responses
        # may come from the full corpus.
        if self.mode == 'final':
            self.target_test = self.target_test + target_train

    def generate_response(self, query):
        """Return the post-processed top-k responses for ``query``."""
        cleaned = self._preprocess(query)
        ranked = self.ranker.topk(cleaned)
        return self._postprocess(ranked)

    def _preprocess(self, query):
        """Identity for now."""
        return query

    def _postprocess(self, responses):
        """Convert ranked (id, score, ...) tuples to frontend dicts, in place."""
        for idx, response in enumerate(responses):
            entry = {'id': response[0],
                     'text': self.target_test[response[0]]}
            # Remaining elements are scores, exposed as score_0, score_1, ...
            for jdx, score in enumerate(response[1:]):
                entry['score_' + str(jdx)] = score
            responses[idx] = entry
        return responses
# Smoke test: build a generator with default settings, rank one query,
# then drop into an interactive shell to inspect the results.
if __name__ == "__main__":
    cg = ChatGenerator()
    topk = cg.generate_response("This is a test of the emergency broadcast system")
    import IPython; IPython.embed();
| [
"zayd@berkeley.edu"
] | zayd@berkeley.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.