id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1751781 | #!/bin/python
#coding:utf-8
import roomai
class Stage:
    """Integer constants naming the four consecutive stages of a game round."""
    firstStage, secondStage, thirdStage, fourthStage = 1, 2, 3, 4
# Card-pattern table. Each value is a list with fixed positions:
#   0 name, 1 isStraight, 2 isPair, 3 isSameSuit,
#   4 [SizeOfPair1, SizeOfPair2, ...] (descending), 5 rank
_ALL_CARDS_PATTERN_ROWS = [
    ["Straight_SameSuit", True, False, True, [], 100],
    ["4_1", False, True, False, [4, 1], 98],
    ["3_2", False, True, False, [3, 2], 97],
    ["SameSuit", False, False, True, [], 96],
    ["Straight_DiffSuit", True, False, False, [], 95],
    ["3_1_1", False, True, False, [3, 1, 1], 94],
    ["2_2_1", False, True, False, [2, 2, 1], 93],
    ["2_1_1_1", False, True, False, [2, 1, 1, 1], 92],
    ["1_1_1_1_1", False, True, False, [1, 1, 1, 1, 1], 91],
]
AllCardsPattern = {row[0]: row for row in _ALL_CARDS_PATTERN_ROWS}
# Point ranks: '2' is the lowest (0), 'A' the highest (12).
point_str_to_rank = {p: r for r, p in enumerate("23456789TJQKA")}
point_rank_to_str = {r: p for p, r in point_str_to_rank.items()}
# Suit ranks: Spade (0) .. Club (3).
suit_str_to_rank = {s: r for r, s in enumerate(("Spade", "Heart", "Diamond", "Club"))}
suit_rank_to_str = {r: s for s, r in suit_str_to_rank.items()}
class PokerCard(object):
    '''
    A Poker Card. \n
    A Poker Card has a point (2,3,4,....,K,A) and a suit (Spade, Heart, Diamond, Club). \n
    Different points have different ranks, for example the point 2's rank is 0, and the point A's rank is 12. \n
    Different suits have different ranks too. \n
    A Poker Card has a key (point-suit). We strongly recommend you to get a poker card by using the class function lookup with the key. \n
    Examples of the class usages: \n
    >> import roomai.games.texasholdem \n
    >> card = roomai.games.texasholdem.PokerCard.lookup("2-Spade") \n
    >> card.point \n
    2\n
    >> card.suit\n
    Spade\n
    >> card.point_rank\n
    0\n
    >> card.suit_rank\n
    0\n
    >> card.key\n
    "2-Spade"\n
    '''

    def __init__(self, point, suit=None):
        """Build a card either from a combined "point-suit" key (when suit is
        None) or from separate point and suit values, each given either as a
        string ('A', 'Spade') or as a rank (12, 0)."""
        point1 = 0
        suit1 = 0
        if suit is None:
            # a single "point-suit" key was passed
            kv = point.split("-")
            point1 = point_str_to_rank[kv[0]]
            suit1 = suit_str_to_rank[kv[1]]
        else:
            point1 = point
            if isinstance(point, str):
                point1 = point_str_to_rank[point]
            suit1 = suit
            if isinstance(suit, str):
                suit1 = suit_str_to_rank[suit]
        self.__point__ = point_rank_to_str[point1]
        self.__suit__ = suit_rank_to_str[suit1]
        self.__point_rank__ = point1
        self.__suit_rank__ = suit1
        self.__key__ = "%s-%s" % (self.__point__, self.__suit__)

    def __get_point_str__(self):
        return self.__point__
    point = property(__get_point_str__, doc="The point of the poker card")

    def __get_suit_str__(self):
        return self.__suit__
    suit = property(__get_suit_str__, doc="The suit of the poker card")

    def __get_point_rank__(self):
        return self.__point_rank__
    point_rank = property(__get_point_rank__, doc="The point rank of the poker card")

    def __get_suit_rank__(self):
        return self.__suit_rank__
    suit_rank = property(__get_suit_rank__, doc="The suit rank of the poker card")

    def __get_key__(self):
        return self.__key__
    key = property(__get_key__, doc="The key of the poker card")

    @classmethod
    def lookup(cls, key):
        '''
        lookup a PokerCard with the specified key

        :param key: The specified key, e.g. "2-Spade"
        :return: The shared PokerCard instance with the specified key
        :raises ValueError: when the key is unknown
        '''
        logger = roomai.get_logger()
        if key not in AllPokerCards:
            # bug fix: the message previously read "is not invalid"
            logger.fatal("key (%s) is not a valid poker card key" % (key,))
            raise ValueError("key (%s) is not a valid poker card key" % (key,))
        return AllPokerCards[key]

    @classmethod
    def point_to_rank(cls, point):
        """Map a point string (e.g. 'T') to its rank (e.g. 8)."""
        if point not in point_str_to_rank:
            # bug fix: the %s placeholder was never filled in
            raise ValueError("%s is an invalid poker point for PokerCard" % (point,))
        return point_str_to_rank[point]

    @classmethod
    def suit_to_rank(cls, suit):
        """Map a suit string (e.g. 'Heart') to its rank (e.g. 1)."""
        if suit not in suit_str_to_rank:
            # bug fix: the %s placeholder was never filled in
            raise ValueError("%s is an invalid poker suit for PokerCard" % (suit,))
        return suit_str_to_rank[suit]

    @classmethod
    def rank_to_point(cls, rank):
        """Map a point rank (e.g. 8) back to its point string (e.g. 'T')."""
        if rank not in point_rank_to_str:
            # bug fix: the %d placeholder was never filled in
            raise ValueError("%d is an invalid poker point rank for PokerCard" % (rank,))
        return point_rank_to_str[rank]

    @classmethod
    def rank_to_suit(cls, rank):
        """Map a suit rank (e.g. 1) back to its suit string (e.g. 'Heart')."""
        if rank not in suit_rank_to_str:
            # bug fix: the %d placeholder was never filled in
            raise ValueError("%d is an invalid poker suit rank for PokerCard" % (rank,))
        return suit_rank_to_str[rank]

    @classmethod
    def compare(cls, pokercard1, pokercard2):
        '''
        Compare two poker cards with their point ranks and suit ranks.
        The poker card with the higher point rank has the higher rank.
        With the same point rank, the poker card with the higher suit rank has the higher rank.

        :param pokercard1:
        :param pokercard2:
        :return: A number, which is >0 when the poker card1 has the higher rank than the poker card2, =0 when their share the same rank, <0 when the poker card1 has the lower rank than the poker card2
        '''
        pr1 = pokercard1.point_rank
        pr2 = pokercard2.point_rank
        if pr1 != pr2:
            return pr1 - pr2
        else:
            return pokercard1.suit_rank - pokercard2.suit_rank

    def __deepcopy__(self, memodict={}, newinstance=None):
        # cards are immutable value objects: deep copies return the shared
        # canonical instance from AllPokerCards
        return AllPokerCards[self.key]
# Canonical instances: one PokerCard per (point, suit) combination, keyed by
# "point-suit". lookup() and __deepcopy__ hand out these shared objects.
AllPokerCards = {
    "%s-%s" % (point, suit): PokerCard("%s-%s" % (point, suit))
    for point in point_str_to_rank
    for suit in suit_str_to_rank
}
| StarcoderdataPython |
3200169 | import sys
import traceback
__author__ = 'xshu'
# global variables shared by loadGeneInfo() and create()
symbolsFound = []                # HGNC symbols that were found in the NCBI data
geneSymbol2IDMapping = {}        # NCBI gene symbol -> NCBI gene id
geneSymbol2SynonymsMapping = {}  # NCBI gene symbol -> ";"-joined synonym list
def showHelp():
    """Print the command-line usage of this script to stdout."""
    # bug fix: was a Python 2 `print` statement; the parenthesized call form
    # below behaves identically on Python 2 and is required on Python 3
    print(
    '''
This program creates the initial gene2uniprot by updating aliases in hgncSymbolAlias2Uniprot with NCBI gene-info file
Usage: %s
Parameter Description
-geneInfo gene_info file from NCBI
-hgnc2uniprot hgnc symbols to uniprot id downloaded from HUGO
-output Output file
(eg. %s -geneInfo <<absolute gene_info file path>>
-hgnc2uniprot <<absolute hgnc2uniprot file path>>
-output <<absolute output file path>> )
''' % (sys.argv[0], sys.argv[0]))
def loadGeneInfo(geneInfo):
    """Fill the module-level geneSymbol2IDMapping and geneSymbol2SynonymsMapping
    tables from an NCBI gene_info file (tab-separated: gene id, symbol,
    |-separated synonyms, ...). Does nothing when geneInfo is empty."""
    if geneInfo != "":
        # bug fix: the handle was never closed; `with` guarantees it
        with open(geneInfo, "r") as geneInfoHandle:
            for line in geneInfoHandle:
                line = line.rstrip("\n")
                fields = line.split("\t")
                # bug fix: `long` does not exist on Python 3; int suffices
                geneID = int(fields[0].strip())
                geneSymbol = fields[1].strip()
                geneSynonyms = fields[2].strip().replace("|", ";")
                # first occurrence of a symbol wins
                if geneSymbol not in geneSymbol2IDMapping:
                    geneSymbol2IDMapping[geneSymbol] = geneID
                if geneSymbol not in geneSymbol2SynonymsMapping:
                    geneSymbol2SynonymsMapping[geneSymbol] = geneSynonyms
# new hgnc file format
# hgnc2uniprot download has been changed to include approved symbols, previous symbols, synonyms, and uniprot IDs
# now including previous HGNC symbols
def create(output, hgnc2uniprot):
    """Merge the NCBI mappings (loaded by loadGeneInfo) with the HUGO
    hgnc2uniprot file and write the combined gene2uniprot table to *output*.

    Output columns (tab-separated): gene id, symbol, synonyms, previous HGNC
    symbols, uniprot id. NCBI genes absent from the HGNC file are appended
    with empty previous-symbol and uniprot columns.
    """
    gene2uniprotHandle = open(output, "w")
    hgnc2uniprotHandle = open(hgnc2uniprot, "r")
    hgnc2uniprotHandle.readline()  # skip the header line
    for line in hgnc2uniprotHandle:
        line = line.rstrip("\n")
        fields = line.split("\t")
        if len(fields) == 4:
            hgncSymbol = fields[0].strip()
            hgncPreviousSymbol = fields[1].strip().split(", ")
            hgncAliases = fields[2].strip().split(", ")
            uniprotID = fields[3].strip()
            if hgncSymbol in geneSymbol2SynonymsMapping and hgncSymbol in geneSymbol2IDMapping:
                symbolsFound.append(hgncSymbol)
                geneID = geneSymbol2IDMapping[hgncSymbol]
                ncbiSynonyms = geneSymbol2SynonymsMapping[hgncSymbol]
                # merge HGNC aliases that NCBI does not already list
                # (bug fix: the `len(...) >= 0` guard was always true and the
                # result of alias.strip() was silently discarded)
                for alias in hgncAliases:
                    alias = alias.strip()
                    if ncbiSynonyms.find(alias) == -1:
                        ncbiSynonyms += ";"
                        ncbiSynonyms += alias
                # this is new code for adding previous HGNC symbols
                if hgncSymbol in geneSymbol2IDMapping:
                    hgncOLD = ""
                    # bug fix: old.strip() result was discarded here as well
                    for old in hgncPreviousSymbol:
                        old = old.strip()
                        if hgncOLD.find(old) == -1:
                            hgncOLD += old
                            hgncOLD += ";"
                    gene2uniprotHandle.write(
                        "%s\t%s\t%s\t%s\t%s\n" % (geneID, hgncSymbol, ncbiSynonyms, hgncOLD, uniprotID.rstrip(";")))
    hgnc2uniprotHandle.close()
    # append genes that are NOT included in the hgnc2uniprot file
    for hgncSymbol in geneSymbol2IDMapping:
        if hgncSymbol not in symbolsFound:
            geneID = geneSymbol2IDMapping[hgncSymbol]
            synonyms = geneSymbol2SynonymsMapping[hgncSymbol]
            gene2uniprotHandle.write("%s\t%s\t%s\t%s\t%s\n" %
                                     (geneID, hgncSymbol, synonyms, "", ""))
    gene2uniprotHandle.close()
def _mainFunc():
    """Parse -geneInfo / -hgnc2uniprot / -output arguments and run the merge;
    on any error print the traceback followed by the usage text."""
    try:
        # bug fix: initialize the three parameters so that a missing option
        # raises the intended Exception below instead of a NameError
        geneInfo = ""
        hgnc2uniprot = ""
        output = ""
        for index in range(len(sys.argv)):
            if sys.argv[index] == "-geneInfo":
                geneInfo = sys.argv[index + 1].strip()
            elif sys.argv[index] == "-hgnc2uniprot":
                hgnc2uniprot = sys.argv[index + 1].strip()
            elif sys.argv[index] == "-output":
                output = sys.argv[index + 1].strip()
        if geneInfo == "" or hgnc2uniprot == "" or output == "":
            raise Exception("All parameters are required!")
        # todo: Memory is in an intensive use when the mappings are pre-loaded.
        # Check if PyTables can offer an alternative whenever possible
        loadGeneInfo(geneInfo)
        create(output, hgnc2uniprot)
    except Exception:
        traceback.print_exc()
        showHelp()
| StarcoderdataPython |
4830259 | from output.models.ms_data.identity_constraint.id_g029_xsd.id_g029 import (
Root,
T,
)
# Re-export the generated identity-constraint model classes.
__all__ = ["Root", "T"]
| StarcoderdataPython |
1768253 | #!/usr/bin/python -W all
"""
findRoute.py: find longest route with an index of the available train rides
usage: findRoute.py [-b beam-size] [-f firstStation] [-h] [-H history-file] [-i] [-n] [-s time] [-S] < traintrips.txt
note: expected input line formats:
1. hash sign distance start-station end-station
2. (often on 4 separate lines) start-time end-time transfers travel-time
-b: beam size
-f: first station: start all routes here
-h: show help message and exit
-H history-file: file with partial journey; like output format:
startTime endTime waitingTime distance speed startStation endStation
-i: ignore transfer safety times
-n: create new route/delete old route information
-s: start time of search, format: HH:MM (hours and minutes)
-S: show the speeds of the various trips
20170617 erikt(at)xs4all.nl developed for my 2017 kmkampioen participation
"""
import getopt
import re
import sys
# constants
COMMAND = sys.argv.pop(0) # NOTE: mutates sys.argv so getopt sees only the arguments
TIMEZERO = "06:00" # start time of competition
DAYTIME = "18:00" # duration of competition
MAXWAIT = "00:29" # do not stay at any station longer than this
MINRETURNWAITINGTIME = "00:02" # need at least 2 minutes to catch train back
CENTERNAME = "utrechtcentraal" # visit this station...
CENTERSTARTTIME = "11:00" # between this time and...
CENTERENDTIME = "15:00" # this time and...
CENTERWAITTIME = "00:05" # stay there at least this many minutes
MAXTIMERESERVE = "00:00" # number of last minute(s) of 24 hours as reserve
MINUTESPERHOUR = 60.0 # conversion factor used by averageSpeed()
PARTNERFILE="partners" # tracks with overlapping parts
STATIONSFILE="stations" # list of station names
TRANSFERSFILE="transfers" # minimum required time per transfer
TIMEDISTANCEFILE = "time-distance" # best distance covered per time of earlier runs
HELP="""usage: findRoute.py [-b beam-size] [-f firstStation] [-h] [-H history-file] [-i] [-n] [-s time] [-S] < traintrips.txt
-b: beam size
-f: first station: start all routes here
-h: show help message and exit
-H history-file: file with partial journey; like output format:
startTime endTime waitingTime distance speed startStation endStation
-i: ignore transfer safety times
-n: create new route/delete old route information
-s: start time of search, format: HH:MM (hours and minutes)
-S: show the speeds of the various trips"""
# variables modifiable by arguments
beamSize = 20 # prune branches that fall this many km behind the best known distance
historyFile = "" # -H: partial journey to continue from
firstStation = "" # -f: restrict route starts to this station
globalStartTime = TIMEZERO # start the journey at this time (or a little bit later)
doShowSpeeds = False # -S
resetBestDistances = False # -n
ignoreTransferSafetyTimes = False # -i
# internal variables
index = {} # "station time" -> previous station -> next station -> trips
partners = {} # track -> overlapping partner tracks (see readPartners)
stations = {} # known station names (set-like dict)
transfers = {} # minimal transfer time per track pair (see readTransfers)
trainTrips = [] # trips parsed from stdin (see readTrainTrips)
maxDistance = 0 # best total distance found so far
timeDistance = {} # best known distance per (start station, start time, clock time)
def help():
    """Print the usage text to stdout and exit with status 0."""
    print(HELP)
    sys.exit()
def reverseTrack(track):
    """Return the track "A B" with its two station names swapped ("B A")."""
    first, second = track.split()
    return " ".join((second, first))
def readStations():
    """Load the station names from STATIONSFILE into a set-like dict
    (name -> True); abort the program when the file cannot be read."""
    try:
        handle = open(STATIONSFILE, "r")
    except:
        sys.exit(COMMAND + ": cannot read file " + STATIONSFILE + "\n")
    names = {}
    for entry in handle:
        names[entry.rstrip()] = True
    return names
# read minimal transfer times
def readTransfers():
    """Read TRANSFERSFILE into a dict mapping a track-pair key (4 station
    names, optionally followed by a clock time) to the minimal transfer time
    "HH:MM". Returns an empty dict when the file is absent."""
    transfers = {}
    try: inFile = open(TRANSFERSFILE,"r")
    except: return(transfers)
    for line in inFile:
        line = line.rstrip()
        fields = line.split()
        if len(fields) < 5: sys.exit(COMMAND+": unexpected line in file "+TRANSFERSFILE+": "+line+"\n")
        # first field is the minimal transfer time; the rest form the key
        time = fields.pop(0)
        # the first four remaining fields must be known station names
        for i in range(0,4):
            if not fields[i] in stations:
                sys.exit(COMMAND+": unknown station "+fields[i]+" on line: "+line+"\n")
        line = " ".join(fields)
        transfers[line] = time
    return(transfers)
# read time-distance file
def readTimeDistance():
    """Load the best-known distances of earlier runs from TIMEDISTANCEFILE:
    "station startTime time" -> distance. Returns an empty dict when the file
    is missing or still uses the obsolete 3-field format."""
    timeDistance = {}
    try: inFile = open(TIMEDISTANCEFILE,"r")
    except: return(timeDistance)
    patternHashStart = re.compile("^#")
    for line in inFile:
        line = line.rstrip()
        if patternHashStart.match(line): continue
        fields = line.split()
        if len(fields) == 3: return({}) # old format
        if len(fields) != 4: sys.exit(COMMAND+": unexpected line in file "+TIMEDISTANCEFILE+": "+line+"\n")
        station,startTime,time,distance = fields
        timeDistance[station+" "+startTime+" "+time] = float(distance)
    return(timeDistance)
# write time-distance file
def writeTimeDistance(timeDistance):
    """Write the best distance per (start station, start time) for every
    minute up to 25:00 to TIMEDISTANCEFILE, carrying the last best value
    forward so the stored series is non-decreasing over time."""
    try: outFile = open(TIMEDISTANCEFILE,"w")
    except: sys.exit(COMMAND+": cannot write file "+TIMEDISTANCEFILE+"\n")
    # collect the distinct "station startTime" prefixes present in the table
    startStationsTimes = {}
    for keyTD in timeDistance:
        fields = keyTD.split()
        if len(fields) != 3: sys.exit(COMMAND+": invalid time-distance key: "+keyTD+"\n")
        station,startTime,time = keyTD.split()
        startStationsTimes[station+" "+startTime] = True
    for stationTime in startStationsTimes:
        lastDistance = 0
        for minutes in range(0,time2minutes("25:01")):
            thisTime = minutes2time(minutes)
            keyTD = stationTime+" "+thisTime
            # emit the running maximum: missing or stale entries repeat the
            # last best value seen at an earlier minute
            if not keyTD in timeDistance or timeDistance[keyTD] <= lastDistance:
                outFile.write(keyTD+" "+str(lastDistance)+"\n")
            else:
                outFile.write(keyTD+" "+str(timeDistance[keyTD])+"\n")
                lastDistance = timeDistance[keyTD]
    outFile.close()
def averageSpeed(distance, startTime, endTime):
    """Average speed of a trip: distance divided by the elapsed time between
    the "HH:MM" strings startTime and endTime, scaled to per-hour units."""
    elapsedMinutes = time2minutes(endTime) - time2minutes(startTime)
    return MINUTESPERHOUR * distance / elapsedMinutes
def readTrainTrips():
    """Parse train trips from stdin.

    Two kinds of input lines are expected:
      1. "# distance startStation endStation" — route meta data that applies
         to all following schedule lines;
      2. groups of four whitespace-separated values (possibly spread over
         several lines): startTime endTime transfers travelTime.
    Returns a list of trip dicts with startStation, endStation, startTime,
    endTime, distance and averageSpeed.
    """
    # regular expressions
    patternHashStart = re.compile("^#")
    patternIsTime = re.compile("^\d\d:\d\d$")
    patternIsNumber = re.compile("^\d+(\.\d+)?$")
    # variables
    trainTrips = []
    startStation = ""
    endStation = ""
    distance = 0
    lines = [] # contain train trip information (spread over several lines)
    for line in sys.stdin:
        line = line.rstrip()
        # line with route meta data start with a hash sign
        # example: # 39 amsterdamcentraal utrechtcentraal
        if patternHashStart.match(line):
            fields = line.split()
            # sanity checks
            if len(fields) != 4:
                sys.exit(COMMAND+": unexpected line in data file: "+line+"\n")
            if not patternIsNumber.match(fields[1]):
                sys.exit(COMMAND+": missing distance on line: "+line+"\n")
            distance = float(fields[1])
            startStation = fields[2]
            endStation = fields[3]
            for station in fields[2:4]:
                if not station in stations:
                    sys.exit(COMMAND+": unknown station on stdin: "+station+"\n")
            # a new route resets any partially collected schedule data
            lines = []
        else:
            # lines with train trip information are grouped in sets of four
            # 1. start time, 2. end time, 3. number of transfers, 4. travel time
            fields = line.split()
            lines.extend(fields)
            if len(lines) > 4:
                sys.exit(COMMAND+": unexpected schedule data (quantity): "+str(lines)+"\n")
            if len(lines) == 4:
                startTime = lines[0]
                endTime = lines[1]
                if not patternIsTime.match(startTime) or \
                   not patternIsTime.match(endTime):
                    sys.exit(COMMAND+": unexpected schedule data (times): "+str(lines)+"\n")
                # do not allow travelling over the day end
                if startTime >= endTime:
                    sys.exit(COMMAND+": unexpected start and end time: "+str(lines)+"\n")
                speed = averageSpeed(distance,startTime,endTime)
                trainTrips.append({"startStation":startStation,"endStation":endStation,"startTime":startTime,"endTime":endTime,"distance":distance,"averageSpeed":speed})
                # clear lines buffer
                lines = []
    return(trainTrips)
def time2minutes(time):
    """Convert an "HH:MM" clock string (hours may exceed 24) to minutes."""
    hours, mins = time.split(":")
    return 60 * int(hours) + int(mins)
def minutes2time(minutes):
    """Convert a number of minutes to an "HH:MM" string (inverse of
    time2minutes for hour values below 100).

    Uses integer divmod instead of the previous float division plus manual
    zero-padding; behaviour is identical for the integer inputs used here,
    and the hidden dependency on the MINUTESPERHOUR global is removed.
    """
    hours, mins = divmod(int(minutes), 60)
    return str(hours).zfill(2) + ":" + str(mins).zfill(2)
def computeTimes(startTime,waitingTime):
    """List the "HH:MM" times from (startTime - waitingTime) up to and
    including startTime, skipping times before 00:00 and before the global
    journey start time."""
    startMinutes = time2minutes(startTime)
    waitingMinutes = time2minutes(waitingTime)
    times = []
    for minutes in range(startMinutes-waitingMinutes,startMinutes+1):
        if minutes >= 0:
            time = minutes2time(minutes)
            if time >= globalStartTime: times.append(time)
    return(times)
def makeIndex(trainTrips,transfers):
    """Build the trip lookup used by findRoute.

    Result: "station time" -> previous start station -> next end station ->
    list of trip dicts. At the global start time all alternatives are kept;
    at later times only the earliest-arriving trip per destination is stored,
    and trips violating the minimal transfer times are excluded.
    """
    index = {}
    # first check at which stations we can be at what times
    for i in range(0,len(trainTrips)):
        key = trainTrips[i]["endStation"]+" "+trainTrips[i]["endTime"]
        # we need follow-up routes for any station a trip finishes at
        if not key in index: index[key] = {}
        # keep the start station as well
        index[key][trainTrips[i]["startStation"]] = {}
        # we need follow-up routes for any station we can start the day
        # NOTE(review): this compares a clock time with MAXWAIT ("00:29"),
        # i.e. only trips starting at or before 00:29 qualify — confirm intent
        if trainTrips[i]["startTime"] <= MAXWAIT:
            key = trainTrips[i]["endStation"]+" "+globalStartTime
            if not key in index: index[key] = {}
            # no start station: use the end station as start station
            index[key][trainTrips[i]["endStation"]] = {}
    # next look for appropriate places to use a train trip
    for i in range(0,len(trainTrips)):
        for time in computeTimes(trainTrips[i]["startTime"],MAXWAIT):
            startStation = trainTrips[i]["startStation"]
            key = startStation+" "+time
            if key in index:
                for prevStartStation in index[key]:
                    # we keep only the time closest to now
                    # this causes a problem when the station requires a longer waiting time:
                    # the next time is missing
                    endStation = trainTrips[i]["endStation"]
                    trackPair = prevStartStation+" "+startStation+" "+startStation+" "+endStation
                    trackPairTime = trackPair+" "+time
                    waitingTime = minutes2time(time2minutes(trainTrips[i]["startTime"])-time2minutes(time))
                    nextTrip = {"startTime":trainTrips[i]["startTime"],"endTime":trainTrips[i]["endTime"],"distance":trainTrips[i]["distance"],"averageSpeed":trainTrips[i]["averageSpeed"]}
                    # collect all relevant trips for the start of the route
                    if time == TIMEZERO:
                        if not endStation in index[key][prevStartStation]: index[key][prevStartStation][endStation] = []
                        index[key][prevStartStation][endStation].append(nextTrip)
                    # for continuing a route, just keep the best time for each destination; consider the minimal transfer times
                    elif (not endStation in index[key][prevStartStation] or \
                          trainTrips[i]["endTime"] < index[key][prevStartStation][endStation][0]["endTime"]) and \
                         (prevStartStation != endStation or waitingTime >= MINRETURNWAITINGTIME or ignoreTransferSafetyTimes) and \
                         (not trackPair in transfers or waitingTime >= transfers[trackPair] or ignoreTransferSafetyTimes) and \
                         (not trackPairTime in transfers or waitingTime >= transfers[trackPairTime] or ignoreTransferSafetyTimes):
                        if not endStation in index[key][prevStartStation]: index[key][prevStartStation][endStation] = [nextTrip]
                        else: index[key][prevStartStation][endStation][0] = nextTrip
    return(index)
def centerVisited(route):
    """True when the route satisfies (or can still satisfy) the obligatory
    visit to CENTERNAME between CENTERSTARTTIME and CENTERENDTIME with a wait
    of at least CENTERWAITTIME; trivially true for empty routes, routes that
    end before the window closes, or when safety checks are disabled."""
    if len(route) == 0: return(True)
    if route[-1]["endTime"] < CENTERENDTIME or ignoreTransferSafetyTimes: return(True)
    for i in range(1,len(route)):
        # did we arrive at the center station in the time frame, waiting the
        # required minimum (either the departure or the arrival must fall in
        # the window)
        if route[i]["startStation"] == CENTERNAME and route[i]["waitingTime"] >= CENTERWAITTIME and \
           ((route[i]["startTime"] >= CENTERSTARTTIME and route[i]["startTime"] <= CENTERENDTIME) or
            (route[i-1]["endTime"] >= CENTERSTARTTIME and route[i-1]["endTime"] <= CENTERENDTIME)): return(True)
    return(False)
def printRoute(route):
    """Print one line per trip: start time, end time, waiting time, distance,
    distance behind the best known route, speed, start and end station."""
    for trainTrip in route:
        print("%s %s %s %0.1f %0.1f %d %s %s" % (trainTrip["startTime"],trainTrip["endTime"],trainTrip["waitingTime"],trainTrip["distance"],trainTrip["lessThanBest"],int(trainTrip["averageSpeed"]),trainTrip["startStation"],trainTrip["endStation"]))
# compute the maximum (end) time for a given start time
def computeMaxTime(startTime):
    """Latest allowed end time: start time plus DAYTIME, minus the
    MAXTIMERESERVE safety margin unless safety times are ignored."""
    total = time2minutes(startTime) + time2minutes(DAYTIME)
    if not ignoreTransferSafetyTimes:
        total -= time2minutes(MAXTIMERESERVE)
    return minutes2time(total)
def fillTimeDistance(startStation,startTime,endTime,distance):
    """Intended to propagate *distance* into the global timeDistance table for
    every minute after endTime (improving the pruning bound).

    NOTE(review): as written this is a no-op — the loop starts at minute 0,
    where thisTime ("00:00") is never greater than endTime, so the `else`
    branch returns on the first iteration. The `else: return()` only makes
    sense with a descending loop (or should be a `continue`); confirm and fix.
    """
    global timeDistance
    for minutes in range(0,time2minutes("25:01")):
        thisTime = minutes2time(minutes)
        if thisTime > endTime:
            keyTD = startStation+" "+startTime+" "+thisTime
            if not keyTD in timeDistance or timeDistance[keyTD] <= distance: timeDistance[keyTD] = distance
        else: return()
def findRoute(index,route,travelled,distance):
    """Depth-first search for the longest route.

    route is the partial journey so far, travelled the tracks already used
    (both directions, as a set-like dict), distance the distance collected.
    Each improvement over the global maxDistance is printed immediately.
    With an empty route, the search is seeded from every station available at
    the global start time and the time-distance table is saved afterwards.
    """
    global maxDistance,maxTime,timeDistance
    if distance > 0:
        # record the best distance reached at this end time, for pruning
        keyTD = route[0]["startStation"]+" "+route[0]["startTime"]+" "+route[-1]["endTime"]
        if keyTD not in timeDistance or timeDistance[keyTD] < distance:
            timeDistance[keyTD] = distance
            fillTimeDistance(route[0]["startStation"],route[0]["startTime"],route[-1]["endTime"],distance)
            route[-1]["lessThanBest"] = 0.0
        if distance >= maxDistance:
            maxDistance = distance
            printRoute(route)
            print("# largest distance : %0.1f" % (maxDistance))
    # start of route: check all stations at start time
    if len(route) == 0:
        for key in index:
            fields = key.split()
            if len(fields) < 2:
                sys.exit(COMMAND+": incorrect key in index: "+key+"\n")
            startStation = fields[0]
            if fields[1] == globalStartTime and (firstStation == "" or startStation == firstStation):
                for endStation in index[key][startStation]:
                    for i in range(0,len(index[key][startStation][endStation])):
                        startTime = index[key][startStation][endStation][i]["startTime"]
                        if globalStartTime == TIMEZERO or startTime == globalStartTime:
                            endTime = index[key][startStation][endStation][i]["endTime"]
                            distance = index[key][startStation][endStation][i]["distance"]
                            averageSpeed = index[key][startStation][endStation][i]["averageSpeed"]
                            waitingTime = minutes2time(time2minutes(startTime)-time2minutes("00:00"))
                            # the 24-hour window starts at the first departure
                            maxTime = computeMaxTime(startTime)
                            track = startStation+" "+endStation
                            travelled = {track:True,reverseTrack(track):True}
                            findRoute(index,[{"startStation":startStation,"endStation":endStation,"startTime":startTime,"endTime":endTime,"distance":distance,"averageSpeed":averageSpeed,"waitingTime":waitingTime,"lessThanBest":0.0}],travelled,distance)
        # store new time-distance data for this start station
        writeTimeDistance(timeDistance)
    # continue a route
    else:
        prevStartStation = route[-1]["startStation"]
        startStation = route[-1]["endStation"]
        time = route[-1]["endTime"]
        key = startStation+" "+time
        for endStation in index[key][prevStartStation]:
            track = startStation+" "+endStation
            endTime = index[key][prevStartStation][endStation][0]["endTime"]
            if endTime <= maxTime:
                # repeated tracks may be travelled but do not add distance
                repeatedTrack = track in travelled
                startTime = index[key][prevStartStation][endStation][0]["startTime"]
                if len(route) == 1 and route[-1]["distance"] == 0.0:
                    maxTime = computeMaxTime(startTime)
                waitingTime = minutes2time(time2minutes(startTime)-time2minutes(route[-1]["endTime"]))
                if endStation != route[-1]["startStation"] or waitingTime >= MINRETURNWAITINGTIME or ignoreTransferSafetyTimes:
                    # add track
                    lastTrackPair = route[-1]["startStation"]+" "+route[-1]["endStation"]+" "+track
                    lastTrackPairEndTime = lastTrackPair+" "+time
                    thisDistance = 0.0
                    if not repeatedTrack:
                        travelled[track] = True
                        travelled[reverseTrack(track)] = True
                        thisDistance = index[key][prevStartStation][endStation][0]["distance"]
                        # subtract sections shared with already-travelled tracks
                        if track in partners:
                            for i in range(0,len(partners[track])):
                                if partners[track][i]["partner"] in travelled:
                                    thisDistance -= partners[track][i]["distance"]
                    distance += thisDistance
                    averageSpeed = index[key][prevStartStation][endStation][0]["averageSpeed"]
                    keyTD = route[0]["startStation"]+" "+route[0]["startTime"]+" "+endTime
                    lessThanBest = 0.0
                    if keyTD in timeDistance: lessThanBest = timeDistance[keyTD]-distance
                    route.append({"startStation":startStation,"endStation":endStation,"startTime":startTime,"endTime":endTime,"distance":thisDistance,"averageSpeed":averageSpeed,"waitingTime":waitingTime,"lessThanBest":lessThanBest})
                    # continue search, unless beam-pruned or a transfer rule
                    # or the obligatory center visit is violated
                    if centerVisited(route) and lessThanBest <= beamSize and \
                       (not lastTrackPair in transfers or waitingTime >= transfers[lastTrackPair] or ignoreTransferSafetyTimes) and \
                       (not lastTrackPairEndTime in transfers or waitingTime >= transfers[lastTrackPairEndTime] or ignoreTransferSafetyTimes):
                        findRoute(index,route,travelled,distance)
                    # delete track (backtrack)
                    if not repeatedTrack:
                        del travelled[track]
                        del travelled[reverseTrack(track)]
                    distance -= thisDistance
                    route.pop(-1)
def readRoute(fileName):
    """Load a partial journey (history file, same format as the output) and
    return {"travelled": tracks used, "route": trip list, "distance": total}.
    Also sets the global maxTime from the first trip's start time."""
    global maxTime
    try: inFile = open(fileName,"r")
    except: sys.exit(COMMAND+": cannot read file "+fileName+"\n")
    route = []
    travelled = {}
    totalDistance = 0
    patternHashStart = re.compile("^#")
    patternNumberChar = re.compile("^[0-9][0-9a-z]*")
    for line in inFile:
        line = line.rstrip()
        if patternHashStart.match(line): continue
        fields = line.split()
        # expected line format: 00:02 00:15 00:02 20 0 92 rotterdamcentraal dordrecht 0
        if len(fields) < 8: sys.exit(COMMAND+": unexpected line in file "+fileName+": "+line+"\n")
        # remove final number from list (make its presence optional)
        while len(fields) > 0 and patternNumberChar.match(fields[-1]): fields.pop(-1)
        startTime = fields[0]
        # the 24-hour window starts at the first departure of the history
        if len(route) == 0: maxTime = computeMaxTime(startTime)
        endTime = fields[1]
        waitingTime = fields[2]
        distance = float(fields[3])
        totalDistance += distance
        lessThanBest = float(fields[4])
        averageSpeed = int(fields[5])
        startStation = fields[-2]
        endStation = fields[-1]
        for station in fields[-2:]:
            if not station in stations:
                sys.exit(COMMAND+": unknown station in file "+fileName+" : "+station+"\n")
        # register the track in both directions so it is not reused
        track = startStation+" "+endStation
        travelled[track] = True
        travelled[reverseTrack(track)] = True
        route.append({"startStation":startStation,"endStation":endStation,"startTime":startTime,"endTime":endTime,"distance":distance,"waitingTime":waitingTime,"averageSpeed":averageSpeed,"lessThanBest":lessThanBest})
    inFile.close()
    return({"travelled":travelled, "route":route, "distance":totalDistance})
def showSpeeds(index):
    """Print, per track "start end", the distinct average speeds found in the
    index, fastest first (used by the -S option)."""
    speeds = {}
    for key1 in index:
        startStation,time = key1.split()
        for prevStartStation in index[key1]:
            for endStation in index[key1][prevStartStation]:
                key2 = startStation+" "+endStation
                if not key2 in speeds: speeds[key2] = {}
                speeds[key2][int(index[key1][prevStartStation][endStation][0]["averageSpeed"])] = True
    for key2 in speeds:
        for speed in sorted(speeds[key2],reverse=True): sys.stdout.write(str(speed)+" ")
        print(key2)
# read track pairs that share a section
def readPartners():
    """Read PARTNERFILE: each data line names two track pairs (4 stations)
    that share a section, plus the shared distance. Returns a dict mapping
    each track (in both orientations) to a list of
    {"partner": track, "distance": float} entries."""
    partners = {}
    patternHashStart = re.compile("^#")
    try: inFile = open(PARTNERFILE,"r")
    # bug fix: the error path referenced the misspelled name PARTNETFILE,
    # which raised a NameError instead of printing the intended message
    except: sys.exit(COMMAND+": cannot read file "+PARTNERFILE+"\n")
    for line in inFile:
        line = line.rstrip()
        if patternHashStart.match(line): continue
        fields = line.split()
        if len(fields) < 5: sys.exit(COMMAND+": unexpected line in file "+PARTNERFILE+": "+line+"\n")
        station1,station2,station3,station4,distance = fields
        distance = float(distance)
        for station in fields[0:4]:
            if not station in stations:
                sys.exit(COMMAND+": unknown station in file "+PARTNERFILE+": "+station+"\n")
        # register the overlap in all four orientations
        if not station1+" "+station2 in partners: partners[station1+" "+station2] = []
        if not station2+" "+station1 in partners: partners[station2+" "+station1] = []
        if not station3+" "+station4 in partners: partners[station3+" "+station4] = []
        if not station4+" "+station3 in partners: partners[station4+" "+station3] = []
        partners[station1+" "+station2].append({"partner":station3+" "+station4,"distance":distance})
        partners[station2+" "+station1].append({"partner":station4+" "+station3,"distance":distance})
        partners[station3+" "+station4].append({"partner":station1+" "+station2,"distance":distance})
        partners[station4+" "+station3].append({"partner":station2+" "+station1,"distance":distance})
    inFile.close()
    return(partners)
def main(argv):
    """Parse the command-line options, load all data files, build the trip
    index and run the route search (or just show speeds with -S)."""
    global beamSize,doShowSpeeds,firstStation,globalStartTime,historyFile,ignoreTransferSafetyTimes,resetBestDistances
    global index,partners,stations,transfers,trainTrips,maxDistance,timeDistance,maxTime
    stations = readStations()
    options,args = getopt.getopt(argv,"b:f:hH:ins:S")
    if len(args) > 0: sys.exit(COMMAND+": unexpected extra argument: "+args[0])
    for option,value in options:
        if option == "-b": beamSize = float(value)
        elif option == "-f": firstStation = value
        elif option == "-h": help()
        elif option == "-H": historyFile = value
        elif option == "-i":
            ignoreTransferSafetyTimes = True
        elif option == "-n": resetBestDistances = True
        elif option == "-s": globalStartTime = value
        elif option == "-S": doShowSpeeds = True
    # validate the option values
    if firstStation != "" and not firstStation in stations:
        sys.exit(COMMAND+": unknown first station: "+firstStation+"\n")
    patternTime = re.compile("^\d\d:\d\d$")
    if not patternTime.match(globalStartTime):
        sys.exit(COMMAND+": unexpected start time argument value for -s: "+globalStartTime+"\n")
    maxTime = computeMaxTime(globalStartTime) # needs function to be computed
    if not resetBestDistances: timeDistance = readTimeDistance()
    trainTrips = readTrainTrips()
    transfers = readTransfers()
    index = makeIndex(trainTrips,transfers)
    partners = readPartners()
    if doShowSpeeds:
        showSpeeds(index)
        sys.exit()
    # search: either from scratch or continuing a partial journey (-H)
    if historyFile == "":
        findRoute(index,[],{},0)
    else:
        readRouteResults = readRoute(historyFile)
        findRoute(index,readRouteResults["route"],readRouteResults["travelled"],readRouteResults["distance"])
    writeTimeDistance(timeDistance)
# Script entry point; main() returns None, so the exit status is 0.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| StarcoderdataPython |
32993 | import numpy as np
import random
N = 10
def null(a, rtol=1e-5):
    """Return (rank, basis): the numerical rank of *a* and an orthonormal
    basis of its null space (as columns), computed via SVD with relative
    tolerance *rtol* on the singular values."""
    _, singular, vt = np.linalg.svd(a)
    rank = (singular > rtol * singular[0]).sum()
    return rank, vt[rank:].T.copy()
def gen_data(N, noisy=False):
    """Sample N 2-D points in [-1, 1]^2 plus a random separating hyperplane.

    Returns (X, y, w): X of shape (2, N), labels y = sign(w^T [1; X]) and the
    weight vector w (bias component first). Resampling repeats until np.all(y)
    is truthy, i.e. no label is exactly zero.
    NOTE(review): the *noisy* flag is accepted but never used here — confirm.
    """
    lower = -1
    upper = 1
    dim = 2
    X = np.random.rand(dim, N)*(upper-lower)+lower
    while True:
        # pick `dim` random points in homogeneous coordinates and use the
        # null space of their span as the hyperplane normal
        Xsample = np.concatenate(
            (np.ones((1, dim)), np.random.rand(dim, dim)*(upper-lower)+lower))
        k, w = null(Xsample.T)
        y = np.sign(np.dot(w.T, np.concatenate((np.ones((1, N)), X))))
        if np.all(y):
            break
    return (X, y, w)
def change_label(y):
    """Flip the sign of a random 10% of the labels in *y* (in place) and
    return it.

    NOTE(review): gen_data() returns y with a leading axis (shape (1, N));
    the fancy indexing below assumes a flat numpy array — confirm callers.
    """
    # bug fix: N/10 is a float on Python 3 and random.sample requires an
    # integer sample size; use floor division
    idx = random.sample(range(1, N), N // 10)
    y[idx] = -y[idx]
    return y
# Demo entry point: draw one sample data set and print the points.
if __name__ == '__main__':
    X, y, w = gen_data(10)
    print(X)
| StarcoderdataPython |
3382400 | <gh_stars>0
# coding=utf-8
# Copyright 2020 <NAME>.
"""TF2 Qtran Implementation."""
# Import all packages
from catch_prey.utils import batched_index
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Lambda
from catch_prey import replay2
import tensorflow as tf
class Qtran(object):
"""A TensorFlow2 implementation of Qtran."""
    def __init__(
        self,
        test_observations,
        nmbr_episodes,
        max_schedule,
        nmbr_agents,
        obs_shape,
        action_spec,
        q_bound,
        num_units_q,
        num_hidden_layers_q,
        num_units_joint,
        num_hidden_layers_joint,
        num_units_v,
        num_hidden_layers_v,
        batch_size,
        discount,
        temp_max,
        temp_min,
        replay_capacity,
        min_replay_size,
        target_update_period,
        optimizer_q,
        optimizer_v,
        optimizer_joint_q,
        learn_iters,
        device='cpu:*',
        seed=None,
        save_path=''
    ):
        """Build the QTRAN networks and training state.

        Creates, per the QTRAN decomposition: factored per-agent Q networks
        (plus a target copy), a joint Q network over the global state and the
        one-hot joint action (plus a target copy), and a state-value network V.
        action_spec is the per-agent number of discrete actions; obs_shape is
        the per-agent observation shape (its first entry is also handed to the
        replay buffer). NOTE(review): test_observations, nmbr_episodes and
        seed are accepted but not used in this constructor — confirm intent.
        """
        self._save_path = save_path
        self._device = device
        self._optimizer_q = optimizer_q
        self._optimizer_v = optimizer_v
        self._optimizer_joint_q = optimizer_joint_q
        # Hyperparameters.
        self._nmbr_agents = nmbr_agents
        self._batch_size = batch_size
        self._batch_size_tf = tf.constant(batch_size, dtype=tf.float32)
        self._num_actions = action_spec
        self._learning_updates = learn_iters
        self._target_update_period = target_update_period
        self._gamma = discount
        # bookkeeping counters
        self._total_steps = 0
        self._eps_count = 0
        self._replay = replay2.Replay(capacity=replay_capacity, num_agents=nmbr_agents, obs_shape=obs_shape[0])
        self._min_replay_size = min_replay_size
        # temperature schedule bounds (exploration), annealed over max_schedule
        self._temp_min = tf.constant(temp_min, dtype=tf.float32)
        self._temp_max = tf.constant(temp_max, dtype=tf.float32)
        self._temp = tf.Variable(temp_max, dtype=tf.float32)
        self._max_schedule = max_schedule
        self._learn_iter_counter = 0
        # global state: per-agent distances + angles + a time scalar
        # (matches the concatenation performed in _learn)
        state_shape = (nmbr_agents * 2 + 1, )
        # squash the joint Q output into [-q_bound, q_bound]
        q_out_fn = Lambda(lambda x: q_bound * tf.tanh(x))
        # factored per-agent Q network (shared across agents)
        self._q_fact = Sequential()
        self._q_fact.add(Dense(num_units_q, activation='relu', use_bias=True, input_shape=obs_shape))
        for _ in range(num_hidden_layers_q - 1):
            self._q_fact.add(Dense(num_units_q, activation='relu', use_bias=True))
        self._q_fact.add(Dense(action_spec, activation=None, use_bias=True))
        # target copy of the factored Q network
        self._q_fact_t = Sequential()
        self._q_fact_t.add(Dense(num_units_q, activation='relu', use_bias=True, input_shape=obs_shape))
        for _ in range(num_hidden_layers_q - 1):
            self._q_fact_t.add(Dense(num_units_q, activation='relu', use_bias=True))
        self._q_fact_t.add(Dense(action_spec, activation=None, use_bias=True))
        # joint Q network over [state, one-hot joint action]
        self._q_joint = Sequential()
        self._q_joint.add(Dense(num_units_joint, activation='relu', use_bias=True, input_shape=tuple([state_shape[0]+nmbr_agents*action_spec])))
        for _ in range(num_hidden_layers_joint - 1):
            self._q_joint.add(Dense(num_units_joint, activation='relu', use_bias=True))
        self._q_joint.add(Dense(1, activation=q_out_fn, use_bias=True))
        # target copy of the joint Q network
        self._q_joint_t = Sequential()
        self._q_joint_t.add(Dense(num_units_joint, activation='relu', use_bias=True, input_shape=tuple([state_shape[0]+nmbr_agents*action_spec])))
        for _ in range(num_hidden_layers_joint - 1):
            self._q_joint_t.add(Dense(num_units_joint, activation='relu', use_bias=True))
        self._q_joint_t.add(Dense(1, activation=q_out_fn, use_bias=True))
        # state-value network V(s)
        self._v = Sequential()
        self._v.add(Dense(num_units_v, activation='relu', use_bias=True, input_shape=state_shape))
        for _ in range(num_hidden_layers_v - 1):
            self._v.add(Dense(num_units_v, activation='relu', use_bias=True))
        self._v.add(Dense(1, use_bias=True))
    @tf.function
    def _learn(self, o_t, a_t, r_t, d_tp1, o_tp1):
        """Run one gradient step on the factored, joint and state-value networks.

        Args:
            o_t: per-agent observations at time t, shape (batch, nmbr_agents, obs_dim).
            a_t: integer joint actions, shape (batch, nmbr_agents).
            r_t: rewards at time t.
            d_tp1: continuation mask for t+1 (multiplies the bootstrap term,
                so 0 at terminal transitions).
            o_tp1: per-agent observations at time t+1.

        NOTE(review): the loss structure (per-agent factored Q, a centralized
        joint Q, and a state value correction gated on whether every agent
        acted greedily) resembles the QTRAN objective — confirm before relying
        on that name.
        """
        # Persistent tape: three separate gradient() calls are taken below.
        # watch_accessed_variables=False + explicit watch keeps the tape small.
        with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
            tape.watch(self._q_fact.trainable_weights + self._q_joint.trainable_weights + self._v.trainable_weights)
            # Build the global state from per-agent features: observation
            # column 0 ("distances") and column 3 ("angles") of every agent,
            # plus a scalar taken from o_t[:, 0, 0]. Note that this scalar is
            # the same slot as the first agent's distance — presumably the
            # time is stored there; TODO confirm the observation layout.
            distances = o_t[:, :, 0]
            angles = o_t[:, :, 3]
            time = o_t[:, 0, 0]
            s_t = tf.concat([distances, angles, tf.expand_dims(time, axis=1)], axis=1)
            distances_tp1 = o_tp1[:, :, 0]
            angles_tp1 = o_tp1[:, :, 3]
            time_tp1 = o_tp1[:, 0, 0]
            s_tp1 = tf.concat([distances_tp1, angles_tp1, tf.expand_dims(time_tp1, axis=1)], axis=1)
            # Network outputs
            v_t = self._v(s_t, training=True)
            # Joint Q takes the state concatenated with the one-hot joint action.
            one_hot_a_t = tf.reshape(tf.one_hot(a_t, self._num_actions, dtype=s_t.dtype),shape=(self._batch_size, self._num_actions * self._nmbr_agents))
            q_joint = self._q_joint(tf.concat([s_t,one_hot_a_t], axis=1), training=True)
            # Factored Q: one forward pass per agent on its own observation.
            q_fact = tf.stack([self._q_fact(o_t[:, k, :], training=True) for k in range(self._nmbr_agents)], axis=1)
            opt_a_t = tf.stop_gradient(tf.argmax(q_fact, axis=-1, output_type=a_t.dtype))
            # True where the taken action was the (current) greedy one, per agent.
            correct_acts = tf.math.equal(opt_a_t, a_t)
            # Sum over agents of the factored Q-values of the taken actions.
            q_tilde = tf.math.reduce_sum(batched_index(q_fact, a_t), axis=1)
            # Bootstrap target from the *target* networks at t+1, using the
            # target-factored greedy joint action.
            q_fact_tp1 = tf.stack([self._q_fact_t(o_tp1[:, k, :], training=True) for k in range(self._nmbr_agents)], axis=1)
            opt_a_tp1 = tf.stop_gradient(tf.argmax(q_fact_tp1, axis=-1, output_type=a_t.dtype))
            one_hot_a_tp1 = tf.reshape(tf.one_hot(opt_a_tp1, self._num_actions, dtype=s_tp1.dtype),shape=(self._batch_size, self._num_actions * self._nmbr_agents))
            q_joint_tp1 = self._q_joint_t(tf.concat([s_tp1, one_hot_a_tp1], axis=1), training=True)
            target = tf.stop_gradient(r_t + self._gamma * d_tp1 * q_joint_tp1)
            # Calculate errors
            delta_joint = q_joint - target
            # Factored error against the (frozen) joint Q, corrected by V.
            delta_fact = q_tilde + v_t - tf.stop_gradient(q_joint)
            delta_fact_min = tf.minimum(delta_fact, 0)
            # Calculate loss: squared factored error when all agents acted
            # greedily, otherwise only the one-sided (negative) part.
            c = tf.stop_gradient(tf.math.reduce_all(correct_acts, axis=1))
            not_c = tf.math.logical_not(c)
            c_float = tf.stop_gradient(tf.cast(c, target.dtype))
            not_c_float = tf.stop_gradient(tf.cast(not_c, target.dtype))
            loss_fact = tf.math.add(c_float * tf.square(delta_fact), not_c_float * tf.square(delta_fact_min), name='loss_fact')
            loss_joint = tf.square(delta_joint, name='loss_td')
        #Backprop: each network is trained by its own optimizer.
        q_fact_variables_to_train = self._q_fact.trainable_weights
        v_variables_to_train = self._v.trainable_weights
        q_joint_variables_to_train = self._q_joint.trainable_weights
        q_fact_grads = tape.gradient(loss_fact, q_fact_variables_to_train)
        v_grads = tape.gradient(loss_fact, v_variables_to_train)
        q_joint_grads = tape.gradient(loss_joint, q_joint_variables_to_train)
        self._optimizer_q.apply_gradients(list(zip(q_fact_grads, q_fact_variables_to_train)))
        self._optimizer_v.apply_gradients(list(zip(v_grads, v_variables_to_train)))
        self._optimizer_joint_q.apply_gradients(list(zip(q_joint_grads, q_joint_variables_to_train)))
@tf.function
def _update_target_nets(self):
with tf.device(self._device):
source_variables = self._q_fact.trainable_weights
target_variables = self._q_fact_t.trainable_weights
for (v_s, v_t) in zip(source_variables, target_variables):
v_t.shape.assert_is_compatible_with(v_s.shape)
v_t.assign(v_s)
source_variables = self._q_joint.trainable_weights
target_variables = self._q_joint_t.trainable_weights
for (v_s, v_t) in zip(source_variables, target_variables):
v_t.shape.assert_is_compatible_with(v_s.shape)
v_t.assign(v_s)
    @tf.function(input_signature=[
        tf.TensorSpec(shape=(None, 7), dtype=tf.float32),
        tf.TensorSpec(shape=( ), dtype=tf.bool),
        tf.TensorSpec(shape=(2,), dtype=tf.int32)])
    def policy(self, obs, exploit, c1_c2_games):
        """Select actions for a batch of observations.

        Args:
            obs: float32 observations, shape (None, 7).
            exploit: scalar bool tensor; True selects the greedy policy,
                False the Boltzmann (softmax) exploration policy.
            c1_c2_games: int32 pair; unused in this body — presumably kept so
                callers can use one shared input signature (TODO confirm).

        Returns:
            int32 action index/indices for the batch.
        """
        @tf.function
        def exploratoy_policy(temp):
            # Boltzmann exploration: sample from softmax(q_values / temp).
            q_values = self._q_fact(obs)
            return tf.squeeze(tf.random.categorical(tf.squeeze(q_values) / temp, 1, dtype=tf.int32))
        @tf.function
        def greedy_policy():
            q_values = self._q_fact(obs)
            return tf.squeeze(tf.argmax(q_values, axis=-1, output_type=tf.int32))
        # `exploit` is a tensor, so AutoGraph converts this `if` to a tf.cond.
        if exploit:
            actions = greedy_policy()
        else:
            # Temperature anneals linearly from temp_max toward temp_min as
            # the episode counter approaches max_schedule.
            actions = exploratoy_policy(temp=tf.math.maximum(self._temp_min, self._temp_max * (1 - self._eps_count / self._max_schedule)))
        return actions
def store_data(self, obs, actions, reward, discount, new_obs, active_games):
"""Stores new data in the replay buffer."""
new_data = {'old_obs': obs[active_games],
'actions': actions[active_games],
'rewards': reward[active_games],
'discount': discount[active_games],
'new_obs': new_obs[active_games]}
self._replay.add(new_data)
def update(self):
self._eps_count += 1
if self._replay.size >= self._min_replay_size:
for _l in range(self._learning_updates):
_, minibatch = self._replay.sample(self._batch_size)
tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]
self._learn(*tf_minibatch)
if self._learn_iter_counter % self._target_update_period == 0:
self._update_target_nets()
self._learn_iter_counter += 1
def load_model(self):
"""Load network weights"""
self._q_fact.load_weights(self._save_path + 'q_fact_net.h5')
self._q_fact_t.load_weights(self._save_path + 'q_fact_target_net.h5')
self._q_joint.load_weights(self._save_path + 'q_joint_net.h5')
self._q_joint_t.load_weights(self._save_path + 'q_joint_target_net.h5')
self._v.load_weights(self._save_path + 'v_net.h5')
def save_model(self):
"""Save network weights"""
self._q_fact.save_weights(self._save_path + 'q_fact_net.h5')
self._q_fact_t.save_weights(self._save_path + 'q_fact_target_net.h5')
self._q_joint.save_weights(self._save_path + 'q_joint_net.h5')
self._q_joint_t.save_weights(self._save_path + 'q_joint_target_net.h5')
self._v.save_weights(self._save_path + 'v_net.h5')
| StarcoderdataPython |
102704 | <filename>main/plugins/simsimi.py
#!/usr/bin/python
# coding=utf-8
import time
from datetime import datetime
from main import redis
from ..models import get_user_info, save_user_info
# 数据录入
def check_in(openid, benchpress,deadlift,squat,
             ytxs,fwc,gwxl,shoulderpress):
    """Record today's workout numbers for a user.

    Entries are rejected before 06:00 and when the user has already checked
    in today (determined by comparing the last check-in timestamp with
    today's midnight, both in milliseconds).

    Args:
        openid: WeChat user identifier.
        benchpress, deadlift, squat, shoulderpress: lift weights (kg).
        ytxs: pull-up count; fwc: push-up count; gwxl: lat pull-down weight
            (names match the display labels used in show_uese_info).

    Returns:
        A user-facing status message (Chinese).
    """
    current_milli_time = int(round(time.time() * 1000))
    current_hour = int(datetime.fromtimestamp(
        current_milli_time / 1000).strftime('%H'))
    # The bot "sleeps" before 6am: refuse early check-ins.
    if current_hour < 6:
        return '机器人还没起床,请于6点后录入'
    else:
        user_info = get_user_info(openid)
        # Millisecond timestamp of the user's last check-in.
        if user_info:
            # BUG FIX: long() is Python-2-only; int() behaves identically here
            # (ints promote automatically on Python 2), so this is portable.
            last_reg_time = int(time.mktime(user_info[0].regtime.timetuple()) * 1000.0 + user_info[0].regtime.microsecond / 1000.0)
        else:
            # No history: pretend the last check-in was 10 days ago.
            last_reg_time = current_milli_time-864000000
        # Millisecond timestamp of today's midnight.
        today_dt = datetime.fromtimestamp(
            current_milli_time / 1000).strftime('%Y-%m-%d')
        today_timestamp = int(round(time.mktime(
            datetime.strptime(today_dt, '%Y-%m-%d').timetuple()) * 1000))
        # A check-in after today's midnight means the user already checked in.
        if last_reg_time >= today_timestamp:
            return '今天已经记录过了 请不要重复记录\n回复【xx】查看记录'
        else:
            save_user_info(openid, benchpress, deadlift, squat,
                           ytxs, fwc, gwxl, shoulderpress)
            return '录入成功\n回复【xx】查看近期记录'
# 显示用户数据
def show_uese_info(openid):
    """Show the user's recent workout records, 5 at a time.

    A short-lived redis key 'mark' (reset after ~30s) acts as a paging
    cursor: repeating the query within 30 seconds shows older records.

    NOTE(review): the function name is misspelled ('uese' for 'user') but is
    kept as-is for caller compatibility.
    """
    # Read/advance the paging cursor. The third argument to redis.set is
    # presumably a 30-second expiry — confirm against the redis client in use.
    if redis.get('mark'):
        mark = int(redis.get('mark'))
        redis.set('mark', int(redis.get('mark'))+1, 30)
    else:
        mark = 0
        redis.set('mark', 1, 30)
    user_info = get_user_info(openid, n=mark)
    if user_info:
        # One formatted line-group per record: date, then lifts and counts.
        m = ['%s \n卧推->%03dkg 硬拉->%03dkg\n深蹲->%03dkg 引体->%03d个\n俯卧撑->%03d个 肩推->%03dkg\n高位下拉->%03dkg\n-------------------------------' % (u.regtime.strftime("%Y-%m-%d"), u.benchpress, u.deadlift, u.squat, u.ytxs, u.fwc, u.shoulderpress, u.gwxl)
             for u in user_info]
        m.append('30秒继续回复xx 查看更早5条')
    else:
        m = ['没有发现数据', '请核实', '或者等待30S重试']
    return '\n'.join(m)
| StarcoderdataPython |
1723028 | <reponame>firefly2442/aoc-2018<gh_stars>0
from modules import console
from modules.config.enums import Actions, SleepState
from modules.guard import GuardProcessor
from .state import State
from .state_machine import FiniteStateMachine
class SleepingState(State):
    """FSM state: the guard is asleep; waits for a WAKING or end-of-log event."""
    @staticmethod
    def execute(fsm: FiniteStateMachine, line):
        # console.log('State', 'Sleeping state')
        processed = GuardProcessor.parse_line(line)
        if processed.action == Actions.WAKING:
            # Mark every minute in [started_sleeping, end_sleep) as asleep,
            # then hand control back to the waking state.
            end_sleep = int(processed.minute)
            sleep_range = end_sleep - fsm.context.started_sleeping
            for index in range(fsm.context.started_sleeping, sleep_range + fsm.context.started_sleeping):
                fsm.context.schedule[index] = SleepState.ASLEEP
            fsm.context.started_sleeping = None
            from .waking_state import WakingState
            fsm.to(WakingState)
        elif processed.action == Actions.NONE:
            # BUG(review): LogDetails is not imported anywhere in this module,
            # so this branch raises NameError when reached. Locate where
            # LogDetails is defined and add the import.
            fsm.context.log.append(LogDetails(fsm.context.date, fsm.context.guard, fsm.context.schedule))
            from .end_state import EndState
            fsm.to(EndState)
| StarcoderdataPython |
1629511 | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import layers
import numpy as np
import csv
import sys
import os
# Import utility functions from 'utils.py' file
from utils import checkFolders, show_variables, add_suffix, backup_configs
# Import convolution layer definitions from 'convolution layers.py' file
from convolution_layers import conv2d_layer, inception_v3, transpose_conv2d_layer, transpose_inception_v3, dense_layer, factored_conv2d, upsample
"""
U-Net model with optional KL-divergence (post-activation),
decoder-only batch-normalization, optional upsampling,
and probabilistic loss according to MVN prediction.
"""
# Encoder component of VAE model
def encoder(self, x, training=True, reuse=None, name=None):
    """Encoder half of the U-Net model.

    Defined at module level but invoked as `self.encoder(self, ...)`, so
    `self` (the model object carrying the hyper-parameters) is passed
    explicitly.

    Args:
        x: tuple (data, mesh, _); `mesh > 0` marks interior points.
        training: bool or bool tensor; enables noise injection and dropout.
        reuse, name: passed through to the layer constructors.

    Returns:
        Tuple (h1, ..., h6) of feature maps at decreasing resolution; h6 is
        the bottleneck (with 2x channels holding mean/log-sigma when
        `use_kl` is set — see evaluate_model).
    """
    # Unpack data
    data, mesh, __ = x
    if self.use_noise_injection:
        # During training, add Gaussian noise to the data, then zero out
        # everything outside the mesh interior.
        interior_indices = tf.greater(mesh, 0)
        zero_tensor = tf.zeros_like(data)
        noisy_data = tf.distributions.Normal(loc=data, scale=self.noise_level*tf.ones_like(data), name='noisy_data').sample()
        data = tf.cond(training, lambda: noisy_data, lambda: data)
        data = tf.where(interior_indices, data, zero_tensor)
    if not (self.alt_res == 128):
        data = tf.image.resize_images(data, [self.alt_res, self.alt_res])
    # Downsample x2; 48 feature maps. (Original shape comments were stale.)
    h1 = conv2d_layer(data, 48, kernel_size=5, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_1')#, coordconv=self.coordconv)
    h1 = layers.max_pooling2d(h1, 2, 2, padding='same', data_format='channels_last', name='e_pool_1')
    # Downsample x2; 48 feature maps (optionally a factored convolution).
    if self.factor:
        h2 = factored_conv2d(h1, 48, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_2')
        h2 = layers.max_pooling2d(h2, 2, 2, padding='same', data_format='channels_last', name='e_pool_2')
    else:
        h2 = conv2d_layer(h1, 48, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_2')#, coordconv=self.coordconv)
        h2 = layers.max_pooling2d(h2, 2, 2, padding='same', data_format='channels_last', name='e_pool_2')
    # Same resolution as h2; 80 feature maps.
    h3 = inception_v3(h2, 80, stride=1, batch_norm=False, training=training, reuse=reuse, name='e_incept_1')
    # Downsample x2; 80 feature maps.
    h4 = conv2d_layer(h3, 80, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_3')#, coordconv=self.coordconv)
    h4 = layers.max_pooling2d(h4, 2, 2, padding='same', data_format='channels_last', name='e_pool_3')
    # Downsample x2; 150 feature maps (inception or plain conv).
    if self.use_inception:
        h5 = inception_v3(h4,150, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_incept_4')
        h5 = layers.max_pooling2d(h5, 2, 2, padding='same', data_format='channels_last', name='e_pool_4')
    else:
        h5 = conv2d_layer(h4, 150, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_4')#, coordconv=self.coordconv)#, activation=None)
        h5 = layers.max_pooling2d(h5, 2, 2, padding='same', data_format='channels_last', name='e_pool_4')
    # Bottleneck: 512 channels when KL is used (mean + log-sigma halves),
    # 256 otherwise; activation omitted in the KL case so the raw
    # mean/log-sigma values are preserved.
    chans = 512 if self.use_kl else 256
    omit = True if self.use_kl else False
    h6 = inception_v3(h5, chans, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='e_incept_5',
                      omit_activation=omit)
    h6 = layers.max_pooling2d(h6, 2, 2, padding='same', data_format='channels_last', name='e_pool_5')
    if self.coordconv:
        h6 = conv2d_layer(h6, chans, kernel_size=2, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_6', coordconv=self.coordconv)
    # Without KL the dropout happens here; with KL it is applied after
    # sampling (see evaluate_model), unless use_extra_dropout also adds it here.
    if not self.use_kl:
        h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
    elif self.use_extra_dropout:
        h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
    return h1, h2, h3, h4, h5, h6
# Decoder component of VAE model
def decoder(self, z, training=True, reuse=None, name=None):
    """Decoder half of the U-Net model with skip connections.

    Upsamples the bottleneck back to full resolution, concatenating the
    encoder features (h5, h4, h3) at matching scales. Produces two heads:
    the mean prediction `h` and a scale map `s` (for the probabilistic
    loss in evaluate_model).

    Returns:
        (prediction, scale) — prediction is named `name` via tf.identity.
    """
    # Note: h2 and h3 have same resolution
    h1, h2, h3, h4, h5, h6 = z
    # h6 ~ [None, 4, 4, 256]
    h = h6
    h = inception_v3(h, 256, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_incept_0')
    h = tf.layers.dropout(h, rate=self.dropout_rate, training=training)
    if self.coordconv:
        h = conv2d_layer(h, 256, kernel_size=2, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='d_conv_0', coordconv=self.coordconv)
    # When interpolating, transposed convs keep stride 1 and explicit
    # upsample() calls grow the resolution instead.
    stride = 1 if self.interpolate else 2
    h = transpose_conv2d_layer(h, 150, kernel_size=2, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_0')#, coordconv=self.coordconv)
    if self.interpolate:
        h = upsample(h, 4*2)
    # Skip connection from encoder level 5.
    h = tf.concat([h, h5],3)
    h = transpose_conv2d_layer(h, 80, kernel_size=2, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_1')#, coordconv=self.coordconv)
    if self.interpolate:
        h = upsample(h, 4*2*2)
    # Skip connection from encoder level 4.
    h = tf.concat([h, h4],3)
    # NOTE(review): both branches below are identical — presumably a
    # placeholder for a 'symmetric' variant that was never implemented.
    if self.symmetric:
        h = transpose_inception_v3(h, 80, stride=stride, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_tincept_2')
    else:
        h = transpose_inception_v3(h, 80, stride=stride, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_tincept_2')
    if self.interpolate:
        h = upsample(h, 4*2*2*2)
    # Skip connection from encoder level 3.
    h = tf.concat([h, h3],3)
    # Two parallel heads: h_m (mean prediction) and h_s (scale).
    h_m = transpose_conv2d_layer(h, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=1, training=training, reuse=reuse, name='d_tconv_2_1')#, coordconv=self.coordconv)
    h_m = transpose_conv2d_layer(h_m, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_2_2')
    h_s = transpose_conv2d_layer(h, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=1, training=training, reuse=reuse, name='d_tconv_2_1_s')#, coordconv=self.coordconv)
    h_s = transpose_conv2d_layer(h_s, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_2_2_s')
    if self.interpolate:
        h_m = upsample(h_m, 4*2*2*2*2)
        h_s = upsample(h_s, 4*2*2*2*2)
    #h = tf.concat([h, h1],3)
    # Final upsample to full resolution; single channel, no activation/bias.
    s = transpose_conv2d_layer(h_s, 1, kernel_size=6, batch_norm=False, stride=2, activation=None,
                               add_bias=False, training=training, reuse=reuse, name='d_tconv_3_s')
    h = transpose_conv2d_layer(h_m, 1, kernel_size=6, batch_norm=False, stride=2, activation=None,
                               add_bias=False, training=training, reuse=reuse, name='d_tconv_3_m')
    # Assign name to final output
    return tf.identity(h, name=name), s
# Evaluate model on specified batch of data
def evaluate_model(self, data, reuse=None, training=True, suffix=None):
    """Run a full encode/sample/decode pass and compute the losses.

    Invoked as `self.evaluate_model(self, ...)`; `suffix` is appended to
    tensor names so the graph can host multiple evaluation branches.

    Returns:
        (masked_soln, masked_pred, masked_scale,
         interior_loss, boundary_loss, kl_loss, prob_loss)
        When `use_kl` is off, kl_loss degenerates to the constant `kl_wt`.
    """
    # Encode input images
    z = self.encoder(self, data, training=training, reuse=reuse, name=add_suffix("encoder", suffix))
    # Sample in latent spaces
    if self.use_kl:
        # The bottleneck channels hold (mean, log-sigma); split, draw a
        # reparameterized sample, then apply the (post-sampling) dropout.
        h1, h2, h3, h4, h5, h6 = z
        m, log_s = tf.split(h6, num_or_size_splits=2, axis=3)
        h6 = self.sampleGaussian(m, log_s, name='latent_sample')
        h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
        z = [h1, h2, h3, h4, h5, h6]
        #if self.reduce_noise:
        #    # Use KL divergence w.r.t. N(0, 0.1*I)
        #    # by comparing with 10*sigma ~ log(10*sigma) ~ log(10) + log(sigma)
        #    kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,tf.add(10.0*tf.ones_like(log_s),log_s))])
        #else:
        #    kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
        kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
    else:
        # No KL: just apply the activation that the encoder omitted.
        h1, h2, h3, h4, h5, h6 = z
        h6 = tf.nn.leaky_relu(h6)
        z = [h1, h2, h3, h4, h5, h6]
        # Compute Kullback–Leibler (KL) divergence
        kl_loss = self.kl_wt
    # Decode latent vector back to original image
    pred = self.decoder(self, z, training=training, reuse=reuse, name=add_suffix("pred", suffix))
    # Compute marginal likelihood loss
    masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, prob_loss = self.compute_ms_loss(data, pred, name=add_suffix("ms_loss", suffix))
    # Assign names to outputs
    masked_soln = tf.identity(masked_soln, name=add_suffix('masked_soln', suffix))
    masked_pred = tf.identity(masked_pred, name=add_suffix('masked_pred', suffix))
    masked_scale = tf.identity(masked_scale, name=add_suffix('masked_scale', suffix))
    return masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, kl_loss, prob_loss
| StarcoderdataPython |
1798592 | # Imports
# ============================================================================
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import re
import os
from ansible.errors import AnsibleError
def startswith(string, prefix):
    '''Return True if *string* begins with *prefix*.

    Thin wrapper around ``str.startswith`` so it can be exposed as an
    Ansible test plugin.

    >>> startswith('bigfoot', 'big')
    True
    >>> startswith('bigfoot', 'foot')
    False
    >>> startswith('anything', '')
    True
    '''
    return string.startswith(prefix)
class TestModule(object):
    '''Ansible test plugin exposing simple string tests.'''

    def tests(self):
        # 'startwith' is an alias of 'startswith' — presumably kept for
        # backward compatibility with playbooks using the misspelled name.
        mapping = {'startswith': startswith}
        mapping['startwith'] = startswith
        return mapping
# Run this module's doctests when executed directly.
# (Previous comment referred to camel_case, a leftover from another filter.)
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| StarcoderdataPython |
192127 | <reponame>NorthernForce/frisbee-to-raspberry-pi
# This class provides a way to drive a robot which has a drive train equipped
# with separate motors powering the left and right sides of a robot.
# Two different drive methods exist:
# Arcade Drive: combines 2-axes of a joystick to control steering and driving speed.
# Tank Drive: uses two joysticks to control motor speeds left and right.
# A Pololu Maestro is used to send PWM signals to left and right motor controllers.
# When using motor controllers, the maestro's speed setting can be used to tune the
# responsiveness of the robot. Low values dampen acceleration, making for a more
# stable robot. High values increase responsiveness, but can lead to a tippy robot.
# Try values around 50 to 100.
RESPONSIVENESS = 60  # Maestro "speed" setting; low values dampen acceleration.

# Motor controller limits, measured in Maestro units.
# These default values typically work fine and align with maestro's default
# limits. Values should be adjusted so that CENTER stops the motor and
# MIN/MAX bound the speed range you want for your robot.
MIN = 4000
CENTER = 6000
MAX = 8000

class SimpleServo:
    """Drives a single motor-controller channel on a Pololu Maestro.

    Pass the maestro controller object and the maestro channel number used
    for the motor controller. See maestro.py on how to instantiate maestro.
    """

    def __init__(self, maestro, channel):
        self.maestro = maestro
        self.channel = channel
        # Init motor accel/speed params.
        self.maestro.setAccel(self.channel, 0)
        self.maestro.setSpeed(self.channel, RESPONSIVENESS)
        # Motor min/center/max target values (Maestro units).
        self.min = MIN
        self.center = CENTER
        self.max = MAX

    def drive(self, amount):
        """Drive the motor; `amount` is -1.0 (full reverse) to 1.0 (full forward)."""
        # Map the normalized amount onto the (possibly asymmetric)
        # [min, center, max] target range.
        if (amount >= 0):
            target = int(self.center + (self.max - self.center) * amount)
        else:
            target = int(self.center + (self.center - self.min) * amount)
        self.maestro.setTarget(self.channel, target)

    def stop(self):
        """Stop the motor by commanding the center (neutral) target.

        BUG FIX: previously called setAccel(channel, center), which only set
        the acceleration parameter and did not stop the motor.
        """
        self.maestro.setTarget(self.channel, self.center)

    def close(self):
        """Stop the motor; call when shutting down this object."""
        self.stop()
| StarcoderdataPython |
1768257 | <reponame>flopezag/fiware-tsc-dashboard
from github import Github
from config.settings import GITHUB_TOKEN
__author__ = '<NAME>'
# Aggregate download, issue, contributor and popularity statistics for the
# telefonicaid/fiware-orion GitHub repository.
gh = Github(login_or_token=GITHUB_TOKEN)

repo = gh.get_user('telefonicaid').get_repo("fiware-orion")

# Release asset downloads.
releases = repo.get_releases()

download_count = n_assets = 0
for rel in releases:
    assets = rel.raw_data['assets']
    for asset in assets:
        n_assets += 1
        download_count += asset['download_count']

print('#assets={}, #downloads={}'.format(n_assets, download_count))

openIssues = repo.get_issues()
totalIssues = repo.get_issues(state='all')

closedIssues = len(list(totalIssues)) - len(list(openIssues))

print('Total issues (Open/Closed): {} / {}'.format(len(list(openIssues)), closedIssues))

# AUTHORS: adopters are issue reporters who are not code contributors.
authors = [users.author.login for users in repo.get_stats_contributors()]
reporterIssues = [users.user.login for users in totalIssues]

adopters = list(set(reporterIssues)-set(authors))
print("Total number of adopters: {}".format(len(adopters)))

# TOTAL NUMBER OF ISSUES ONLY FOR ADOPTERS
# BUG FIX: on Python 3, filter() returns a lazy iterator, so the results must
# be materialized with list() before len() can be taken (on Python 2 this is
# a no-op copy).
openIssuesAdopters = list(filter(lambda x: x.user.login in adopters, list(openIssues)))
totalIssuesAdopters = list(filter(lambda x: x.user.login in adopters, list(totalIssues)))

closedIssuesAdopters = len(totalIssuesAdopters) - len(openIssuesAdopters)

print('Total issues by adopters (Open/Closed): {} / {}'.format(len(list(openIssuesAdopters)), closedIssuesAdopters))

# COMMITS only for default branch and gh-pages
out = list()
out.append(len(list(repo.get_commits(sha=repo.default_branch))))

try:
    out.append(len(list(repo.get_commits(sha='gh-pages'))))
except Exception as e:
    # Best effort: the repository may have no gh-pages branch.
    print(e)

result = sum(out)

print("Total number of commits in default and gh-pages branches: {}".format(result))

print("Total forks: {}".format(repo.forks))
print("Total watchers: {}".format(repo.subscribers_count))
print("Total stars: {}".format(repo.watchers))
| StarcoderdataPython |
1731144 | import os
from PIL import Image
import numpy as np
from scipy.interpolate import griddata
import cv2
import argparse
def getSymXYcoordinates(iuv, resolution=256, dp_uv_lookup_256_np=None):
    """Compute UV->XY coordinates from an IUV map and its left/right mirror.

    Texture pixels not covered by the direct mapping are filled from the
    mirrored (symmetric) body, which helps with occluded body parts.

    Returns:
        (combined_texture, combined_mask, mirror_only_mask)
    """
    if dp_uv_lookup_256_np is None:
        dp_uv_lookup_256_np = np.load('util/dp_uv_lookup_256.npy')
    # Direct mapping, and mapping of the mirrored IUV image (flip_iuv works
    # in place, hence the copy).
    direct_xy, direct_mask = getXYcoor(iuv, resolution=resolution,
                                       dp_uv_lookup_256_np=dp_uv_lookup_256_np)
    mirror_xy, mirror_mask = getXYcoor(flip_iuv(np.copy(iuv)), resolution=resolution,
                                       dp_uv_lookup_256_np=dp_uv_lookup_256_np)
    # Keep the mirrored mapping only where the direct one has no coverage.
    mirror_only_mask = np.clip(mirror_mask - direct_mask, a_min=0, a_max=1)
    combined_texture = (direct_xy * np.expand_dims(direct_mask, 2)
                        + mirror_xy * np.expand_dims(mirror_only_mask, 2))
    combined_mask = np.clip(direct_mask + mirror_only_mask, a_min=0, a_max=1)
    return combined_texture, combined_mask, mirror_only_mask
def flip_iuv(iuv):
    """Mirror a DensePose IUV map left<->right, in place.

    Part labels are swapped with their symmetric counterparts, and the U
    (head/hands) or V (torso) coordinate is reflected where the part chart
    requires it. Note: `iuv` is modified in place; callers pass np.copy()
    when the original must be preserved.
    """
    # Index k holds the mirror label of part label k (0 = background).
    part_mirror = [0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13,
                   16, 15, 18, 17, 20, 19, 22, 21, 24, 23]
    i_chan = iuv[:, :, 0]
    u_chan = iuv[:, :, 1]
    v_chan = iuv[:, :, 2]
    labels_before = np.copy(i_chan)
    for label in range(1, 25):
        if label not in labels_before:
            continue
        mask = labels_before == label
        if part_mirror[label] != label:
            i_chan[mask] = part_mirror[label]
        if label in (3, 4, 23, 24):  # head and hands: reflect U
            u_chan[mask] = 255 - u_chan[mask]
        if label in (1, 2):  # torso: reflect V
            v_chan[mask] = 255 - v_chan[mask]
    return np.stack([i_chan, u_chan, v_chan], 2)
def getXYcoor(iuv, resolution=256, dp_uv_lookup_256_np=None):
    """Build a dense texture-space -> image-space coordinate map.

    The sparse (u, v) -> (x, y) correspondences from mapper() are scattered
    onto a resolution x resolution grid and densified by interpolation.

    Returns:
        (uv_xy, uv_mask_d): per-texel (x, y) coordinates and the dilated
        coverage mask.
    """
    x, y, u, v = mapper(iuv, resolution, dp_uv_lookup_256_np=dp_uv_lookup_256_np)
    nx, ny = resolution, resolution
    # Coverage mask: mark all four integer texels surrounding each
    # continuous (u, v) sample.
    uv_mask = np.zeros((ny,nx))
    uv_mask[np.ceil(v).astype(int),np.ceil(u).astype(int)]=1
    uv_mask[np.floor(v).astype(int),np.floor(u).astype(int)]=1
    uv_mask[np.ceil(v).astype(int),np.floor(u).astype(int)]=1
    uv_mask[np.floor(v).astype(int),np.ceil(u).astype(int)]=1
    # Dilate so the interpolation below also fills small gaps between samples.
    kernel = np.ones((3,3),np.uint8)
    uv_mask_d = cv2.dilate(uv_mask, kernel, iterations=1)
    # A meshgrid of pixel coordinates
    X, Y = np.meshgrid(np.arange(0, nx, 1), np.arange(0, ny, 1))
    YX = np.stack([Y, X], -1)
    # Interpolate the image-space (x, y) values at every masked texel.
    xy = np.stack([x, y], -1)
    uv_xy = np.zeros((ny, nx, 2))
    uv_mask_b = uv_mask_d.astype(bool)
    uv_xy[uv_mask_b] = griddata((v, u), xy, YX[uv_mask_b], method='linear')
    # Fallback: texels where linear interpolation produced NaN (outside the
    # convex hull) are refilled with nearest-neighbour values.
    # NOTE(review): YX[nan_mask] yields a flat 1-D array, not (k, 2) points;
    # this relies on griddata/reshape behaviour that looks fragile — verify
    # this branch with data that actually triggers NaNs.
    nan_mask = np.isnan(uv_xy) & uv_mask_b[:, :, None]
    uv_xy[nan_mask] = griddata((v, u), xy, YX[nan_mask], method='nearest').reshape(-1)
    return uv_xy, uv_mask_d
def mapper(iuv, resolution=256, dp_uv_lookup_256_np=None):
    """Map every labelled IUV pixel to continuous texture coordinates.

    Args:
        iuv: (H, W, 3) array of DensePose part index, U and V (0-255).
        resolution: texture-space resolution to scale into.
        dp_uv_lookup_256_np: (24, 256, 256, 2) lookup table from discrete
            (part, v, u) to normalized SMPL (u, v).

    Returns:
        (xx, yy, u_f, v_f): image x/y coordinates of the labelled pixels and
        their texture-space coordinates in [0, resolution - 1].
    """
    height, width, _ = iuv.shape
    foreground = iuv[:, :, 0] > 0
    labelled = iuv[foreground].astype(int)
    # Image-space coordinates of the labelled pixels.
    cols, rows = np.meshgrid(np.arange(width, dtype=int),
                             np.arange(height, dtype=int))
    xx_rgb = cols[foreground]
    yy_rgb = rows[foreground]
    part_idx = labelled[:, 0] - 1  # part labels are 1-based
    u_px = labelled[:, 1]
    v_px = labelled[:, 2]
    uv_smpl = dp_uv_lookup_256_np[part_idx, v_px, u_px]
    u_f = uv_smpl[:, 0] * (resolution - 1)
    v_f = (1 - uv_smpl[:, 1]) * (resolution - 1)  # flip the V axis
    return xx_rgb, yy_rgb, u_f, v_f
if __name__ == "__main__":
    # Batch driver: for every image listed in --image_file, load its
    # DensePose IUV map and write the symmetric UV coordinate data.
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_file', type=str, help="path to image file to process. ex: ./train.lst")
    parser.add_argument("--save_path", type=str, help="path to save the uv data")
    parser.add_argument("--dp_path", type=str, help="path to densepose data")
    args = parser.parse_args()
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    # Read one image name per line.
    # NOTE(review): the file handle is never closed; consider a `with` block.
    images = []
    f = open(args.image_file, 'r')
    for lines in f:
        lines = lines.strip()
        images.append(lines)
    for i in range(len(images)):
        im_name = images[i]
        print ('%d/%d'%(i+1, len(images)))
        # The IUV map is expected at <dp_path>/<basename>_iuv.png.
        dp = os.path.join(args.dp_path, im_name.split('.')[0]+'_iuv.png')
        iuv = np.array(Image.open(dp))
        h, w, _ = iuv.shape
        # Channel 0 all zero means no body parts were detected.
        if np.sum(iuv[:,:,0]==0)==(h*w):
            print ('no human: invalid image %d: %s'%(i, im_name))
        else:
            uv_coor, uv_mask, uv_symm_mask = getSymXYcoordinates(iuv, resolution = 512)
            # Save the coordinate array plus the coverage masks as 8-bit PNGs.
            np.save(os.path.join(args.save_path, '%s_uv_coor.npy'%(im_name.split('.')[0])), uv_coor)
            mask_im = Image.fromarray((uv_mask*255).astype(np.uint8))
            mask_im.save(os.path.join(args.save_path, im_name.split('.')[0]+'_uv_mask.png'))
            mask_im = Image.fromarray((uv_symm_mask*255).astype(np.uint8))
            mask_im.save(os.path.join(args.save_path, im_name.split('.')[0]+'_uv_symm_mask.png'))
| StarcoderdataPython |
73873 | <filename>test_op_detect.py
import unittest
import operation_detection
class TestOpDetect(unittest.TestCase):
    """Unit tests for operation_detection.isList / isMath.

    BUG FIX: stray dataset residue ('| StarcoderdataPython') fused onto the
    last assertion was removed; it raised NameError at runtime.
    """

    def testIsListTrue(self):
        # A plain s-expression of atoms is a list.
        test_string = "(a b)"
        ret_val = operation_detection.isList(test_string)
        self.assertTrue(ret_val)

    def testIsListFalse(self):
        # An expression starting with an operator is not a plain list.
        test_string = "(+ a b)"
        ret_val = operation_detection.isList(test_string)
        self.assertFalse(ret_val)

    def testIsListEmpty(self):
        # The empty s-expression counts as a list.
        test_string = "()"
        ret_val = operation_detection.isList(test_string)
        self.assertTrue(ret_val)

    def testIsMathTrue(self):
        test_string = "(+ a b)"
        ret_val = operation_detection.isMath(test_string)
        self.assertTrue(ret_val)

    def testIsMathFalse(self):
        # 'cons' is a list operation, not a math operator.
        test_string = "(cons a b)"
        ret_val = operation_detection.isMath(test_string)
        self.assertFalse(ret_val)
7122 | <filename>src/ITN/srmg/core/RiemannianRight.py
#!/usr/bin/env python
# coding=utf-8
'''
Author: <NAME> / Yulv
Email: <EMAIL>
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: <NAME>
LastEditTime: 2022-03-23 00:52:55
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/RiemannianRight.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: <NAME> (3 Oct 2018)
# Copyright (c) 2006-2017, <NAME>, <NAME>, <NAME>
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of codes to compare the computing of the different types of means on Lie groups.
These codes can be used to reproduce the experiments illustrated in the video developed for the
MICCAI Educational challenge 2014, available at: url of the video.
:Authors:
`<NAME> <website>`
`<NAME> <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
----------
(1) Defining a mean on Lie group.
<NAME>. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
from srmg.common.group import *
from srmg.common.util import *
# Relative convergence tolerance for the Fréchet-mean fixed-point iteration
# (see frechetR): iterate until the step norm drops below EPS * variance.
EPS = 1e-5
def riemExpR(a,f0,v):
    """
    Riemannian exponential map at an arbitrary point f0 (right-invariant
    metric, per this module's naming).

    The tangent vector `v` at f0 is pulled back to the identity through the
    (least-squares) inverse of the right-translation Jacobian jR(f0), mapped
    with riemExpIdR, and the result composed with f0.

    Attributes:
        a: metric parameter weighting the last three coordinates (see qR)
        f0: base SE(3) element in the principal chart (6-vector)
        v: tangent vector at f0 (6-vector)
    Return:
        f: group element exp_f0(v) (6-vector)
    """
    f = grpCompose((riemExpIdR(a, numpy.linalg.lstsq(jR(f0),v)[0])), f0)
    return f
def riemExpIdR(a,v):
    """
    Riemannian exponential map at the identity (right-invariant metric).

    The regularized, negated tangent vector is scaled (the last three
    components by `a`) and the group inverse taken.

    Attributes:
        a: metric parameter applied to the last three coordinates
        v: tangent vector at the identity (6-vector)
    Return:
        f: group element exp_Id(v) (6-vector)
    """
    v=grpReg(-v);
    f = numpy.zeros(6)
    f[0:3] = v[0:3]
    f[3:6] = a * v[3:6]
    f = grpInv(f)
    return f
def sigma2R(a,m,tabf,tabw):
    """
    Weighted variance of the data points around `m` under the
    right-invariant metric: sum_i w_i * ||log_m(f_i)||^2_m.

    Attributes:
        a: metric parameter (see qR)
        m: reference point (6-vector)
        tabf: data points (Nx6 array)
        tabw: per-point weights (N array)
    Return:
        The weighted variance, or 0 (with an error message) when fewer than
        two points are given.
    """
    siz = tabf.shape[0]
    if siz < 2:
        print('Error: Calculating variance requires at least 2 points')
        return 0
    s = 0
    for i in range(0,siz):
        s = s + tabw[i] * normA2R(a,m,riemLogR(a,m,tabf[i,:]));
    return s
def riemLogR(a,f0,f):
    """
    Riemannian logarithm at f0 (right-invariant metric): the tangent vector
    at f0 pointing toward f.

    Computed by translating f back to the identity (composition with the
    inverse of f0), taking the log at the identity, and pushing forward with
    the right-translation Jacobian jR(f0).

    Attributes:
        a: metric parameter (see qR)
        f0: base point (6-vector)
        f: target point (6-vector)
    Return:
        v: tangent vector at f0 (6-vector)
    """
    v=numpy.dot(jR(f0),riemLogIdR(a,grpCompose(f,grpInv(f0))))
    return v
def riemLogIdR(a,f):
    """
    Riemannian logarithm at the identity (right-invariant metric).

    The rotation part is copied, and the translation part rotated by the
    inverse rotation rotMat(-f[0:3]). Note: `a` is accepted for interface
    symmetry with riemLogR/riemExpIdR but is not used here.

    Attributes:
        a: metric parameter (unused in this function)
        f: group element in the principal chart (6-vector)
    Return:
        v: tangent vector at the identity (6-vector)
    """
    v = numpy.zeros(6)
    v[0:3] = f[0:3]
    v[3:6] = numpy.dot(rotMat(-f[0:3]),f[3:6]);
    return v
def qR(a,f):
    """
    Right-invariant inner product matrix at `f` in the principal chart
    (propagation of the Frobenius inner product from the identity).

    The metric at the identity is block-diagonal: identity on the first
    three coordinates and a*I on the last three; it is propagated to f via
    the inverse right-translation Jacobian: J^-T g0 J^-1.

    Attributes:
        a: weight of the last three coordinates in the identity metric
        f: group element (6-vector)
    Return:
        g: 6x6 metric matrix at f
    """
    f = grpReg(f)
    g0 = numpy.zeros([6,6])
    g0[0:3,0:3] = numpy.eye(3)
    g0[3:6,3:6] = a * numpy.eye(3)
    g = numpy.dot(numpy.dot(numpy.linalg.inv(jR(f).T) , g0) , numpy.linalg.inv(jR(f)))
    return g
def jR(f):
    """
    Jacobian of the right translation in the principal chart.

    Block structure: jRotR on the rotation part, -skew(translation) coupling
    block, identity on the translation part. (The original docstring said
    SO(3); the function operates on 6-vector SE(3) chart elements — confirm
    the intended wording against the companion paper.)

    Attributes:
        f: group element (6-vector)
    Return:
        Jr: 6x6 Jacobian matrix
    """
    #f = makeColVector(f,6); # unnecessary if 1D
    f = grpReg(f);
    Jr = numpy.zeros([6,6])
    Jr[0:3,0:3] = jRotR(f[0:3]);
    Jr[3:6,0:3] = -skew(f[3:6]);
    Jr[3:6,3:6] = numpy.eye(3);
    return Jr
def normA2R(a,f,v):
    """
    Squared norm of tangent vector `v` at point `f` under the
    right-invariant metric: v^T qR(a, f) v.

    Attributes:
        a: metric parameter (see qR)
        f: base point (6-vector)
        v: tangent vector at f (6-vector)
    Return:
        n: the squared norm (scalar)
    """
    v=grpReg(v);
    n=numpy.dot(numpy.dot(v.T,qR(a,f)),v);
    return n
def _frechet_step_R(a, mbis, tabf, tabw):
    """One fixed-point step of the Frechet mean iteration.

    Takes the weighted sum of the Riemannian logs of all data points at the
    current estimate ``mbis`` and maps it back with the exponential.
    """
    aux = numpy.zeros(6)
    for i in range(0, tabf.shape[0]):
        aux = aux + tabw[i] * riemLogR(a, mbis, tabf[i, :])
    return riemExpR(a, mbis, aux)


def frechetR(a, tabf, tabw):
    """
    This function computes the frechet-L mean
    Attributes:
        a: metric parameter forwarded to the Riemannian exp/log maps
        tabf: SE3 data points (Nx6 vector)
        tabw: data point weights (Nx1 vector)
    Return:
        m: The mean
    """
    siz = tabf.shape[0]
    if siz < 2:
        # NOTE(review): the original only warned and carried on; behaviour is
        # preserved (siz == 0 will still fail on the indexing below).
        print('Error: Calculating mean requires at least 2 points')
    m = tabf[0,:]
    # Fixed-point iteration (do-while): the original duplicated the loop body
    # for "iteration 0"; this folds both into one loop with identical order
    # of operations.  Stop once the update is small relative to the variance.
    while True:
        mbis = m
        print('mbisR=' + str(mbis))
        m = _frechet_step_R(a, mbis, tabf, tabw)
        if not (normA2R(a, mbis, riemLogR(a, mbis, m)) > EPS * sigma2R(a, mbis, tabf, tabw)):
            break
    return m
| StarcoderdataPython |
67234 | <reponame>atksh/datasets
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for INaturalist dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import inaturalist
class INaturalist2017Test(testing.DatasetBuilderTestCase):
  """Smoke test for the INaturalist2017 builder, driven by fake test data."""
  # Dataset builder under test.
  DATASET_CLASS = inaturalist.INaturalist2017
  SPLITS = {  # Expected number of examples on each split.
      "train": 4,
      "validation": 3,
      "test": 2,
  }
  # Maps the builder's download/extract keys to the fake archive names.
  DL_EXTRACT_RESULT = {
      "test_images": "test2017.tar.gz",
      "trainval_annos": "train_val2017",
      "trainval_images": "train_val_images.tar.gz",
  }
# Run the TFDS test suite when executed directly.
if __name__ == "__main__":
  testing.test_main()
| StarcoderdataPython |
1724148 | import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
class TrfJitter(BaseEstimator, TransformerMixin):
    """Augmentation transformer that adds white Gaussian noise ("jitter") to
    randomly selected samples at a fixed signal-to-noise ratio.

    Parameters:
        snrdb: target signal-to-noise ratio in decibels.
        p: per-sample probability of applying the jitter.
        verbose: if truthy, print the selection mask.
    """

    def __init__(self, snrdb, p=1, verbose=0):
        self.snrdb = snrdb
        self.snr = 10 ** (self.snrdb/10)
        self.p = p
        self.verbose = verbose

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None):
        noisy = np.copy(X)
        selected = np.random.rand(X.shape[0]) < self.p
        if self.verbose:
            print('Jitter: ', selected)
        for idx, do_jitter in enumerate(selected):
            if do_jitter:
                noisy[idx] = self.jitter(X[idx])
        return (noisy, y) if y is not None else noisy

    def jitter(self, x):
        # Per-column signal power, then the noise power implied by the SNR.
        signal_power = np.sum(x**2, axis=0, keepdims=True) / x.shape[0]
        noise_power = signal_power / self.snr
        noise = np.random.normal(size=x.shape, scale=np.sqrt(noise_power), loc=0.0)
        return x + noise
class TrfMagWarp(BaseEstimator, TransformerMixin):
    """Augmentation transformer that warps the magnitude of each channel of
    randomly selected samples by a smooth random curve (cubic spline through
    random knots centred on 1.0).

    Parameters:
        sigma: std-dev of the random knot values.
        p: per-sample probability of applying the warp.
        verbose: if truthy, print the selection mask.
    """
    def __init__(self, sigma, p=1, verbose=0):
        self.sigma = sigma
        self.p = p
        self.verbose = verbose
        self.knot = 4

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None):
        output = np.copy(X)
        q = np.random.rand(X.shape[0]) < self.p
        if self.verbose:
            print('Warp: ', q)
        for i in range(X.shape[0]):
            if q[i]:
                output[i] = self.mag_warp(X[i])
        if y is not None:
            return output, y
        else:
            return output

    def mag_warp(self, x):
        # BUG FIX: CubicSpline was used without ever being imported anywhere
        # in this file, so every call to mag_warp raised NameError.  Import
        # it locally to keep the fix self-contained.
        from scipy.interpolate import CubicSpline

        def _generate_random_curve(x, sigma=0.2, knot=4):
            # NOTE(review): for some lengths arange can yield more than
            # knot+2 sample positions, which would make CubicSpline fail on
            # mismatched lengths — behaviour kept as-is, confirm upstream.
            # knot = max(0, min(knot, x.shape[0]-2))
            xx = np.arange(0, x.shape[0], (x.shape[0]-1)//(knot+1)).transpose()
            yy = np.random.normal(loc=1.0, scale=sigma, size=(knot+2,))
            x_range = np.arange(x.shape[0])
            cs = CubicSpline(xx[:], yy[:])
            return np.array(cs(x_range)).transpose()

        output = np.zeros(x.shape)
        for i in range(x.shape[1]):
            # Independent random curve per channel.
            rc = _generate_random_curve(x[:,i], self.sigma, self.knot)
            output[:,i] = x[:,i] * rc
        return output
| StarcoderdataPython |
1652542 | #!/usr/bin/python
def _read_grd(grd_path):
    """Read a netCDF grid file and return its (x, y, z) arrays.

    z is fully materialised so the file can be closed before returning.
    """
    from scipy.io import netcdf
    f = netcdf.netcdf_file(grd_path, "r", False)
    x = f.variables["x"].data
    y = f.variables["y"].data
    z = f.variables["z"].data[:]
    f.close()
    return x, y, z


def imageVel(east_grd_path):
    """Plot a streamline visualisation of a velocity field.

    east_grd_path: path to the east-component grid ("eastxyz..."); the
    matching north ("north...") and magnitude ("...mag...") grid paths are
    derived from it by substring replacement.
    Raises AssertionError if east_grd_path does not exist.
    """
    import matplotlib.pyplot
    import os

    assert os.path.exists(east_grd_path), "\n***** ERROR: " + east_grd_path + " does not exist\n"

    north_grd_path = east_grd_path.replace("east", "north")
    mag_grd_path = east_grd_path.replace("eastxyz", "mag")

    # The three components share the same (x, y) axes; the duplicated
    # read-file boilerplate is factored into _read_grd.
    x, y, eastvel = _read_grd(east_grd_path)
    x, y, northvel = _read_grd(north_grd_path)
    x, y, speed = _read_grd(mag_grd_path)

    # Hard-coded window into the grids — presumably the region of interest
    # for this study; TODO confirm / parameterise.
    matplotlib.pyplot.streamplot(x[760:820], y[1380:1440], eastvel[1380:1440, 760:820], northvel[1380:1440, 760:820], color=speed[1380:1440, 760:820], linewidth=2)
    matplotlib.pyplot.colorbar()
    matplotlib.pyplot.show()
    return
# Command-line entry point: expects the east-component grid path as argv[1].
if __name__ == "__main__":
    import os;
    import sys;
    # Validate the single required argument before attempting to plot.
    assert len(sys.argv) > 1, "\n***** ERROR: imageVel.py requires one argument, " + str(len(sys.argv)) + " given\n";
    assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
    imageVel(sys.argv[1]);
    exit();
| StarcoderdataPython |
1736678 | <gh_stars>1-10
import glob
import math
import os
import pickle as pkl
import random
import shutil
import datetime
from collections import deque
from hashlib import sha1
from os.path import join, isfile
import cv2
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import networkx as nx
from algorithms.utils.algo_utils import EPS
from utils.graph import visualize_graph_tensorboard, plot_graph
from utils.timing import Timing
from utils.utils import ensure_contigious, log, ensure_dir_exists, AttrDict
def hash_observation(o):
    """Return a SHA-1 hex digest of an observation array.

    Not the fastest way to do it, but plenty fast enough for our purposes.
    """
    contiguous = ensure_contigious(o)
    digest = sha1(contiguous)
    return digest.hexdigest()
def get_position(info):
    """Extract the agent's (x, y) position from an env info dict, or None."""
    if info is None:
        return None
    pos = info.get('pos')
    if pos is None:
        return None
    return pos['agent_x'], pos['agent_y']
def get_angle(info):
    """Extract the agent's orientation angle from an env info dict, or None."""
    if info is None:
        return None
    pos = info.get('pos')
    if pos is None:
        return None
    return pos['agent_a']
class TopologicalMap:
    """Landmark graph used as episodic memory.

    Nodes store observations (plus optional position/angle info); directed
    edges store traversability statistics (success probability, traversal
    time).  Fixes in this revision: the deprecated ``Graph.node`` accessor
    (removed in networkx >= 2.4, and inconsistent with the ``.nodes`` usage
    elsewhere in this class) and a warning with a missing format argument.
    """

    def __init__(self, initial_obs, directed_graph, initial_info=None, verbose=False):
        self._verbose = verbose

        # whether we add edges in both directions or not (directions are always treated separately, hence DiGraph)
        self.directed_graph = directed_graph

        self.graph = nx.DiGraph()

        self.curr_landmark_idx = 0

        self.path_so_far = [0]  # full path traversed during the last or current episode

        # variables needed for online localization
        self.new_landmark_candidate_frames = 0
        self.loop_closure_candidate_frames = 0
        self.closest_landmarks = []

        # number of trajectories that were used to build the map (used in TMAX)
        self.num_trajectories = 0

        # index map from frame index in a trajectory to node index in the resulting map
        self.frame_to_node_idx = dict()

        self.reset(initial_obs, initial_info)

    @staticmethod
    def create_empty():
        """Create a placeholder map with a single dummy scalar observation."""
        return TopologicalMap(np.array(0), directed_graph=False)

    def _add_new_node(self, obs, pos, angle, value_estimate=0.0, num_samples=1, node_id=None):
        """Insert a new landmark node and return its index.

        If node_id is None the next free index (max existing + 1) is used.
        """
        if node_id is not None:
            new_landmark_idx = node_id
        else:
            if self.num_landmarks() <= 0:
                new_landmark_idx = 0
            else:
                new_landmark_idx = max(self.graph.nodes) + 1
        assert new_landmark_idx not in self.graph.nodes

        hash_ = hash_observation(obs)
        self.graph.add_node(
            new_landmark_idx,
            obs=obs, hash=hash_, pos=pos, angle=angle,
            value_estimate=value_estimate, num_samples=num_samples, path=(new_landmark_idx,),
        )
        return new_landmark_idx

    def _node_set_path(self, idx):
        # Remember the full path that led to this node in the current episode.
        self.graph.nodes[idx]['path'] = tuple(self.path_so_far)

    def reset(self, obs, info=None):
        """Create the graph with only one vertex."""
        self.graph.clear()

        self.curr_landmark_idx = self._add_new_node(obs=obs, pos=get_position(info), angle=get_angle(info))
        assert self.curr_landmark_idx == 0
        self.frame_to_node_idx[0] = [0]

        self.new_episode()

    def new_episode(self):
        """Reset per-episode localization state; landmark 0 is the start."""
        self.new_landmark_candidate_frames = 0
        self.loop_closure_candidate_frames = 0
        self.closest_landmarks = []
        self.curr_landmark_idx = 0  # assuming we're being put into the exact same starting spot every time
        self.graph.nodes[self.curr_landmark_idx]['added_at'] = 0
        self.path_so_far = [0]

    def relabel_nodes(self):
        """Make sure nodes are labeled from 0 to n-1."""
        self.graph = nx.convert_node_labels_to_integers(self.graph)

    def _log_verbose(self, msg, *args):
        if not self._verbose:
            return
        log.debug(msg, *args)

    @property
    def curr_landmark_obs(self):
        return self.get_observation(self.curr_landmark_idx)

    def get_observation(self, landmark_idx):
        # Compat fix: Graph.node was removed in networkx >= 2.4; use .nodes
        # (already the convention elsewhere in this class).
        return self.graph.nodes[landmark_idx]['obs']

    def get_hash(self, landmark_idx):
        return self.graph.nodes[landmark_idx]['hash']

    def get_info(self, landmark_idx):
        """Return a gym-style info dict with the landmark's stored pose."""
        x = y = angle = 0
        try:
            x, y = self.graph.nodes[landmark_idx]['pos']
            angle = self.graph.nodes[landmark_idx]['angle']
        except (KeyError, TypeError):
            log.warning(f'No coordinate information in landmark {landmark_idx}')

        pos = {
            'agent_x': x, 'agent_y': y, 'agent_a': angle,
        }
        return {'pos': pos}

    def neighbors(self, landmark_idx):
        return list(nx.neighbors(self.graph, landmark_idx))

    def neighborhood(self):
        """Current landmark plus its direct successors."""
        neighbors = [self.curr_landmark_idx]
        neighbors.extend(self.neighbors(self.curr_landmark_idx))
        return neighbors

    def reachable_indices(self, start_idx):
        """Run BFS from current landmark to find the list of landmarks reachable from the current landmark."""
        d = [start_idx]
        d.extend(nx.descendants(self.graph, start_idx))
        return d

    def non_neighbors(self, landmark_idx):
        return list(nx.non_neighbors(self.graph, landmark_idx))

    def curr_non_neighbors(self):
        return self.non_neighbors(self.curr_landmark_idx)

    def set_curr_landmark(self, landmark_idx):
        """Replace current landmark with the given landmark. Create necessary edges if needed."""
        if landmark_idx == self.curr_landmark_idx:
            return

        if landmark_idx not in self.neighborhood():
            # create new edges, we found a loop closure!
            self.add_edge(self.curr_landmark_idx, landmark_idx, loop_closure=True)

        self._log_verbose('Change current landmark to %d', landmark_idx)
        self.curr_landmark_idx = landmark_idx
        self.path_so_far.append(landmark_idx)

    def add_landmark(self, obs, info=None, update_curr_landmark=False, action=None):
        """Add a landmark adjacent to the current one; optionally move to it."""
        new_landmark_idx = self._add_new_node(obs=obs, pos=get_position(info), angle=get_angle(info))
        self.add_edge(self.curr_landmark_idx, new_landmark_idx)
        self._log_verbose('Added new landmark %d', new_landmark_idx)

        if update_curr_landmark:
            prev_landmark_idx = self.curr_landmark_idx
            self.set_curr_landmark(new_landmark_idx)
            self._node_set_path(new_landmark_idx)
            assert self.path_so_far[-1] == new_landmark_idx
            if prev_landmark_idx != self.curr_landmark_idx and action is not None:
                # remember which action traversed this new edge
                self.graph.adj[prev_landmark_idx][self.curr_landmark_idx]['action'] = action
        return new_landmark_idx

    def add_edge(self, i1, i2, loop_closure=False):
        """Add an edge i1->i2 (and i2->i1 for undirected maps) with a low
        initial traversal-success probability."""
        initial_success = 0.01  # add to params?

        if i2 in self.graph[i1]:
            log.warning('Edge %d-%d already exists (%r)! Overriding!', i1, i2, self.graph[i1])

        self.graph.add_edge(
            i1, i2,
            success=initial_success, last_traversal_frames=math.inf, attempted_traverse=0,
            loop_closure=loop_closure,
        )

        if not self.directed_graph:
            if i1 in self.graph[i2]:
                log.warning('Edge %d-%d already exists (%r)! Overriding!', i2, i1, self.graph[i2])

            self.graph.add_edge(
                i2, i1,
                success=initial_success, last_traversal_frames=math.inf, attempted_traverse=0,
                loop_closure=loop_closure,
            )

    def _remove_edge(self, i1, i2):
        # Remove i1->i2 (and the reverse edge for undirected maps) if present.
        if i2 in self.graph[i1]:
            self.graph.remove_edge(i1, i2)
        if not self.directed_graph:
            if i1 in self.graph[i2]:
                self.graph.remove_edge(i2, i1)

    def remove_edges_from(self, edges):
        for e in edges:
            self._remove_edge(*e)

    def remove_unreachable_vertices(self, from_idx):
        """Drop every vertex that is not reachable from from_idx."""
        reachable_targets = self.reachable_indices(from_idx)
        remove_vertices = []
        for target_idx in self.graph.nodes():
            if target_idx not in reachable_targets:
                remove_vertices.append(target_idx)

        assert len(remove_vertices) < self.num_landmarks()
        self.graph.remove_nodes_from(remove_vertices)

    def num_edges(self):
        """Helper function for summaries."""
        return self.graph.number_of_edges()

    def num_landmarks(self):
        return self.graph.number_of_nodes()

    def update_edge_traversal(self, i1, i2, success, frames):
        """Update traversal information only for one direction."""
        learning_rate = 0.2
        prev_success = self.graph[i1][i2]['success']
        self.graph[i1][i2]['success'] = (1 - learning_rate) * prev_success + learning_rate * success
        self.graph[i1][i2]['last_traversal_frames'] = frames

    # noinspection PyUnusedLocal
    @staticmethod
    def edge_weight(i1, i2, d, max_probability=1.0):
        success_prob = d['success']
        success_prob = max(EPS, success_prob)
        success_prob = min(max_probability, success_prob)
        return -math.log(success_prob)  # weight of the edge is neg. log probability of traversal success

    def get_path(self, from_idx, to_idx, edge_weight=None):
        """Most-probable path under edge_weight, or None if unreachable."""
        if edge_weight is None:
            edge_weight = self.edge_weight

        try:
            return nx.dijkstra_path(self.graph, from_idx, to_idx, weight=edge_weight)
        except nx.exception.NetworkXNoPath:
            return None

    def path_lengths(self, from_idx):
        return nx.shortest_path_length(self.graph, from_idx, weight=self.edge_weight)

    def topological_distances(self, from_idx):
        return nx.shortest_path_length(self.graph, from_idx)

    def topological_neighborhood(self, idx, max_dist):
        """Return set of vertices that are within [0, max_dist] of idx."""
        ego_graph = nx.ego_graph(self.graph, idx, max_dist)
        neighbors = list(ego_graph.nodes)
        return neighbors

    def distances_from(self, another_map):
        """
        Calculate topological distances from all nodes in another map (usually submap) to nodes in this map.
        For all nodes in the intersection of graphs the distance should be 0.
        Solved using BFS (probably there's an algorithm in NX for this).
        """
        q = deque(another_map.graph.nodes)
        distances = {node: 0 for node in another_map.graph.nodes}

        while len(q) > 0:
            node = q.popleft()
            if node not in self.graph:
                continue
            for adj_node in list(self.graph.adj[node]):
                if adj_node in distances:
                    continue

                distances[adj_node] = distances[node] + 1
                q.append(adj_node)

        return distances

    def get_cut_from(self, another_map):
        """
        Return set of edges (cut) that completely separates current map from another_map (usually subgraph).
        """
        distances = self.distances_from(another_map)
        surrounding_vertices = [node for node, d in distances.items() if d == 1]

        cut_edges = []
        for v in surrounding_vertices:
            for adj_v in self.graph.adj[v]:
                assert distances[v] == 1
                if adj_v in another_map.graph:
                    assert distances[adj_v] == 0
                    cut_edges.append((adj_v, v))

        return cut_edges

    @property
    def labeled_graph(self):
        """Copy of the graph with node labels as strings (for plotting)."""
        g = self.graph.copy()
        labels = {i: str(i) for i in g.nodes}
        g = nx.relabel_nodes(g, labels)
        return g

    def save_checkpoint(
            self, checkpoint_dir, map_img=None, coord_limits=None, num_to_keep=2, is_sparse=False, verbose=False,
    ):
        """Verbose mode also dumps all the landmark observations and the graph structure into the directory."""
        t = Timing()
        with t.timeit('map_checkpoint'):
            results = AttrDict()

            prefix = '.map_'
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S-%f")
            dir_name = f'{prefix}{timestamp}'
            map_dir = join(checkpoint_dir, dir_name)

            if os.path.isdir(map_dir):
                # BUG FIX: the %s placeholder previously had no argument, so
                # the overwritten path never appeared in the log message.
                log.warning('Warning: map checkpoint %s already exists! Overwriting...', map_dir)
                shutil.rmtree(map_dir)

            map_dir = ensure_dir_exists(map_dir)

            with open(join(map_dir, 'topo_map.pkl'), 'wb') as fobj:
                pkl.dump(self.__dict__, fobj, 2)

            if verbose:
                map_extra = ensure_dir_exists(join(map_dir, '.map_verbose'))
                for node in self.graph.nodes:
                    obs = self.get_observation(node)
                    obs_bgr = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)
                    obs_bgr_bigger = cv2.resize(obs_bgr, (420, 420), interpolation=cv2.INTER_NEAREST)
                    cv2.imwrite(join(map_extra, f'{node:03d}.jpg'), obs_bgr_bigger)

                figure = plot_graph(
                    self.graph,
                    layout='pos', map_img=map_img, limits=coord_limits, topological_map=True, is_sparse=is_sparse,
                )
                graph_filename = join(map_extra, 'graph.png')
                with open(graph_filename, 'wb') as graph_fobj:
                    plt.savefig(graph_fobj, format='png')
                figure.clear()
                results.graph_filename = graph_filename

            # Keep only the num_to_keep newest checkpoints.
            assert num_to_keep > 0
            previous_checkpoints = glob.glob(f'{checkpoint_dir}/{prefix}*')
            previous_checkpoints.sort()
            previous_checkpoints = deque(previous_checkpoints)

            while len(previous_checkpoints) > num_to_keep:
                checkpoint_to_delete = previous_checkpoints[0]
                log.info('Deleting old map checkpoint %s', checkpoint_to_delete)
                shutil.rmtree(checkpoint_to_delete)
                previous_checkpoints.popleft()

        log.info('Map save checkpoint took %s', t)
        return results

    def maybe_load_checkpoint(self, checkpoint_dir):
        """Load the newest map checkpoint in checkpoint_dir, if one exists."""
        prefix = '.map_'
        all_map_checkpoints = glob.glob(f'{checkpoint_dir}/{prefix}*')
        if len(all_map_checkpoints) <= 0:
            log.debug('No map checkpoints found, starting from empty map')
            return

        all_map_checkpoints.sort()
        latest_checkpoint = all_map_checkpoints[-1]

        fname = 'topo_map.pkl'
        full_path = join(latest_checkpoint, fname)
        if not isfile(full_path):
            return

        log.debug('Load env map from file %s', full_path)
        with open(full_path, 'rb') as fobj:
            topo_map_dict = pkl.load(fobj)
            self.load_dict(topo_map_dict)

    def load_dict(self, topo_map_dict):
        # Restore all attributes previously serialized by save_checkpoint.
        self.__dict__.update(topo_map_dict)
def map_summaries(maps, env_steps, summary_writer, section, map_img=None, coord_limits=None, is_sparse=False):
    """Write TensorBoard summaries (scalar stats plus rendered graphs) for a
    collection of topological maps.  Does nothing if any map is None."""
    if None in maps:
        return

    # summaries related to episodic memory (maps)
    num_landmarks = [m.num_landmarks() for m in maps]
    num_edges = [m.num_edges() for m in maps]
    num_neighbors = []
    for m in maps:
        # sample one random node per map to estimate a typical branching factor
        node = random.choice(list(m.graph.nodes))
        num_neighbors.append(len(m.neighbors(node)))

    avg_num_landmarks = sum(num_landmarks) / len(num_landmarks)
    avg_num_neighbors = sum(num_neighbors) / len(num_neighbors)
    avg_num_edges = sum(num_edges) / len(num_edges)

    summary = tf.Summary()

    def curiosity_summary(tag, value):
        # Append one scalar value under the given section.
        summary.value.add(tag=f'{section}/{tag}', simple_value=float(value))

    curiosity_summary('avg_landmarks', avg_num_landmarks)
    curiosity_summary('max_landmarks', max(num_landmarks))
    curiosity_summary('avg_neighbors', avg_num_neighbors)
    curiosity_summary('max_neighbors', max(num_neighbors))
    curiosity_summary('avg_edges', avg_num_edges)
    curiosity_summary('max_edges', max(num_edges))
    summary_writer.add_summary(summary, env_steps)

    num_maps_to_plot = min(2, len(maps))
    maps_for_summary = random.sample(maps, num_maps_to_plot)

    # Always plot the largest map...
    max_graph_idx = 0
    for i, m in enumerate(maps):
        if m.num_landmarks() > maps[max_graph_idx].num_landmarks():
            max_graph_idx = i

    max_graph_summary = visualize_graph_tensorboard(
        maps[max_graph_idx].labeled_graph,
        tag=f'{section}/max_graph', map_img=map_img, coord_limits=coord_limits, is_sparse=is_sparse,
    )
    summary_writer.add_summary(max_graph_summary, env_steps)

    # ...plus up to two randomly chosen maps when there is more than one.
    if len(maps) > 1:
        for i, map_for_summary in enumerate(maps_for_summary):
            random_graph_summary = visualize_graph_tensorboard(
                map_for_summary.labeled_graph,
                tag=f'{section}/random_graph_{i}',
                map_img=map_img, coord_limits=coord_limits, is_sparse=is_sparse,
            )
            summary_writer.add_summary(random_graph_summary, env_steps)
| StarcoderdataPython |
100874 | <reponame>ellisonch/kinc
import sys
n = sys.argv[1]
s = 0
while (not(n <= 0)):
s = s + n
n = n + -1
print s
| StarcoderdataPython |
1654766 | <filename>algo/lis.py
arr = [1, 6, 3, 5, 9, 7]
ans = [1]
for i in range(1, len(arr)):
t = []
for j in range(i):
if arr[i] > arr[j]:
t.append(ans[j]+1)
else:
t.append(ans[j])
ans.append(max(t))
print max(ans)
| StarcoderdataPython |
4808414 | <filename>service/service_voice_authenticator.py
import sys
import os
import json
import time
from pathlib import Path
from breaker_core.datasource.jsonqueue import Jsonqueue
from breaker_core.datasource.bytessource import Bytessource
from breaker_core.common.service_jsonqueue import ServiceJsonqueue
from breaker_audio.voice_authenticator import VoiceAuthenticator
from breaker_audio.tools_audio_io import ToolsAudioIO
class ServiceVoiceAuthenticator(ServiceJsonqueue):
    """Queue-driven service that encodes voice samples and authenticates a
    speaker against previously stored voice encodings."""

    def __init__(self, config_breaker, queue_request, mode_debug, path_dir_data) -> None:
        super().__init__(config_breaker, queue_request, mode_debug)
        self.path_dir_data = path_dir_data
        self.authenticator = VoiceAuthenticator(path_dir_data)

    def process_request(self, request: dict) -> 'dict':
        """Dispatch a queued request according to its 'type_request' field."""
        type_request = request['type_request']
        if type_request == 'encode':
            return self._handle_encode(request)
        if type_request == 'authenticate':
            return self._handle_authenticate(request)
        return {'was_processed': False, 'message': 'Unknown type_request: ' + type_request}

    def _handle_encode(self, request: dict) -> dict:
        """Encode one voice sample and persist the resulting embedding."""
        print('encode')
        source_sound = Bytessource.from_dict(self.config_breaker, request['bytessource_voice_sound'])
        sink_encoding = Bytessource.from_dict(self.config_breaker, request['bytessource_voice_encoding'])
        signal_voice, sampling_rate_voice = ToolsAudioIO.bytearray_wav_to_signal(source_sound.read())
        sink_encoding.write_pickle(self.authenticator.encode(signal_voice, sampling_rate_voice))
        return {'was_processed': True}

    def _handle_authenticate(self, request: dict) -> dict:
        """Compare one voice sample against every stored encoding in a directory."""
        print('authenticate')
        source_sound = Bytessource.from_dict(self.config_breaker, request['bytessource_voice_sound'])
        encoding_dir = Bytessource.from_dict(self.config_breaker, request['bytessource_voice_encoding_dir'])
        signal_voice, sampling_rate_voice = ToolsAudioIO.bytearray_wav_to_signal(source_sound.read())
        probe_encoding = self.authenticator.encode(signal_voice, sampling_rate_voice)
        stored_encodings = [
            encoding_dir.join(key_path).read_pickle()
            for key_path in encoding_dir.list_shallow()
        ]
        report = self.authenticator.authenticate(probe_encoding, stored_encodings)
        return {'was_processed': True, 'authentication_report': report}
# Service entry point: configuration locations come from the environment,
# with container-friendly defaults.
if __name__ == '__main__':
    path_file_config_breaker = Path(os.getenv('PATH_FILE_CONFIG_BREAKER', '/config/config.cfg'))
    path_dir_data = Path(os.getenv('PATH_DIR_DATA_BREAKER', '/data/data_breaker/' ))
    mode_debug = True
    with open(path_file_config_breaker, 'r') as file:
        config_breaker = json.load(file)
    # Ensure the request queue exists before the service starts consuming it.
    jsonqueue_request = Jsonqueue.from_dict(config_breaker, config_breaker['queue_request_voice_authenticator'])
    if not jsonqueue_request.exists():
        jsonqueue_request.create()
    service = ServiceVoiceAuthenticator(config_breaker, jsonqueue_request, mode_debug, path_dir_data)
    service.run()
| StarcoderdataPython |
3345565 | <reponame>xxiro/UBC_Triumf_Workshop
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dwave.system import EmbeddingComposite, DWaveSampler
import dwave.inspector as inspector
'''
******** Ferromagnetic chain ********
The goals of this exercise are:
1. Get comfortable with submitting problems to the QPU and visualize the solutions using
the problem inspector
2. Explore the probabilistic nature of the solutions
3. Explore the physical qubit parameters (local h biases and inter-qubit couplings J) in a
linear chain
Suggested exercises:
1. What do you see if you run the problem multiple times?
What about if you increase the number of reads?
2. Make an anti-ferromagnetic chain by changing the sign of fm_coupler_strength
3. What happens if you add a linear bias (h) on one end of the chain? This can be
done in either the ferromagnetic or antiferromagnetic case. How does the
strength of that bias affect your results? (for example, try h = 0.1, 0.5 and 1.0.
What happens to the number of solutions in each state and the energies?)
4. What happens if you multiply all of the h and J biases by the same factor (2x, 5x, 10x)?
Do your solutions change? What about their energies?
5. What happens if you ferromagnetically couple the chain, and impose opposite
h bias on each end of the chain? What if those biases have different magnitude?
'''
# Modifiable parameters
num_qubits = 8 # Number of qubits in our chain
fm_qubit_bias = [0] * num_qubits # List of biases to apply to each qubit in our chain
fm_coupler_strength = -1 # The coupling we want to apply to two adjacent qubits
num_reads = 20 # The number of times the QPU is sampled
# Ising model parameters
h = fm_qubit_bias
J_PeriodicBC = {} # coupling strength is specified using a dictionary
# HW 3.c-d Periodic BC: couple chain neighbours, then close the ring by also
# coupling the first and last qubits.
for i in range(num_qubits-1):
    J_PeriodicBC[(i, i+1)] = fm_coupler_strength
J_PeriodicBC[(0,num_qubits-1)] = fm_coupler_strength
# J_PeriodicBC[(num_qubits-1,0)] = fm_coupler_strength
# HW 3.e Chimera graph: fully couple even-indexed ("left") qubits to
# odd-indexed ("right") qubits, i.e. a complete bipartite K_{4,4} cell.
J_Chimera = {}
left = [i for i in range(num_qubits) if not i%2]
right = [i for i in range(num_qubits) if i%2]
for i in left:
    for j in right:
        J_Chimera[(i,j)] = fm_coupler_strength
# HW 3.f Chimera graph
#J_Chimera[(0,2)] = fm_coupler_strength
# HW 3.g Clique: couple every unordered pair of qubits exactly once.
J_Clique = {}
qubit_list = list(range(num_qubits))
for i in range(num_qubits):
    qubit_list.remove(i)
    for j in qubit_list:
        J_Clique[(i,j)] = fm_coupler_strength
# Submit the problem to the QPU.
# NOTE: only J_Chimera is submitted below; swap in J_PeriodicBC or J_Clique
# to run the other exercises.
sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
response = sampler.sample_ising(h, J_Chimera, num_reads=num_reads)
# Show the problem visualization on the QPU
inspector.show(response)
print("QPU response")
print(response)
1686124 | import os
from typing import List
from typing import Tuple
import logging
from collections import defaultdict
from collections import Counter
import json
import torch
import numpy as np
from GroundedScan.dataset import GroundedScan
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Module-level logger for this file.
logger = logging.getLogger(__name__)
class Vocabulary(object):
    """
    Object that maps words in string form to indices to be processed by numerical models.

    BUG FIX: contains_word() and word_to_idx() previously indexed the
    underlying defaultdict directly, which *inserted* every looked-up unknown
    word with the pad index as a side effect.  A subsequent add_sentence()
    then believed the word was already registered and permanently mapped it
    to <PAD>.  Both lookups are now non-mutating.
    """

    def __init__(self, sos_token="<SOS>", eos_token="<EOS>", pad_token="<PAD>"):
        """
        NB: <PAD> token is by construction idx 0.
        """
        self.sos_token = sos_token
        self.eos_token = eos_token
        self.pad_token = pad_token
        self._idx_to_word = [pad_token, sos_token, eos_token]
        self._word_to_idx = defaultdict(lambda: self._idx_to_word.index(self.pad_token))
        self._word_to_idx[sos_token] = 1
        self._word_to_idx[eos_token] = 2
        self._word_frequencies = Counter()

    def word_to_idx(self, word: str) -> int:
        """Return the index of ``word``; unknown words map to the pad index.

        Unlike direct defaultdict indexing, this does NOT insert the word.
        """
        if word in self._word_to_idx:
            return self._word_to_idx[word]
        return self._idx_to_word.index(self.pad_token)

    def idx_to_word(self, idx: int) -> str:
        return self._idx_to_word[idx]

    def contains_word(self, word: str) -> bool:
        """True iff ``word`` is registered with a non-pad index.

        Uses a non-mutating lookup so that querying an unknown word does not
        poison the vocabulary (see class docstring).
        """
        return self._word_to_idx.get(word, 0) != 0

    def add_sentence(self, sentence: List[str]):
        """Register every word of ``sentence`` and update word frequencies."""
        for word in sentence:
            if word not in self._word_to_idx:
                self._word_to_idx[word] = self.size
                self._idx_to_word.append(word)
            self._word_frequencies[word] += 1

    def most_common(self, n=10):
        return self._word_frequencies.most_common(n=n)

    @property
    def pad_idx(self):
        return self.word_to_idx(self.pad_token)

    @property
    def sos_idx(self):
        return self.word_to_idx(self.sos_token)

    @property
    def eos_idx(self):
        return self.word_to_idx(self.eos_token)

    @property
    def size(self):
        return len(self._idx_to_word)

    @classmethod
    def load(cls, path: str):
        """Reconstruct a Vocabulary previously written by save()."""
        assert os.path.exists(path), "Trying to load a vocabulary from a non-existing file {}".format(path)
        with open(path, 'r') as infile:
            all_data = json.load(infile)
            sos_token = all_data["sos_token"]
            eos_token = all_data["eos_token"]
            pad_token = all_data["pad_token"]
            vocab = cls(sos_token=sos_token, eos_token=eos_token, pad_token=pad_token)
            vocab._idx_to_word = all_data["idx_to_word"]
            vocab._word_to_idx = defaultdict(int)
            for word, idx in all_data["word_to_idx"].items():
                vocab._word_to_idx[word] = idx
            vocab._word_frequencies = Counter(all_data["word_frequencies"])
        return vocab

    def to_dict(self) -> dict:
        """Return a JSON-serializable snapshot of the vocabulary state."""
        return {
            "sos_token": self.sos_token,
            "eos_token": self.eos_token,
            "pad_token": self.pad_token,
            "idx_to_word": self._idx_to_word,
            "word_to_idx": self._word_to_idx,
            "word_frequencies": self._word_frequencies
        }

    def save(self, path: str) -> str:
        """Write the vocabulary to ``path`` as JSON and return the path."""
        with open(path, 'w') as outfile:
            json.dump(self.to_dict(), outfile, indent=4)
        return path
class GroundedScanDataset(object):
"""
Loads a GroundedScan instance from a specified location.
"""
    def __init__(self, path_to_data: str, save_directory: str, k: int, upsample_isolated=100, split="train",
                 input_vocabulary_file="", target_vocabulary_file="", generate_vocabulary=False,
                 isolate_examples_with="cautiously", simplified_objective=False):
        """Load one gSCAN split and build or load its vocabularies.

        :param path_to_data: path to the gSCAN dataset file.
        :param save_directory: directory where vocabularies are stored/loaded.
        :param k: number of isolated (adverb) examples in the training set.
        :param upsample_isolated: multiplier applied to the k isolated examples.
        :param split: which data split to use ("train", "dev" or "test").
        :param input_vocabulary_file: file name to load (unless generate_vocabulary).
        :param target_vocabulary_file: file name to load (unless generate_vocabulary).
        :param generate_vocabulary: build vocabularies from this split instead of loading.
        :param isolate_examples_with: adverb whose examples are isolated/upsampled.
        :param simplified_objective: debugging-only flag; asserted to be False.
        """
        assert os.path.exists(path_to_data), "Trying to read a gSCAN dataset from a non-existing file {}.".format(
            path_to_data)
        self.simplified_objective = simplified_objective
        assert not simplified_objective, "Simplified objective for debugging purposes only."
        if not generate_vocabulary:
            assert os.path.exists(os.path.join(save_directory, input_vocabulary_file)) and os.path.exists(
                os.path.join(save_directory, target_vocabulary_file)), \
                "Trying to load vocabularies from non-existing files."
        if split == "test" and generate_vocabulary:
            logger.warning("WARNING: generating a vocabulary from the test set.")
        self.dataset = GroundedScan.load_dataset_from_file(path_to_data, save_directory=save_directory, k=k,
                                                           upsample_isolated=upsample_isolated,
                                                           isolate_examples_with=isolate_examples_with)
        # Log verb/adverb co-occurrence statistics when the dataset has them.
        if self.dataset._data_statistics.get("adverb_1"):
            logger.info("Verb-adverb combinations in training set: ")
            for adverb, items in self.dataset._data_statistics["train"]["verb_adverb_combinations"].items():
                logger.info("Verbs for adverb: {}".format(adverb))
                for key, count in items.items():
                    logger.info("  {}: {} occurrences.".format(key, count))
            logger.info("Verb-adverb combinations in dev set: ")
            for adverb, items in self.dataset._data_statistics["dev"]["verb_adverb_combinations"].items():
                logger.info("Verbs for adverb: {}".format(adverb))
                for key, count in items.items():
                    logger.info("  {}: {} occurrences.".format(key, count))
        # Sanity check: exactly k * upsample_isolated isolated examples must
        # be present in the training data.
        actual_k = self.dataset._data_statistics["train"]["manners_in_command"][isolate_examples_with]
        expected_k = k * upsample_isolated
        if split in ["train", "dev"]:
            assert actual_k == expected_k, \
                "Chose k=%d and upsample=%d (expected k=%d) but actual number of examples with %s in training set is %d." % (
                    k, upsample_isolated, expected_k, isolate_examples_with, actual_k
                )
        self.image_dimensions = None
        self.image_channels = 16
        self.split = split
        self.directory = save_directory

        # Keeping track of data.
        self._examples = np.array([])
        self._input_lengths = np.array([])
        self._target_lengths = np.array([])

        if generate_vocabulary:
            logger.info("Generating vocabularies...")
            self.input_vocabulary = Vocabulary()
            self.target_vocabulary = Vocabulary()
            self.read_vocabularies()
            logger.info("Done generating vocabularies.")
        else:
            logger.info("Loading vocabularies...")
            self.input_vocabulary = Vocabulary.load(os.path.join(save_directory, input_vocabulary_file))
            self.target_vocabulary = Vocabulary.load(os.path.join(save_directory, target_vocabulary_file))
            logger.info("Done loading vocabularies.")
    def convert_target_to_simple(self, example):
        """Reduce a full target command sequence to only its interaction
        (push/pull) tokens — helper for the simplified debugging objective.

        NOTE(review): the adverb is taken as example["input_command"][-1] and
        compared against multi-word strings like "while spinning"; this only
        matches if such adverbs are kept as single tokens — TODO confirm.
        """
        verb_in_command = example["input_command"][0]
        adverb_in_command = example["input_command"][-1]
        if adverb_in_command not in ["while spinning", "while zigzagging", "cautiously", "hesitantly"]:
            adverb_in_command = ""
        if verb_in_command == "push" or verb_in_command == "pull":
            interactions = [command for command in example["target_command"] if command == verb_in_command]
        else:
            interactions = []
        interaction_target = []
        # NOTE(review): interaction_target is empty at this point, so this
        # condition is always true; the net effect of the remaining lines is
        # simply interaction_target = interactions.
        if verb_in_command not in interaction_target:
            interaction_target += interactions
        if adverb_in_command == "while zigzagging":
            interaction_target = interactions
        return interaction_target
def read_vocabularies(self) -> {}:
"""
Loop over all examples in the dataset and add the words in them to the vocabularies.
"""
logger.info("Populating vocabulary...")
for i, example in enumerate(self.dataset.get_examples_with_image(self.split,
simple_situation_representation=True)):
self.input_vocabulary.add_sentence(example["input_command"])
if not self.simplified_objective:
self.target_vocabulary.add_sentence(example["target_command"])
else:
interaction_target = self.convert_target_to_simple(example)
self.target_vocabulary.add_sentence(interaction_target)
def save_vocabularies(self, input_vocabulary_file: str, target_vocabulary_file: str):
self.input_vocabulary.save(os.path.join(self.directory, input_vocabulary_file))
self.target_vocabulary.save(os.path.join(self.directory, target_vocabulary_file))
def get_vocabulary(self, vocabulary: str) -> Vocabulary:
if vocabulary == "input":
vocab = self.input_vocabulary
elif vocabulary == "target":
vocab = self.target_vocabulary
else:
raise ValueError("Specified unknown vocabulary in sentence_to_array: {}".format(vocabulary))
return vocab
def shuffle_data(self) -> {}:
"""
Reorder the data examples and reorder the lengths of the input and target commands accordingly.
"""
random_permutation = np.random.permutation(len(self._examples))
self._examples = self._examples[random_permutation]
self._target_lengths = self._target_lengths[random_permutation]
self._input_lengths = self._input_lengths[random_permutation]
    def get_data_iterator(self, batch_size=None, max_examples=None,
                          simple_situation_representation=True, shuffle=False):
        """
        Stream the dataset as ready-made batches of padded tensors.

        :param batch_size: number of examples per yielded batch (required).
        :param max_examples: stop after this many examples have been read; read all if None.
        :param simple_situation_representation: whether to read the full situation image in RGB
            or the simplified smaller representation.
        :param shuffle: whether the underlying dataset should shuffle example order.
        """
        assert isinstance(batch_size, int), "Provide a batch size."
        logger.info("Converting dataset to tensors...")
        # Accumulators for the batch currently being filled.
        current_examples_batch = np.array([])
        current_input_lengths = np.array([])
        current_target_lengths = np.array([])
        for i, example in enumerate(self.dataset.get_examples_with_image(self.split,
                                                                         shuffle=shuffle,
                                                                         simple_situation_representation=simple_situation_representation,
                                                                         adverb_inputs=False)):
            if max_examples:
                # NOTE(review): this tests self._examples, which this method never
                # appends to -- max_examples therefore only takes effect when
                # read_dataset() populated self._examples beforehand.  Confirm
                # whether the local counter ``i`` was intended here.
                if len(self._examples) > max_examples:
                    return
            empty_example = {}
            input_commands = example["input_command"]
            if not self.simplified_objective:
                target_commands = example["target_command"]
            else:
                target_commands = self.convert_target_to_simple(example)
            # Raw (non-tensor) metadata carried alongside the tensors.
            example_information = {
                # "adverb": example["adverb"],
                # "type_adverb": example["type_adverb"],
                "original_input": input_commands,
                "original_output": target_commands,
                "gscan_final_target": example["target_command"],
                # "verb_in_command": example["verb_in_command"],
                "derivation_representation": example["derivation_representation"],
                "situation_representation": example["situation_representation"]
            }
            input_array = self.sentence_to_array(input_commands, vocabulary="input")
            target_array = self.sentence_to_array(target_commands, vocabulary="target")
            # All tensors get a leading batch dimension of 1 so make_batch can cat them.
            empty_example["input_tensor"] = torch.tensor(input_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            empty_example["target_tensor"] = torch.tensor(target_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            empty_example["situation_image"] = torch.tensor(example["situation_image"],
                                                            dtype=torch.float, device=device).unsqueeze(dim=0)
            empty_example["example_information"] = example_information
            current_input_lengths = np.append(current_input_lengths, [len(input_array)])
            current_target_lengths = np.append(current_target_lengths, [len(target_array)])
            current_examples_batch = np.append(current_examples_batch, [empty_example])
            if len(current_examples_batch) == batch_size:
                yield self.make_batch(current_examples_batch, current_input_lengths, current_target_lengths)
                current_examples_batch = np.array([])
                current_input_lengths = np.array([])
                current_target_lengths = np.array([])
        # NOTE(review): a final partial batch (fewer than batch_size leftover
        # examples) is silently dropped here -- confirm this is intended.
    def make_batch(self, examples, input_lengths, target_lengths) -> Tuple[torch.Tensor, List[int],
                                                                           torch.Tensor, List[dict],
                                                                           torch.Tensor, List[int]]:
        """
        Pad every example to the batch-wide max lengths and stack into tensors.

        :param examples: array of example dicts produced by get_data_iterator.
        :param input_lengths: unpadded input-sequence lengths, aligned with examples.
        :param target_lengths: unpadded target-sequence lengths, aligned with examples.
        :return: tuple of (input batch, input lengths, derivation representations,
            situation image batch, situation representations, target batch,
            target lengths, agent positions, target positions).
            NOTE(review): the declared 6-element return annotation does not match
            this actual 9-element tuple -- confirm and correct the annotation.
        """
        max_input_length = np.max(input_lengths)
        max_target_length = np.max(target_lengths)
        input_batch = []
        # NOTE(review): most of the *_batch lists below are never populated or
        # returned; they look like leftovers from an adverb-aware variant.
        adverb_batch = []
        target_batch = []
        situation_representation_batch = []
        derivation_representation_batch = []
        agent_positions_batch = []
        target_positions_batch = []
        situation_batch = []
        original_input_batch = []
        original_output_batch = []
        verb_in_command_batch = []
        adverb_type_batch = []
        gscan_final_targets_batch = []
        for example in examples:
            to_pad_input = max_input_length - example["input_tensor"].size(1)
            to_pad_target = max_target_length - example["target_tensor"].size(1)
            # Pad with zeros -- assumes index 0 is the vocabulary's padding
            # token; TODO confirm against the Vocabulary implementation.
            padded_input = torch.cat([
                example["input_tensor"],
                torch.zeros(int(to_pad_input), dtype=torch.long, device=device).unsqueeze(0)], dim=1)
            padded_target = torch.cat([
                example["target_tensor"],
                torch.zeros(int(to_pad_target), dtype=torch.long, device=device).unsqueeze(0)], dim=1)
            input_batch.append(padded_input)
            target_batch.append(padded_target)
            # adverb_batch.append(example["adverb_input"])
            situation_repr = example["example_information"]["situation_representation"]
            situation_representation_batch.append(situation_repr)
            situation_batch.append(example["situation_image"])
            # Flatten the (row, column) grid position into a single index:
            # row * grid_size + column.
            agent_position = torch.tensor(
                (int(situation_repr["agent_position"]["row"]) * int(situation_repr["grid_size"])) +
                int(situation_repr["agent_position"]["column"]), dtype=torch.long,
                device=device).unsqueeze(dim=0)
            agent_positions_batch.append(agent_position)
            target_position = torch.tensor(
                (int(situation_repr["target_object"]["position"]["row"]) * int(situation_repr["grid_size"])) +
                int(situation_repr["target_object"]["position"]["column"]),
                dtype=torch.long, device=device).unsqueeze(dim=0)
            target_positions_batch.append(target_position)
            # adverb_type_batch.append(example["example_information"]["type_adverb"])
            derivation_representation_batch.append(example["example_information"]["derivation_representation"])
            # original_input_batch.append(example["example_information"]["original_input"])
            # original_output_batch.append(example["example_information"]["original_output"])
            # verb_in_command_batch.append(example["example_information"]["verb_in_command"])
            # gscan_final_targets_batch.append(example["example_information"]["gscan_final_target"])
        return (torch.cat(input_batch, dim=0), input_lengths, derivation_representation_batch,
                torch.cat(situation_batch, dim=0), situation_representation_batch, torch.cat(target_batch, dim=0),
                target_lengths, torch.cat(agent_positions_batch, dim=0), torch.cat(target_positions_batch, dim=0))
    def read_dataset(self, max_examples=None, simple_situation_representation=True) -> None:
        """
        Loop over the data examples in GroundedScan and convert them to tensors, also save the lengths
        for input and target sequences that are needed for padding.
        :param max_examples: how many examples to read maximally, read all if None.
        :param simple_situation_representation: whether to read the full situation image in RGB or the simplified
        smaller representation.
        """
        logger.info("Converting dataset to tensors...")
        for i, example in enumerate(self.dataset.get_examples_with_image(self.split, simple_situation_representation)):
            if max_examples:
                if len(self._examples) > max_examples:
                    return
            empty_example = {}
            input_commands = example["input_command"]
            target_commands = example["target_command"]
            #equivalent_target_commands = example["equivalent_target_command"]
            situation_image = example["situation_image"]
            if i == 0:
                # Cache image geometry from the first example; presumably all
                # examples share the same shape -- TODO confirm.
                self.image_dimensions = situation_image.shape[0]
                self.image_channels = situation_image.shape[-1]
            situation_repr = example["situation_representation"]
            input_array = self.sentence_to_array(input_commands, vocabulary="input")
            target_array = self.sentence_to_array(target_commands, vocabulary="target")
            #equivalent_target_array = self.sentence_to_array(equivalent_target_commands, vocabulary="target")
            empty_example["input_tensor"] = torch.tensor(input_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            empty_example["target_tensor"] = torch.tensor(target_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            #empty_example["equivalent_target_tensor"] = torch.tensor(equivalent_target_array, dtype=torch.long,
            #                                                         device=device).unsqueeze(dim=0)
            empty_example["situation_tensor"] = torch.tensor(situation_image, dtype=torch.float, device=device
                                                             ).unsqueeze(dim=0)
            empty_example["situation_representation"] = situation_repr
            empty_example["derivation_representation"] = example["derivation_representation"]
            # Flattened grid index: row * grid_size + column.
            empty_example["agent_position"] = torch.tensor(
                (int(situation_repr["agent_position"]["row"]) * int(situation_repr["grid_size"])) +
                int(situation_repr["agent_position"]["column"]), dtype=torch.long,
                device=device).unsqueeze(dim=0)
            empty_example["target_position"] = torch.tensor(
                (int(situation_repr["target_object"]["position"]["row"]) * int(situation_repr["grid_size"])) +
                int(situation_repr["target_object"]["position"]["column"]),
                dtype=torch.long, device=device).unsqueeze(dim=0)
            # NOTE(review): np.append copies the whole array on every call, so
            # this loop is quadratic in the number of examples; a plain Python
            # list would be linear.
            self._input_lengths = np.append(self._input_lengths, [len(input_array)])
            self._target_lengths = np.append(self._target_lengths, [len(target_array)])
            self._examples = np.append(self._examples, [empty_example])
def sentence_to_array(self, sentence: List[str], vocabulary: str) -> List[int]:
"""
Convert each string word in a sentence to the corresponding integer from the vocabulary and append
a start-of-sequence and end-of-sequence token.
:param sentence: the sentence in words (strings)
:param vocabulary: whether to use the input or target vocabulary.
:return: the sentence in integers.
"""
vocab = self.get_vocabulary(vocabulary)
sentence_array = [vocab.sos_idx]
for word in sentence:
sentence_array.append(vocab.word_to_idx(word))
sentence_array.append(vocab.eos_idx)
return sentence_array
def array_to_sentence(self, sentence_array: List[int], vocabulary: str) -> List[str]:
"""
Translate each integer in a sentence array to the corresponding word.
:param sentence_array: array with integers representing words from the vocabulary.
:param vocabulary: whether to use the input or target vocabulary.
:return: the sentence in words.
"""
vocab = self.get_vocabulary(vocabulary)
return [vocab.idx_to_word(word_idx) for word_idx in sentence_array]
    @property
    def num_examples(self):
        """Number of examples materialized so far by ``read_dataset``."""
        return len(self._examples)
    @property
    def input_vocabulary_size(self):
        """Size of the input-command vocabulary."""
        return self.input_vocabulary.size
    @property
    def target_vocabulary_size(self):
        """Size of the target-command vocabulary."""
        return self.target_vocabulary.size
| StarcoderdataPython |
3315574 | import cv2
import numpy as np
import os
from utils import *
class CircleDetector:
    """Placeholder for a circle-based symbol detector; no behavior implemented yet."""
    def __init__(self):
        pass
class SimpleTemplateDetector:
    """
    Detect schematic symbols in an image by multi-rotation template matching.

    Templates are every PNG in ``symdir`` except Pipe/Terminal; each is
    matched at 0/90/180/270 degree rotations, candidate hits above a fixed
    correlation threshold are collected per symbol name, and overlapping
    boxes are merged via non-max suppression.  Relies on ``shrink_template``,
    ``unique_transforms`` and ``non_max_suppression_fast`` from ``utils``.
    """
    def __init__(self, symdir="data/data_SI/symbols_png"):
        self.symdir = symdir
        # Pipe and Terminal symbols are excluded from template matching.
        symbols = [
            os.path.join(symdir, x) for x in os.listdir(symdir)
            if x not in ["Pipe.png", "Terminal.png"]
        ]
        # (basename, BGR template image) pairs; cv2.imread returns None on a
        # failed read -- presumably all files here are valid PNGs, TODO confirm.
        self.templates = [
            (os.path.basename(s), cv2.imread(s))
            for s in symbols]
    def __call__(self, data, **kwargs):
        """
        :param data: dict with a "raw_image" BGR array; detections are added
            under "nodes" as {symbol name: array of [x1, y1, x2, y2] boxes}.
        :return: the same dict, with "nodes" filled in.
        """
        img = data["raw_image"]
        # NOTE(review): img_gray (and h0/w0) is computed but never used below;
        # matching runs on the color image.  Confirm whether grayscale
        # matching was intended.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h0, w0 = img_gray.shape
        predictions = dict()
        for name, template_basic in self.templates:
            template_basic = shrink_template(template_basic)
            #if not np.any(template_basic == 0):
            #    continue
            transforms = [
                template_basic,
                #np.fliplr(template_basic),
                #np.flipud(template_basic)
            ]
            # All four 90-degree rotations of each base transform.
            transforms = [
                np.rot90(t, i) for i in [0, 1, 2, 3] for t in transforms
            ]
            # Drop rotations that produce identical arrays (symmetric symbols).
            transforms = unique_transforms(transforms)
            for template in transforms:
                h, w = template.shape[:2]
                #print(template.shape, img_gray.shape)
                res = cv2.matchTemplate(img.copy(), template, cv2.TM_CCOEFF_NORMED)
                # Fixed correlation threshold for accepting a match.
                threshold = 0.8
                loc = np.where( res >= threshold)
                #print(res[loc])
                # loc is (rows, cols); reversed to iterate (x, y) points.
                for pt in zip(*loc[::-1]):
                    if not name in predictions:
                        predictions[name] = []
                    #cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,255,0), 2)
                    predictions[name].append(list(pt) + [pt[0] + w, pt[1] + h])
        # Merge overlapping candidate boxes per symbol (IoU threshold 0.1).
        predictions = {
            k: non_max_suppression_fast(np.asarray(v), 0.1)
            for k, v in predictions.items()
        }
        data['nodes'] = predictions
        return data
class ConflictResolver:
"resolves conflict between nodes"
def __init__(self):
pass
def __call__(self, data, **kwargs):
pass
| StarcoderdataPython |
87394 | from utils.object_detection import *
from utils.pose_estimation import *
from utils.utils import *
| StarcoderdataPython |
77476 | """Initial models
Revision ID: b41a0816fcda
Revises:
Create Date: 2020-04-14 07:29:41.924866
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): the revision id below is a scrubbed placeholder ('<KEY>').
# It must be restored to the real revision hash (normally embedded in this
# migration's filename) or Alembic will not recognize this migration.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the effect / effect_group / effect_stack tables and their join tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('effect',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.Unicode(length=64), nullable=False),
    sa.Column('prop', sa.UnicodeText(), nullable=False),
    sa.Column('raw_duration', sa.UnicodeText(), nullable=True),
    sa.Column('start', sa.UnicodeText(), nullable=False),
    sa.Column('end', sa.UnicodeText(), nullable=True),
    sa.Column('done', sa.UnicodeText(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_effect_name'), 'effect', ['name'], unique=False)
    op.create_table('effect_group',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.Unicode(length=64), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_effect_group_name'), 'effect_group', ['name'], unique=False)
    op.create_table('effect_stack',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.Unicode(length=64), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_effect_stack_name'), 'effect_stack', ['name'], unique=False)
    # Many-to-many association tables; created last so their foreign keys
    # reference already-existing tables.
    op.create_table('effect_group_to_effect_stack',
    sa.Column('effect_group_id', sa.Integer(), nullable=False),
    sa.Column('effect_stack_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['effect_group_id'], ['effect_group.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['effect_stack_id'], ['effect_stack.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('effect_group_id', 'effect_stack_id')
    )
    op.create_table('effect_to_effect_group',
    sa.Column('effect_id', sa.Integer(), nullable=False),
    sa.Column('effect_group_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['effect_group_id'], ['effect_group.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['effect_id'], ['effect.id'], onupdate='CASCADE', ondelete='RESTRICT'),
    sa.PrimaryKeyConstraint('effect_id', 'effect_group_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(), in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('effect_to_effect_group')
    op.drop_table('effect_group_to_effect_stack')
    op.drop_index(op.f('ix_effect_stack_name'), table_name='effect_stack')
    op.drop_table('effect_stack')
    op.drop_index(op.f('ix_effect_group_name'), table_name='effect_group')
    op.drop_table('effect_group')
    op.drop_index(op.f('ix_effect_name'), table_name='effect')
    op.drop_table('effect')
    # ### end Alembic commands ###
| StarcoderdataPython |
111241 | <gh_stars>0
class A187:
    """Empty placeholder class; defines no behavior."""
    pass
3394046 | <gh_stars>10-100
import uuid
from functools import lru_cache
from io import BytesIO
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse_lazy
from django.views.generic import TemplateView, FormView, View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView
from magic import Magic
from PIL import Image, ImageFont
from .forms import TextOverlayForm, WatermarkForm, SteganographyForm, ItemForm
from .models import Item
from .processors import add_text_overlay, add_watermark, lsb_encode, lsb_decode
@lru_cache(maxsize=1)
def _get_placeholder_image_bytes():
    """Read (once) and memoize the fallback image served for unknown cache keys."""
    with open(settings.PLACEHOLDER_IMAGE, 'rb') as placeholder_file:
        contents = placeholder_file.read()
    return contents
def _create_result_id():
return uuid.uuid4().hex
def _get_cache_key(prefix, result_id):
return '{prefix}-image-{result_id}'.format(prefix=prefix, result_id=result_id)
def _get_source_image_key(result_id):
    # Cache key for the unmodified uploaded image of this result.
    return _get_cache_key('source', result_id)
def _get_result_image_key(result_id):
    # Cache key for the processed (output) image of this result.
    return _get_cache_key('result', result_id)
def _save_image(key, image, format_='png'):
    """Encode a Pillow image into memory and store the raw bytes in the cache."""
    encoded = BytesIO()
    image.save(encoded, format=format_)
    cache.set(key, encoded.getvalue())
def _save_source_image(image, result_id, format_=None):
    """Cache the originally uploaded image; default to its Pillow-detected format."""
    chosen_format = image.format if format_ is None else format_
    _save_image(_get_source_image_key(result_id), image, format_=chosen_format)
def _save_result_image(image, result_id, format_='png'):
    # Processed images are always re-encoded (PNG by default) before caching.
    _save_image(_get_result_image_key(result_id), image, format_=format_)
def _get_image_fp(key):
    """Return a fresh BytesIO over the cached image bytes, or the placeholder on a miss."""
    cached_bytes = cache.get(key)
    if cached_bytes is None:
        cached_bytes = _get_placeholder_image_bytes()
    return BytesIO(cached_bytes)
def _get_image(key):
    # Open the cached bytes (or placeholder) as a Pillow image.
    return Image.open(_get_image_fp(key))
class TextOverlay(FormView):
    """Form page that renders submitted text onto an uploaded image."""
    template_name = 'items/text_overlay.html'
    form_class = TextOverlayForm
    def form_valid(self, form):
        # Process the upload, cache both versions under a fresh result id,
        # then redirect to the result page.
        text = form.cleaned_data['text']
        image = Image.open(form.cleaned_data['image'])
        result_image = add_text_overlay(image, text)
        result_id = _create_result_id()
        _save_source_image(image, result_id)
        _save_result_image(result_image, result_id)
        return HttpResponseRedirect(reverse_lazy('text-overlay-result', kwargs={'result_id': result_id}))
text_overlay = TextOverlay.as_view()
class TextOverlayResult(TemplateView):
    """Shows the source and text-overlaid images for a given result id."""
    template_name = 'items/text_overlay_result.html'
    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        # Fall back to 'unknown' so a malformed URL still renders; the cached
        # image view then serves the placeholder image.
        result_id = kwargs.get('result_id', 'unknown')
        context_data['source_image_src'] = reverse_lazy('cached-image',
                                                        kwargs={'key': _get_source_image_key(result_id)})
        context_data['result_image_src'] = reverse_lazy('cached-image',
                                                        kwargs={'key': _get_result_image_key(result_id)})
        return context_data
text_overlay_result = TextOverlayResult.as_view()
class Watermark(FormView):
    """Form page that composites a watermark image onto an uploaded image."""
    template_name = 'items/watermark.html'
    form_class = WatermarkForm
    def form_valid(self, form):
        # Cache source and watermarked result under a fresh id, then redirect.
        image = Image.open(form.cleaned_data['image'])
        watermark_image = Image.open(form.cleaned_data['watermark_image'])
        result_image = add_watermark(image, watermark_image)
        result_id = _create_result_id()
        _save_source_image(image, result_id)
        _save_result_image(result_image, result_id)
        return HttpResponseRedirect(reverse_lazy('watermark-result', kwargs={'result_id': result_id}))
watermark = Watermark.as_view()
class WatermarkResult(TemplateView):
    """Shows the source and watermarked images for a given result id."""
    template_name = 'items/watermark_result.html'
    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        result_id = kwargs.get('result_id', 'unknown')
        context_data['source_image_src'] = reverse_lazy('cached-image',
                                                        kwargs={'key': _get_source_image_key(result_id)})
        context_data['result_image_src'] = reverse_lazy('cached-image',
                                                        kwargs={'key': _get_result_image_key(result_id)})
        return context_data
watermark_result = WatermarkResult.as_view()
class Steganography(FormView):
    """Form page that hides submitted text in an image via LSB encoding."""
    template_name = 'items/steganography.html'
    form_class = SteganographyForm
    def form_valid(self, form):
        # Embed the text, cache both versions, then redirect to the result page.
        text = form.cleaned_data['text']
        image = Image.open(form.cleaned_data['image'])
        result_image = lsb_encode(text, image)
        result_id = _create_result_id()
        _save_source_image(image, result_id)
        _save_result_image(result_image, result_id)
        return HttpResponseRedirect(reverse_lazy('steganography-result', kwargs={'result_id': result_id}))
steganography = Steganography.as_view()
class SteganographyResult(TemplateView):
    """Shows the images for a result and decodes the hidden text back out."""
    template_name = 'items/steganography_result.html'
    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        result_id = kwargs.get('result_id', 'unknown')
        context_data['source_image_src'] = reverse_lazy('cached-image',
                                                        kwargs={'key': _get_source_image_key(result_id)})
        context_data['result_image_src'] = reverse_lazy('cached-image',
                                                        kwargs={'key': _get_result_image_key(result_id)})
        # Round-trip check: decode the hidden message from the cached result
        # image (the placeholder is decoded if the cache entry expired).
        result_image = _get_image(_get_result_image_key(result_id))
        text = lsb_decode(result_image)
        context_data['text'] = text
        return context_data
steganography_result = SteganographyResult.as_view()
class CachedImage(View):
    """Streams a cached image by cache key, sniffing the MIME type from its bytes."""
    def get(self, request, key=None, **kwargs):
        image_fp = _get_image_fp(key)
        # Sniff the content type from the first 1 KiB of data rather than
        # trusting any stored metadata, then rewind before streaming.
        magic = Magic(mime=True)
        content_type = magic.from_buffer(image_fp.read(1024))
        image_fp.seek(0)
        return HttpResponse(image_fp, content_type=content_type)
cached_image = CachedImage.as_view()
class ItemDetail(DetailView):
    """Standard detail page for a single Item."""
    model = Item
item_detail = ItemDetail.as_view()
class ItemCreate(CreateView):
    """Standard creation page for an Item, validated through ItemForm."""
    model = Item
    form_class = ItemForm
item_create = ItemCreate.as_view()
| StarcoderdataPython |
184826 | from model.gcn import GCN
from torch import nn
import torch
if __name__ == '__main__':
    # Smoke test: run a random 2-example batch (10 nodes, 1024 features each)
    # through the GCN and print the per-example output shapes.
    gcn = GCN(1, 1024)
    node = torch.randn((2, 10, 1024))
    # First 4 nodes per example are labeled positive, the rest negative.
    labels = torch.cat([torch.ones((2, 4)), torch.zeros((2, 6))], dim=-1)
    coords = torch.randn((2, 10, 4))
    image_shapes = (224, 224)
    node_list, human_node_list, unary_list = [], [], []
    for node_i, coords_i, labels_i in zip(node, coords, labels):
        # NOTE(review): this re-binds ``node``, shadowing the batched input
        # tensor above.  It is harmless only because zip() already captured
        # the original tensor; consider renaming.  x_keep/y_keep are unused.
        node, human_node, unary, x_keep, y_keep = gcn(
            labels_i, node_i, coords_i, image_shapes)
        print(node.shape, human_node.shape, unary.shape)
        node_list.append(node)
        human_node_list.append(human_node)
        unary_list.append(unary)
| StarcoderdataPython |
3303111 | <reponame>almonds0166/BCN
import sys; sys.path.append("../")
from pathlib import Path
import re
from matplotlib import pyplot as plt
import numpy as np
from bcn import Results, Dataset, Connections
#from bcn.branches import DirectOnly
#from bcn.branches.uniform import (NearestNeighbor, NearestNeighborOnly,
# NextToNN, NextToNNOnly)
#from bcn.branches.informed import Kappa, IndirectOnly
from plotutils import (
ABBREVIATIONS, ABBREVIATIONS_SAFE, save_fig,
HIGH_CONTRAST
)
# Configuration grid; ``main`` iterates the cartesian product of the tuples.
BATCH_SIZE = 64
PERCENT = 10
DATASET = (Dataset.MNIST,)
CONNECTIONS = (Connections.ONE_TO_9,)
BRANCHES = ("DirectOnly",)
HEIGHT = (16,)
DEPTH = (3,)
TRIAL = (2,)
# NOTE(review): this module prompts on stdin at import time, so it cannot be
# imported non-interactively as-is.
WP_PATH = Path(input("Enter the location of all your *WP* results\n> "))
WP_PATH /= f"{PERCENT}percent" # get subfolder with only the desired results
# plot config
BLUE, RED, YELLOW = HIGH_CONTRAST
LW = 3
FONT_SIZE = 20
plt.rcParams.update({'font.size': FONT_SIZE})
def main():
    """Render one recovery plot for every configuration combination."""
    # itertools.product replaces six levels of nested loops; iteration order
    # (trial varying fastest) is identical to the original nesting.
    from itertools import product
    for height, depth, dataset, connections, branches, trial in product(
            HEIGHT, DEPTH, DATASET, CONNECTIONS, BRANCHES, TRIAL):
        make_plot(height, depth, dataset, connections, branches, trial)
def make_plot(height, depth, dataset, connections, branches, trial, *, show=False):
    """
    Plot baseline SGD training vs. weight-perturbation (WP) recovery for one
    model configuration, save the figure, and print/return LaTeX figure code.
    """
    h = height; w = h; d = depth
    b = ABBREVIATIONS[branches]
    safe_b = ABBREVIATIONS_SAFE[branches]
    c = connections.value
    # Result pickles: the plain file holds baseline + WP recovery; the "o"
    # suffix holds the reference SGD run used for the best-possible lines.
    fname_wp = (
        f"results_{h}x{w}x{d}@{c}-"
        f"{branches}.{dataset.name}.b{BATCH_SIZE}.t{trial}.pkl"
    )
    fname_sgd = (
        f"results_{h}x{w}x{d}@{c}-"
        f"{branches}.{dataset.name}.b{BATCH_SIZE}.t{trial}o.pkl"
    )
    loc_wp = WP_PATH / fname_wp
    loc_sgd = WP_PATH / fname_sgd
    assert loc_wp.exists(), f"wp file {fname_wp!r} must exist"
    assert loc_sgd.exists(), f"sgd file {fname_sgd!r} must exist"
    fig, axes = plt.subplots(
        2, 2,
        gridspec_kw={"width_ratios": [1, 1.618]},
        figsize=(16,9),
    )
    # baseline results
    r = Results()
    r.load(loc_wp)
    wp_steps = r.step
    # First 100 loss entries / 101 F1 entries are the pre-fault baseline --
    # presumably 100 training epochs; TODO confirm against the training script.
    tl = r.train_losses[:100]
    f1 = r.f1_scores[:101]
    # NOTE(review): f1_before_fault is computed but never used below.
    f1_before_fault = f1[-1]
    axes[0,0].plot(tl, color=BLUE, linewidth=LW, label="SGD")
    axes[1,0].plot(f1, color=BLUE, linewidth=LW)
    # wp recovery
    wp_tl = r.train_losses[100:]
    wp_f1 = r.f1_scores[101:]
    f1_after_fault = wp_f1[0]
    f1_after_recovery = wp_f1[-1]
    axes[0,1].plot(wp_tl, color=RED, linewidth=LW, label="WP")
    axes[1,1].plot(wp_f1, color=RED, linewidth=LW)
    # Annotate first and last values of each recovery curve.
    axes[0,1].text(0, wp_tl[0]+.05, f"{wp_tl[0]:.2f}", ha="left", va="bottom")
    axes[0,1].text(len(wp_tl), wp_tl[-1]+.05, f"{wp_tl[-1]:.2f}", ha="right", va="bottom")
    axes[1,1].text(0, wp_f1[0]-.05, f"{100*wp_f1[0]:.1f}%", ha="left", va="top")
    axes[1,1].text(len(wp_f1), wp_f1[-1]-.05, f"{100*wp_f1[-1]:.1f}%", ha="right", va="top")
    # sgd best possible
    r = Results()
    r.load(loc_sgd)
    best_tl = min(r.train_losses)
    best_f1 = max(r.f1_scores)
    # Fraction of the post-fault F1 gap recovered, relative to the SGD reference.
    f1_rel_recovery = (f1_after_recovery - f1_after_fault) / (best_f1 - f1_after_fault)
    axes[0,1].axhline(best_tl, linestyle="--", color=YELLOW, linewidth=LW, label="SGD")
    axes[1,1].axhline(best_f1, linestyle="--", color=YELLOW, linewidth=LW)
    # tidy up
    axes[0,0].set_xticks(())
    axes[0,1].set_xticks(())
    axes[0,1].set_yticks(())
    axes[1,1].set_yticks(())
    axes[1,0].set_xlabel("Epochs")
    axes[1,1].set_xlabel("Perturbation steps")
    axes[0,0].set_ylabel("Train loss")
    axes[1,0].set_ylabel("$F_1$ score")
    axes[0,0].set_title("Baseline")
    axes[0,1].set_title("Recovery from fault")
    # Share y ranges across baseline and recovery panels for comparability.
    min_y_tl = min(min(tl), min(wp_tl), best_tl)
    max_y_tl = max(max(tl), max(wp_tl), best_tl)
    min_y_f1 = min(min(f1), min(wp_f1), best_f1)
    max_y_f1 = max(max(f1), max(wp_f1), best_f1)
    ylim_tl = (min_y_tl-.1, 1.05*max_y_tl)
    ylim_f1 = (0, 1.05)
    axes[0,0].set_ylim(ylim_tl)
    axes[0,1].set_ylim(ylim_tl)
    axes[1,0].set_ylim(ylim_f1)
    axes[1,1].set_ylim(ylim_f1)
    axes[0,0].legend()
    axes[0,1].legend()
    title = f"{h}x{w}x{d} 1-to-{c} with {b} branches, trial {trial}, recovery on {dataset.value}"
    plt.suptitle(title)
    # render
    fig.tight_layout()
    fname = f"{h}x{w}x{d}@{c}-{safe_b}.{dataset.name}.t{trial}.png"
    save_fig(plt, f"fig_wp_scores/{PERCENT}percent/{safe_b}/", fname, show)
    # code
    # Emit a ready-to-paste LaTeX figure environment referencing the saved PNGs.
    short_caption = (
        f"Recovery from fault by weight perturbation, "
        f"for {h}x{w}x{d} with {b} branches, trial {trial}"
    )
    caption = (
        f"Model recovery from {PERCENT}\\% applied fault by {wp_steps} steps of "
        f"weight perturbation, for a {h}x{w}x{d} BCN with {b} branches (trial {trial}), "
        f"evaluated on {dataset.value}. "
        f"(a) $F_1$ score recovered from {100*f1_after_fault:.1f}\\% to "
        f"{100*f1_after_recovery:.1f}\\%, a change of {f1_rel_recovery:+.2f} "
        f"relative to the reference. "
        f"(b) Connected core (\\textcolor{{bluecomment}}{{blue}}), and locations of dead neurons "
        f"(\\textcolor{{redcomment}}{{red}})."
    )
    lines = [
        "% Generated by ``scripts/fig_wp_scores.py``",
        "\\begin{figure}[h]",
        "\\centering",
        "\\begin{subfigure}[b]{\\textwidth}",
        "\\centering",
        f"\\includegraphics[width=\\textwidth]{{{h}x{w}x{d}@{c}-{safe_b}.{dataset.name}.t{trial}.png}}",
        "\\caption{Recovery curves}",
        f"\\label{{fig:recovery_{h}x{w}x{d}@{c}-{safe_b}.{dataset.name}.t{trial}:scores}}",
        "\\end{subfigure}",
        "\\hfill",
        "\\begin{subfigure}[b]{\\textwidth}",
        "\\centering",
        f"\\includegraphics[width=\\textwidth]{{{h}x{w}x{d}@{c}-{safe_b}.{dataset.name}.t{trial}.fault.png}}",
        "\\caption{Connected core and fault}",
        f"\\label{{fig:recovery_{h}x{w}x{d}@{c}-{safe_b}.{dataset.name}.t{trial}:fault}}",
        "\\end{subfigure}",
        f"\\caption[{short_caption}]{{{caption}}}",
        f"\\label{{fig:recovery_{h}x{w}x{d}@{c}-{safe_b}.{dataset.name}.t{trial}}}",
        "\\end{figure}",
    ]
    code = "\n".join(lines)
    print("```latex")
    print(code)
    print("```")
    return code
if __name__ == "__main__":
_ = main() | StarcoderdataPython |
1732621 | #
# Tests for the lithium-ion half-cell SPMe model
# This is achieved by using the {"working electrode": "positive"} option
#
import pybamm
import unittest
from tests import BaseUnitTestLithiumIonHalfCell
class TestSPMeHalfCell(BaseUnitTestLithiumIonHalfCell, unittest.TestCase):
    """Run the shared half-cell test suite against the SPMe model class."""
    def setUp(self):
        # The base test class reads self.model to construct the half-cell model.
        self.model = pybamm.lithium_ion.SPMe
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| StarcoderdataPython |
1772905 | <reponame>dexy/cashew
### "import"
from example.classes import Data
### "plugins"
Data.plugins
import example.classes1
Data.plugins
### "example-data"
example_data = [{
"foo" : 123,
"bar" : 456
}]
### "json-example-type"
json_data = Data.create_instance('json', example_data)
type(json_data)
### "csv-example"
csv_data = Data.create_instance('csv', example_data)
csv_data.present()
### "json-example"
json_data = Data.create_instance('json', example_data)
json_data.present()
| StarcoderdataPython |
3324675 | <filename>Energy Transport/Heat Diffusion/simple_1d_transient_diffusion.py<gh_stars>0
from math import *
import matplotlib.pyplot as plt
from matplotlib import style
style.use('seaborn')
from live_plot import LivePlot
live_plot = LivePlot(window_title='Unidimensional Transient Diffusion', xlabel='Width', ylabel='Temperature')
def rod_mean(rod):
    """
    One explicit diffusion step: return a new list where each value is the
    mean of its two neighbours; each end point averages itself with its
    single neighbour.

    Fixes the original's IndexError on a single-element rod (its first
    branch unconditionally read ``rod[1]``); empty and one-element rods are
    now returned unchanged (as a copy).  The input list is never modified.

    :param rod: list of temperatures.
    :return: new list of the same length.
    """
    if len(rod) < 2:
        return list(rod)
    new_rod = [(rod[0] + rod[1]) / 2]
    for i in range(1, len(rod) - 1):
        new_rod.append((rod[i - 1] + rod[i + 1]) / 2)
    new_rod.append((rod[-2] + rod[-1]) / 2)
    return new_rod
n_points = 100
# Initial temperature profile: one slow sine wave plus a faster half-amplitude
# ripple, sampled on n_points grid cells.
rod = [sin(2*pi*(i/n_points)) + (1/2)*sin(20*(i/n_points)) for i in range(n_points)]
x_rod = [i for i in range(len(rod))]
# Animate 150 diffusion steps.
for i in range(150):
    live_plot.plot(x_rod, rod, ylim=[-2, 2])
    # NOTE(review): this inner loop runs exactly once (k unused); presumably a
    # leftover knob for taking several diffusion steps per rendered frame.
    for k in range(1):
        rod = rod_mean(rod)
| StarcoderdataPython |
186433 | """
An underground railway system is keeping track of customer travel times between different stations.
They are using this data to calculate the average time it takes to travel from one station to another.
Implement the UndergroundSystem class:
- void checkIn(int id, string stationName, int t)
A customer with a card ID equal to id, checks in at the station stationName at time t.
A customer can only be checked into one place at a time.
- void checkOut(int id, string stationName, int t)
A customer with a card ID equal to id, checks out from the station stationName at time t.
- double getAverageTime(string startStation, string endStation)
Returns the average time it takes to travel from startStation to endStation.
The average time is computed from all the previous traveling times from startStation to endStation that happened
directly, meaning a check in at startStation followed by a check out from endStation.
- The time it takes to travel from startStation to endStation may be different from the time it takes to travel from
endStation to startStation.
There will be at least 1 customer that has traveled from startStation to endStation before getAverageTime is called.
You may assume all calls to the checkIn and checkOut methods are consistent. If a customer checks in at time t1 then
checks out at time t2, then t1 < t2. All events happen in chronological order.
"""
import collections
class UndergroundSystem:
    """
    Tracks passenger check-ins/check-outs and answers average-travel-time
    queries per directed station pair.

    Instead of keeping every individual journey duration (which grows without
    bound), each directed pair stores a running [total_time, trip_count]
    pair, so checkOut and getAverageTime stay O(1) in time with constant
    memory per pair.  The public API and its results are unchanged.

    Time complexity: O(1) per operation.
    Space complexity: O(P + S^2), where S is the number of stations on the
    network and P is the number of passengers travelling concurrently.
    """

    def __init__(self):
        # <card id, (start station, check-in time)> for journeys in progress.
        self.user_itineraries = {}
        # <"start-end", [total travel time, number of trips]>
        self.travel_times = collections.defaultdict(lambda: [0, 0])

    def checkIn(self, id: int, stationName: str, t: int) -> None:
        """Record that card *id* entered *stationName* at time *t*."""
        self.user_itineraries[id] = (stationName, t)

    def checkOut(self, id: int, stationName: str, t: int) -> None:
        """Complete the journey for card *id*, accumulating its duration."""
        # A check-out without a matching check-in is silently ignored,
        # matching the original behavior.
        if id in self.user_itineraries:
            from_station, start_time = self.user_itineraries.pop(id)
            totals = self.travel_times[self.get_direction_key(from_station, stationName)]
            totals[0] += t - start_time
            totals[1] += 1

    def getAverageTime(self, startStation: str, endStation: str) -> float:
        """Average duration of all completed startStation -> endStation trips."""
        # Problem constraints guarantee at least one such trip exists.
        total_time, trip_count = self.travel_times[self.get_direction_key(startStation, endStation)]
        return total_time / trip_count

    def get_direction_key(self, from_station: str, to_station: str) -> str:
        """Key identifying the directed journey from_station -> to_station."""
        return f"{from_station}-{to_station}"
if __name__ == '__main__':
    # Smoke test replaying the LeetCode example scenario end to end.
    u_sys_1 = UndergroundSystem()
    u_sys_1.checkIn(45, "Leyton", 3)
    u_sys_1.checkIn(32, "Paradise", 8)
    u_sys_1.checkIn(27, "Leyton", 10)
    u_sys_1.checkOut(45, "Waterloo", 15)  # Customer 45 "Leyton" -> "Waterloo" in 15-3 = 12
    u_sys_1.checkOut(27, "Waterloo", 20)  # Customer 27 "Leyton" -> "Waterloo" in 20-10 = 10
    u_sys_1.checkOut(32, "Cambridge", 22)  # Customer 32 "Paradise" -> "Cambridge" in 22-8 = 14
    assert u_sys_1.getAverageTime("Paradise", "Cambridge") == 14  # One trip "Paradise" -> "Cambridge", (14) / 1 = 14
    assert u_sys_1.getAverageTime("Leyton", "Waterloo") == 11  # Two trips "Leyton" -> "Waterloo", (10 + 12) / 2 = 11
    u_sys_1.checkIn(10, "Leyton", 24)
    assert u_sys_1.getAverageTime("Leyton", "Waterloo") == 11  # return 11.0
    u_sys_1.checkOut(10, "Waterloo", 38)  # Customer 10 "Leyton" -> "Waterloo" in 38-24 = 14
    assert u_sys_1.getAverageTime("Leyton", "Waterloo") == 12  # Three trips "Leyton"->"Waterloo", (10+12+14) / 3 = 12
| StarcoderdataPython |
107667 | from pathlib import Path
import numpy as np
import pandas as pd
import nibabel as nib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
color_tables_dir = Path(__file__).parent
class Parcellation:
    """A brain parcellation (label volume) stored as a NIfTI file.

    Subclasses must provide a ``color_table`` attribute mapping label
    numbers to structure names and RGB colors.
    """
    def __init__(self, parcellation_path):
        self.parcellation_path = Path(parcellation_path)
        self._label_map = None  # lazily loaded by the label_map property
    @property
    def label_map(self):
        """Label volume as a uint16 NumPy array, loaded on first access."""
        if self._label_map is None:
            label_map_nii = nib.load(self.parcellation_path)
            # NOTE(review): get_data() is deprecated in recent nibabel in
            # favour of get_fdata(); kept as-is to preserve behaviour.
            self._label_map = label_map_nii.get_data().astype(np.uint16)
        return self._label_map
    def get_resected_structures(self, resection_seg_path, ignore=None):
        """Return (names, voxel counts, resected fractions) for structures
        overlapping the resection mask.

        ignore: optional list of substrings; structures whose name contains
        any of them are skipped.
        """
        if ignore is None:
            ignore = []
        resected_dict = self.get_resected_labels_and_counts(resection_seg_path)
        structures = []
        for resected_number, num_voxels_resected in resected_dict.items():
            structure_name = self.color_table.get_structure_from_label_number(
                resected_number)
            ignore_this = False
            for substring_to_ignore in ignore:
                if substring_to_ignore in structure_name:
                    ignore_this = True
                    break
            if ignore_this:
                continue
            # Fraction of the whole structure that was resected.
            num_voxels_parcellation = np.count_nonzero(
                self.label_map == resected_number)
            ratio = num_voxels_resected / num_voxels_parcellation
            structures.append((
                structure_name,
                num_voxels_resected,
                ratio,
            ))
        # Transpose [(name, n, ratio), ...] into (names, counts, ratios).
        return list(zip(*structures))
    def get_resected_labels_and_counts(self, resection_seg_path):
        """Map each parcellation label inside the resection mask to its voxel count."""
        mask_nii = nib.load(resection_seg_path)
        mask = mask_nii.get_data() > 0
        masked_values = self.label_map[mask]
        unique, counts = np.unique(masked_values, return_counts=True)
        resected_dict = dict(zip(unique, counts))
        return resected_dict
    def print_percentage_of_resected_structures(self,
                                                resection_seg_path,
                                                hide_zeros=True):
        """Print two reports: per-structure resected fraction, and the
        composition of the resection volume by structure."""
        structures, voxels, ratios = self.get_resected_structures(
            resection_seg_path)
        sort_by_ratio = np.argsort(ratios)
        print('Percentage of each resected structure:')
        for idx in reversed(sort_by_ratio):
            ratio = ratios[idx]
            structure = structures[idx]
            percentage = int(ratio * 100)
            if percentage == 0 and hide_zeros:
                continue
            structure_pretty = structure.replace('-', ' ')
            print(f'{percentage:3}% of {structure_pretty}')
        print()
        sort_by_voxels = np.argsort(voxels)
        total_voxels = sum(voxels)
        print('The resection volume is composed of:')
        for idx in reversed(sort_by_voxels):
            ratio = voxels[idx] / total_voxels
            structure = structures[idx]
            percentage = int(ratio * 100)
            if percentage == 0 and hide_zeros:
                continue
            structure_pretty = structure.replace('-', ' ')
            print(f'{percentage:3}% is {structure_pretty}')
    def plot_pie(
            self,
            resection_seg_path,
            title=None,
            show=True,
            pct_threshold=2,
            output_path=None,
            ignore=None,
            ):
        """Pie chart of the resection volume composition.

        Slices/labels below pct_threshold percent are left unlabelled.
        Returns the matplotlib Figure.
        """
        names, voxels, _ = self.get_resected_structures(
            resection_seg_path, ignore=ignore)
        colors = [
            self.color_table.get_color_from_structure_name(name)
            for name in names
        ]
        fig, ax = plt.subplots()
        sort_by_voxels = np.argsort(voxels)[::-1]  # descending order
        voxels = np.array(voxels)[sort_by_voxels]
        percentages = (voxels / voxels.sum()) * 100
        names = np.array(names)[sort_by_voxels]
        colors = np.array(colors)[sort_by_voxels]
        # Hide some values
        def my_autopct(pct):
            return f'{int(pct)}%' if pct > pct_threshold else ''
        labels = names[:]
        for i, pct in enumerate(percentages):
            if pct <= pct_threshold:
                labels[i] = ''
        ax.pie(
            percentages,
            labels=labels,
            colors=colors,
            shadow=False,
            autopct=my_autopct,
            pctdistance=0.7,
        )
        if title is not None:
            ax.set_title(title)
        plt.tight_layout()
        if output_path is not None:
            fig.savefig(output_path, dpi=400)
        if show:
            plt.show()
        return fig
    def plot_bars(
            self,
            resection_seg_path,
            title=None,
            show=True,
            output_path=None,
            ignore=None,
            ):
        """Horizontal bar chart of the resected fraction per structure.

        Returns the matplotlib Figure.
        """
        names, _, ratios = self.get_resected_structures(
            resection_seg_path, ignore=ignore)
        colors = [
            self.color_table.get_color_from_structure_name(name)
            for name in names
        ]
        fig, ax = plt.subplots()
        sort_by_ratios = np.argsort(ratios)
        ratios = np.array(ratios)[sort_by_ratios]
        percentages = ratios * 100
        names = np.array(names)[sort_by_ratios]
        colors = np.array(colors)[sort_by_ratios]
        y_pos = np.arange(len(names))
        ax.barh(
            y_pos,
            percentages,
            align='center',
            color=colors,
            tick_label=names,
        )
        ax.set_axisbelow(True)  # https://stackoverflow.com/a/39039520
        ax.grid()
        ax.set_xlim((0, 105))
        ax.xaxis.set_major_formatter(mtick.PercentFormatter())
        if title is not None:
            ax.set_title(title)
        plt.tight_layout()
        if output_path is not None:
            fig.savefig(output_path, dpi=400)
        if show:
            plt.show()
        return fig
    def is_valid_number(self, number):
        """True when `number` is a label present in the color table."""
        return self.color_table.is_valid_number(number)
class GIFParcellation(Parcellation):
    """Parcellation whose labels follow the GIF (BrainAnatomyLabelsV3_0) table."""
    def __init__(self, parcellation_path):
        super().__init__(parcellation_path)
        self.color_table = GIFColorTable()
class FreeSurferParcellation(Parcellation):
    """Parcellation whose labels follow the FreeSurfer color table."""
    def __init__(self, parcellation_path):
        super().__init__(parcellation_path)
        self.color_table = FreeSurferColorTable()
class ColorTable:
    """Maps label numbers to structure names and RGB colors.

    Subclasses populate ``self._data_frame``: a pandas DataFrame indexed by
    label number, with the columns listed in ``self.fieldnames``.
    """
    def __init__(self):
        self.fieldnames = (
            'structure',
            'red',
            'green',
            'blue',
            'alpha',
        )
    def get_value_from_label_number(self, label_number, key):
        """Return column `key` for `label_number`, or a placeholder string
        when the label is not in the table."""
        try:
            value = self._data_frame.loc[label_number][key]
        except KeyError:
            # Fixed typo in the placeholder message ("Unkown" -> "Unknown").
            value = f'[Unknown label: {label_number}]'
        return value
    def get_row_from_structure_name(self, name):
        """Return the (possibly empty) rows whose structure equals `name`."""
        mask = self._data_frame['structure'] == name
        row = self._data_frame.loc[mask]
        return row
    def get_value_from_structure_name(self, name, key):
        """Return column `key` (as a Series) for the rows named `name`."""
        row = self.get_row_from_structure_name(name)
        value = row[key]
        return value
    def get_structure_from_label_number(self, label_number):
        """Return the structure name for a label number."""
        return self.get_value_from_label_number(label_number, 'structure')
    def get_color_from_structure_name(self, name):
        """Return the structure's RGB color normalised to [0, 1].

        Unknown structures yield the plain tuple (0, 0, 0) (black).
        """
        row = self.get_row_from_structure_name(name)
        if row.empty:
            color = 0, 0, 0
        else:
            color = [row[c].values for c in ('red', 'green', 'blue')]
            color = np.hstack(color)
            color = np.array(color) / 255
        return color
    def is_valid_number(self, number):
        """True when `number` is a label in the color table."""
        return number in self._data_frame.index
class GIFColorTable(ColorTable):
    """Color table for the GIF parcellation (BrainAnatomyLabelsV3_0.txt)."""
    def __init__(self):
        ColorTable.__init__(self)
        self.color_table_path = color_tables_dir / 'BrainAnatomyLabelsV3_0.txt'
        self._data_frame = self.read_color_table()
    def read_color_table(self):
        """Parse the whitespace-separated label table into a DataFrame
        indexed by label number."""
        df = pd.read_csv(
            self.color_table_path,
            index_col=0,
            names=self.fieldnames,
            sep=r'\s+',  # there is a double space in the file
        )
        return df
class FreeSurferColorTable(ColorTable):
    """Color table for FreeSurfer parcellations (FreeSurferLabels.ctbl)."""
    def __init__(self):
        ColorTable.__init__(self)
        self.color_table_path = color_tables_dir / 'FreeSurferLabels.ctbl'
        self._data_frame = self.read_color_table()
    def read_color_table(self):
        """Parse the .ctbl file into a DataFrame indexed by label number.

        The first two lines are header comments and are skipped.
        """
        df = pd.read_csv(
            self.color_table_path,
            index_col=0,
            names=self.fieldnames,
            sep=r'\s+',
            skiprows=2,
        )
        return df
| StarcoderdataPython |
1761475 | <reponame>woodenCaliper/UpdateFusionPathForLogicool
#Author-woodenCaliper
#Description-Automates re-linking the Logicool (Logitech) profile each time a Fusion 360 update changes the path of the executable
import adsk.core, adsk.fusion, adsk.cam, traceback
import xml.etree.ElementTree as ET
import glob
import shutil, datetime
def serchTargetFile():
    """Find the Logitech Gaming Software profile that targets Fusion360.exe.

    Returns (profile_xml_path, exe_path_recorded_in_profile), or
    (None, None) when no profile targets Fusion 360.
    (The function name keeps the original 'serch' typo for caller
    compatibility.)
    """
    # Collect the paths of all Logicool (Logitech) profile XML files
    logicoolProfileList = glob.glob(r"C:\Users\*\AppData\Local\Logitech\Logitech Gaming Software\profiles\*.xml")
    targetFile = 0
    oldFusionPath = 0
    # Look for the profile whose target is Fusion 360
    for profileFilePath in logicoolProfileList:
        xmlTree = ET.parse(profileFilePath)
        root = xmlTree.getroot()
        rootTag = root.tag
        # Extract the XML namespace ("{uri}") from the root tag
        ns = "{"+rootTag[rootTag.index("{") + 1:rootTag.rindex("}")] +"}"
        profilesTag = root
        profileTag =0
        # Keep the last <profile> element found in this file
        for i in profilesTag.iter(ns+"profile"):
            profileTag = i
        print(profileTag.attrib["name"])
        targetTag=0
        # Keep the last <target> element (holds the game exe path)
        for i in profileTag.iter(ns+"target"):
            targetTag=i
        if targetTag!=0:
            print(targetTag.attrib["path"])
            gamePath = targetTag.attrib["path"]
            if "FUSION360.EXE" in gamePath.upper():
                targetFile = profileFilePath
                oldFusionPath = targetTag.attrib["path"]
                return (targetFile, oldFusionPath)
    return (None, None)
def replaceFusionPath(targetFile, oldFusionPath):
    """Rewrite the profile so it points at the currently installed Fusion360.exe.

    Returns the new exe path on success, or False when the recorded path is
    already current. A timestamped backup copy of the profile file is
    written before modifying it.
    """
    newFusionPath = glob.glob(r"C:\Users\*\AppData\Local\Autodesk\webdeploy\production\*\Fusion360.exe")
    # NOTE(review): assumes at least one installed copy exists; raises
    # IndexError otherwise.
    newFusionPath = newFusionPath[0]
    if oldFusionPath != newFusionPath:
        # backup (timestamp is appended directly to the original filename)
        now = datetime.datetime.today().strftime("%Y_%m_%d_%Hh%Mm%Ss")
        shutil.copyfile(targetFile, targetFile + now)
        with open(targetFile, encoding="utf-8") as f:
            dataLines = f.read()
        with open(targetFile, encoding="utf-8", mode="w") as f:
            dataLines = dataLines.replace(oldFusionPath, newFusionPath)
            f.write(dataLines)
        return newFusionPath
    return False
def run(context):
    """Add-in entry point invoked by Fusion 360."""
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface
        (targetFile, oldFusionPath) = serchTargetFile()
        newFusionPath = replaceFusionPath(targetFile, oldFusionPath)
        if newFusionPath:
            ui.messageBox(
                "Change success" +"\n"
                + "\n"
                + oldFusionPath +"\n"
                +"↓"+"\n"
                + newFusionPath
            )
        else:
            ui.messageBox("No need change")
    # Broad except: surface any failure in the Fusion 360 UI
    except:
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
1655244 | <reponame>TheSampaio/DesktopVirtualAssistant
from os import truncate
from Includes import os, shutil, sleep
from AssistantConfig import voice, USERPATH
TIME = 10
def extension_type(event):  # Get file's extension
    """Return the extension (text after the last '.') of event.src_path.

    The original guard ``if ext != 'tmp' or 'crdownload':`` was always true
    ('crdownload' is a non-empty, truthy string), so the extension was
    returned unconditionally; that dead branch is removed here. Callers
    filter out 'tmp'/'crdownload' themselves. Raises ValueError when the
    path contains no '.'.
    """
    return event.src_path[event.src_path.rindex('.') + 1:]
# --- Create respective folders for the extensions ----- #
def isText(event):
    """True when the file's extension is 'txt'."""
    return extension_type(event) == 'txt'
def isPDF(event):
    """True when the file's extension is 'pdf'."""
    return extension_type(event) == 'pdf'
def isMP3(event):
    """True for common audio-file extensions."""
    return extension_type(event) in ('mp3', 'wav', 'm4a', 'flac', 'aiff', 'ogg')
def isImage(event):
    """True for common image-file extensions."""
    return extension_type(event) in ('png', 'jpg', 'jpeg', 'bmp', 'gif', 'raw', 'ico')
def isVideo(event):
    """True for common video-file extensions."""
    return extension_type(event) in ('mov', 'mp4', 'avi', 'flv', 'ts')
def isWord(event):
    """True for word-processor document extensions."""
    return extension_type(event) in ('doc', 'docx', 'odf')
def isSpreadsheet(event):
    """True for spreadsheet extensions."""
    return extension_type(event) in ('xls', 'xlsx')
def isPresentation(event):
    """True for presentation-file extensions."""
    return extension_type(event) in ('ppt', 'pptx')
def isCompacted(event):
    """True for archive/disk-image extensions."""
    return extension_type(event) in ('rar', 'zip', '7z', 'iso')
def isCode(event):
    """True for source-code and web-related file extensions."""
    return extension_type(event) in ('py', 'jl', 'cs', 'js', 'php', 'html', 'sql', 'css', 'c', 'h', 'cpp', 'java', 'asp', 'aspx', 'axd', 'asx', 'asmx', 'ashx', 'cfm', 'yaws', 'swf', 'htm', 'xhtml', 'jhtml', 'jsp', 'jspx', 'wss', 'do', 'cmd', 'action', 'pl', 'phtml', 'php3', 'php4', 'rb', 'rhtml', 'shtml', 'rss', 'svg')
def isExecutable(event):
    """True for installer/executable extensions."""
    return extension_type(event) in ('exe', 'msi', 'run', 'deb')
def isInvoice(event):
    """True when the file is an XML invoice.

    BUG FIX: ``ext in ('xml')`` tested *substring* membership in the plain
    string 'xml' (so 'x', 'm', 'l', 'xm' and 'ml' all matched); use
    equality instead.
    """
    return extension_type(event) == 'xml'
def isTorrent(event):
    """True when the file is a .torrent file.

    BUG FIX: ``ext in ('torrent')`` was a substring test on the string
    'torrent' (e.g. 'torr' or 'rent' matched); use equality instead.
    """
    return extension_type(event) == 'torrent'
def isPackage(event):
    """True when the file is a .package file.

    BUG FIX: ``ext in ('package')`` was a substring test on the string
    'package' (e.g. 'pack' or 'age' matched); use equality instead.
    """
    return extension_type(event) == 'package'
# ------------------------------------------------------ #
def makeFolder(event, foldername): # Create folders
    """Ensure `foldername` exists under the user's Downloads directory and
    return its absolute path.

    Side effect: changes the process working directory to Downloads, and
    announces newly detected files via `voice`.
    """
    os.chdir('{}\\Downloads'.format(USERPATH))
    if extension_type(event) not in ('tmp', 'crdownload'):
        # "New file detected"
        voice('Novo arquivo detectado')
    if os.path.exists(foldername):
        # voice('A pasta destino já existe')
        # voice('Pulando criação')
        return os.getcwd() + os.sep + str(foldername)
    else:
        os.mkdir(str(foldername))
        return os.getcwd() + os.sep + str(foldername)
def moveToFolder(event, path_to_new_folder): # Move files to inside folders
    """Move the detected file into `path_to_new_folder` after a TIME-second delay.

    When the destination already contains a file with the same name, the
    source file is deleted instead, to avoid duplicates. Status messages
    (in Portuguese) are spoken via `voice`.
    """
    if extension_type(event) not in ('tmp', 'crdownload'):
        try:
            # "Moving file in {TIME} seconds..."
            voice('Movendo arquivo em {} segundos...'.format(TIME))
            sleep(TIME)
            shutil.move(event.src_path, path_to_new_folder)
            if event.src_path:
                # "File moved successfully"
                voice('Arquivo movido com sucesso')
            else:
                pass
        # NOTE(review): bare except assumes the only failure is a name
        # collision at the destination — other errors are masked too.
        except:
            # "The file already exists in the destination folder"
            voice('O arquivo já existe na pasta destino')
            fileName = event.src_path
            file = fileName.replace('{}\\Downloads\\'.format(USERPATH), '')
            try:
                os.remove(file) # Delete duplicated files
                # "I deleted the file to avoid duplicates"
                voice('Deletei o arquivo para evitar duplicidades')
            except:
                pass
            pass
| StarcoderdataPython |
3322157 | <gh_stars>0
from torchvision import transforms
from dataset.mscoco import MSCOCO
from torch.utils.data import DataLoader
from pytorch_lightning import LightningDataModule
from torch import tensor
class BaselineDataModule(LightningDataModule):
    """LightningDataModule serving MSCOCO with contrastive-style train augmentations."""
    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size
        # Deterministic eval pipeline: resize, center-crop, ImageNet stats.
        self.test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        # random crop, color jitter etc
        self.train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
            transforms.RandomApply([
                transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
            ], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([transforms.GaussianBlur([1, 1])], p=0.5), # perhaps this blur is too much
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # NOTE(review): train normalisation uses CIFAR-like stats while
            # eval above uses ImageNet stats — confirm this is intentional.
            transforms.Normalize(mean=[0.491, 0.482, 0.446],
                                 std=[0.247, 0.243, 0.261]),
        ])
        self.train_dataset = MSCOCO(train=True, image_transforms=self.train_transform)
        self.test_dataset = MSCOCO(train=False, image_transforms=self.test_transform)
    def train_dataloader(self):
        # NOTE(review): shuffle is left at the DataLoader default (False) —
        # verify this is intended for training.
        return DataLoader(self.train_dataset, batch_size=self.batch_size,
                          num_workers=48, pin_memory=True, drop_last=True)
    def val_dataloader(self):
        # Validation loader over the held-out test split.
        return DataLoader(self.test_dataset, batch_size=self.batch_size,
                          num_workers=48, pin_memory=True, drop_last=True)
73890 | import json
import re
import ast
all_courses_file = "../vue-app/data/allCourses.json"  # course-catalog JSON
major_reqs_file = "../vue-app/data/course_requirements.json"  # per-major requirement trees (JSON)
def read_data_files(all_courses_file, major_reqs_file):
    """Load the course catalog and major-requirement JSON files.

    Returns the pair (all_courses, major_reqs) of parsed JSON objects.
    """
    def _load_json(path):
        with open(path, 'r') as handle:
            return json.load(handle)
    return _load_json(all_courses_file), _load_json(major_reqs_file)
def parse_condition(condition, threshold=None):
    """Flatten one requirement condition into a list of course numbers.

    A leaf holds a 'req' key ('plain-string' leaves are descriptive text
    and contribute nothing). An 'all' node yields every child's courses;
    an 'any' node greedily takes the first `cutoff` children (1 for
    'select either' / 'select any').

    The `threshold` parameter is kept for interface compatibility but is
    not used by callers.
    """
    if 'req' in condition:
        return [] if condition.get('plain-string', False) else [condition['req']]
    gathered = []
    connection = condition['connection-type']
    if connection == 'all':
        for sub_condition in condition['reqs']:
            gathered.extend(parse_condition(sub_condition))
        return gathered
    if connection == 'any':
        if condition['threshold-desc'] in ('select either', 'select any'):
            remaining = 1
        else:
            remaining = condition['threshold']['cutoff']
        for sub_condition in condition['reqs']:
            if remaining == 0:
                break
            gathered.extend(parse_condition(sub_condition))
            remaining -= 1
        return gathered
    # Unknown connection-type falls through (returns None), as before.
def get_major_reqs(major):
    """
    Return a flat list of course numbers required for a major.

    :param major: major code, e.g. 'major18pm'
    :return: list of course-number strings, produced by flattening each
        top-level requirement condition with parse_condition (may contain
        duplicates).

    Relies on the module-level `major_reqs` dict loaded in ``__main__``.
    """
    # idk????? smth to do w fireroad api?????
    # currently assuming this just returns a list of classes but unsure how to handle weird cases
    # (and most things are weird cases)
    req_dict = major_reqs[major]
    reqs = []
    for condition in req_dict:
        reqs.extend(parse_condition(condition))
    return reqs
def get_course_prereqs(course):
    """Parse a course's prerequisite string into a nested list structure.

    The catalog's "prerequisites" value (e.g. "18.01, (8.01/8.012)") is
    rewritten into a Python-literal string and evaluated: commas become
    list separators, '/'-alternatives become nested lists. Returns []
    when there are no prerequisites.

    Relies on the module-level `all_courses` dict loaded in ``__main__``.
    """
    # also sth to do with the fireroad api/our backend??
    # again will assume this just returns a nice list of prereqs even tho probably fake news
    print(course)  # debug output
    prereq_str = all_courses[course].get("prerequisites", "None")
    if prereq_str in ("None", "''Permission of instructor''"):
        return []
    # Drop "/permission of instructor" alternatives (unwrapping parens).
    prereq_str = re.sub("(\(.*\))/''permission of instructor''", lambda x: x.group(1)[1:-1], prereq_str)
    prereq_str = re.sub("/''permission of instructor''", "", prereq_str)
    # Expand GIR codes into explicit course alternatives.
    GIR_courses = {"GIR:CAL1": "18.01", "GIR:CAL2": "(18.02, 18.022)", "GIR:PHY1": "(8.01, 8.01L, 8.012)",
                   "GIR:PHY2": "(8.02, 8.022)", "GIR:BIOL": "(7.012, 7.013, 7.014, 7.015, 7.016)",
                   "GIR:CHEM": "(3.091, 5.111, 5.112)"}
    for GIR in GIR_courses:
        prereq_str = prereq_str.replace(GIR, GIR_courses[GIR])
    # Treat corequisites as ordinary prerequisites.
    prereq_str = re.sub("''Coreq: ([\w\.]*)''", lambda x: x.group(1), prereq_str)
    prereq_str = re.sub(" ", "", prereq_str)
    prereq_str = re.sub("/", ",", prereq_str)
    # Double the commas so each course number is bracketed by delimiters
    # for the quoting pass below, then collapse them again.
    prereq_str = re.sub(",", ",,", prereq_str)
    prereq_str = "[" + prereq_str + "]"
    # Quote every course number so the string becomes a Python literal.
    prereq_str = re.sub("([\[\]\(\),])([\w\.]+)([\[\]\(\),])",
                        lambda x: x.group(1) + "'" + x.group(2) + "'" + x.group(3), prereq_str)
    prereq_str = re.sub(",,", ",", prereq_str)
    return ast.literal_eval(prereq_str)
def find_offerings(course, start_semester, end_semester):
    """Return the semester numbers in [start_semester, end_semester] when
    `course` is offered.

    Semesters are numbered 2*(year - 2000) + (1 if fall), so fall
    semesters are odd and spring semesters even (see find_schedule).
    Relies on the module-level `all_courses` catalog.
    """
    semesters = list(range(start_semester, end_semester+1))
    course_info = all_courses[course]
    season = None  # parity offset used for "not-offered-year" exclusions
    if not course_info["offered_fall"]:
        # Spring-only course: keep even-numbered semesters.
        semesters = [semester for semester in semesters if semester % 2 == 0]
        season = 1
    elif not course_info["offered_spring"]:
        # Fall-only course: keep odd-numbered semesters.
        semesters = [semester for semester in semesters if semester % 2 == 1]
        season = 2
    # BUG FIX: the year is a string and previously caused a TypeError when
    # subtracted; also, `season` was unbound (NameError) for courses
    # offered in both terms — in that case the exclusion is skipped now.
    if "not-offered-year" in course_info and season is not None:
        year = int(course_info["not-offered-year"].split()[0])
        excluded_semester = 2 * (year - 2000) + season
        # NOTE(review): this removes every same-parity semester from the
        # excluded one onward, not just the single excluded year — confirm
        # this matches the intended catalog semantics.
        for i in range(excluded_semester, end_semester + 1, 2):
            if i in semesters:
                semesters.remove(i)
    return semesters
def find_next_offering(course, start_semester):
    """Return the first semester number >= start_semester in which `course`
    is offered.

    Uses the same numbering as find_offerings; relies on the module-level
    `all_courses` catalog.
    """
    current_season = start_semester % 2
    # Map parity 0 to season code 2 so fall=1, spring=2.
    current_season = current_season + 2 if current_season == 0 else current_season
    course_info = all_courses[course]
    if course_info["offered_fall"] and course_info["offered_spring"]:
        return start_semester
    elif course_info["offered_fall"]:
        course_season = 1
    else:
        course_season = 2
    # Advance one semester when the current season doesn't match.
    offered_semester = start_semester + (current_season != course_season)
    if "not-offered-year" in course_info:
        # BUG FIX: convert the year string to int before arithmetic
        # (previously raised TypeError on this path).
        excluded_semester = 2 * (int(course_info["not-offered-year"].split()[0]) - 2000) + course_season
        if offered_semester == excluded_semester:
            offered_semester += 2
    return offered_semester
def find_schedule(major, start_semester, end_semester = None, past_schedule = None, existing_schedule = None, cost_function = "classes"):
    """Greedily build a schedule {semester_number: [courses]} covering the
    major's requirements plus their transitive prerequisites.

    Returns -1 when the courses do not fit (at 4 per semester) by
    end_semester. NOTE(review): relies on the module-level
    all_courses/major_reqs globals; `cost_function` and
    `existing_schedule` are currently unused.
    """
    # can prob just assume semesters are numbered as 2*(year - 2000) + (1 if fall) or sth
    # past_schedule is a dict from sem number to class we hardcode taking that sem
    # existing_schedule is a dict from sem number to class we want to hardcode taking that sem
    # start_semester is the "current" semester
    # for now let's just assume we minimize classes
    if end_semester is None:
        end_semester = start_semester + 7
    if past_schedule is None:
        past_schedule = {}
    if existing_schedule is None:
        existing_schedule = {}
    # TODO: comment the next few lines back in when we are not testing
    # all_reqs = get_reqs("GIRS")
    all_reqs = get_major_reqs(major)
    print(all_reqs)
    # Hard-coded extra requirements (GIRs/HASS placeholders) for testing.
    all_reqs.extend(["5.111", "7.012", "18.02", "8.02", "24.900", "24.917", "21M.301", "11.011", "21M.600", "14.01", "14.03", "21W.757"])
    new_reqs = list(set(all_reqs)) # idk but there's probably duplicates or sth???
    all_reqs = []
    prereqs = {} # maps course to a list of all its prereqs
    # Breadth-first expansion of the prerequisite closure.
    while (len(new_reqs) > 0):
        next_new_reqs = []
        # print(new_reqs, "help")
        for course in new_reqs:
            all_reqs.append(course)
            prereqs[course] = []
            course_prereqs = get_course_prereqs(course)
            # print("123", course_prereqs)
            for prereq in course_prereqs:
                # Alternatives come back as nested lists; take the first option.
                while not type(prereq) == str:
                    prereq = prereq[0]
                # print("zkdjhsdjflkd", prereq)
                # print("here", course, prereq)
                if prereq in past_schedule:
                    continue
                if not (prereq in all_reqs or prereq in next_new_reqs or prereq in new_reqs):
                    # all_reqs.append(prereq)
                    next_new_reqs.append(prereq)
                # print(course, prereqs[course])
                prereqs[course].append(prereq)
                # print(prereqs[course])
                # print("")
        new_reqs = next_new_reqs
    # Layer the courses: level k holds courses whose prereqs sit in deeper
    # levels; each course is pushed down to its deepest occurrence.
    levels = []
    levels.append(all_reqs)
    curr_level = 0
    while len(levels[curr_level]) > 0:
        curr_level_courses = levels[curr_level]
        next_level = []
        for course in curr_level_courses:
            for prereq in prereqs[course]:
                if not prereq in next_level:
                    next_level.append(prereq)
        levels.append(next_level)
        # print("dskfjsdhfldksf", curr_level, next_level, levels[curr_level])
        for course in next_level:
            levels[curr_level].remove(course)
        # print("dskfjsdhfldksf", curr_level, next_level, levels[curr_level])
        curr_level += 1
    level_lookup = {}
    for level in levels:
        for course in level:
            level_lookup[course] = level
    ans = {}
    for i in range(start_semester, end_semester + 1):
        ans[i] = []
    # added = []
    # assume existing_schedule is sorted, btw idk how to sort a dictionary but can add code later
    # also assume that none of these are terrible or sth
    # for sem in existing_schedule:
    #     sem_courses = existing_schedule[sem]
    #     for course in sem_courses:
    #         ans[sem].append(course)
    #         course_prereqs = get_course_prereqs(course)
    #         for prereq in course_prereqs:
    # temporarily assume four classes a semester, later can "binary search" or sth
    curr_semester = start_semester
    # Separate out the "filler" courses at level 0 that are not a prereq of
    # anything, so they can be used to pad under-full semesters later.
    # NOTE(review): this loop rebinds `prereqs` (the dict) to a list — the
    # dict is not used again afterwards, but the shadowing is fragile.
    no_prereqs = levels[0]
    for course in levels[0]:
        prereqs = get_course_prereqs(course)
        for prereq in prereqs:
            if prereq in no_prereqs:
                no_prereqs.remove(prereq)
    for course in levels[1]:
        prereqs = get_course_prereqs(course)
        for prereq in prereqs:
            if prereq in no_prereqs:
                no_prereqs.remove(prereq)
    for course in no_prereqs:
        levels[0].remove(course)
    print(no_prereqs)
    print(levels)
    # Place courses deepest-level first so prerequisites land earlier.
    for i in range(curr_level - 1, -1, -1):
        if (curr_semester > end_semester):
            return -1
        # print("asjkshdj", i)
        level_courses = levels[i]
        # print("asjkshdj", level_courses)
        for course in level_courses:
            while len(ans[curr_semester]) >= 4:
                curr_semester += 1
                if (curr_semester > end_semester):
                    return -1
            next_offering = find_next_offering(course, curr_semester)
            # print("here", ans[next_offering])
            ans[next_offering].append(course)
            # print(ans[next_offering])
        # Pad the current semester with leftover prereq-free courses.
        while len(no_prereqs) > 0 and len(ans[curr_semester]) < 4:
            course = no_prereqs[0]
            no_prereqs.remove(course)
            ans[curr_semester].append(course)
        curr_semester += 1
    return ans
if __name__ == "__main__":
all_courses, major_reqs = read_data_files(all_courses_file, major_reqs_file)
#print(find_schedule("major6-3", 0))
print(find_schedule("major20", 41))
| StarcoderdataPython |
3211290 | #!/usr/bin/env python3
def write_todo(open_file, todo):
    """Write a single todo as a Markdown bullet item."""
    open_file.write(f' - {todo}\n')
def write_todos_for_module(open_file, todo_list):
    """Write every todo in `todo_list` as a Markdown bullet item."""
    for item in todo_list:
        write_todo(open_file, item)
def write_newline(open_file):
    """Emit a blank line to the output file."""
    open_file.write('\n')
def format_as_details(open_file, todo_list):
    """Render `todo_list` inside a collapsible Markdown <details> block.

    BUG FIX: the summary count previously read the module-level global
    ``num_todos`` (set by the caller's loop); compute it locally from
    `todo_list` instead so the function is self-contained.
    """
    num_todos = len(todo_list)
    open_file.write('    <details>\n')
    open_file.write('    <summary> Todos ({})</summary>\n\n'.format(num_todos))
    write_todos_for_module(open_file, todo_list)
    open_file.write('\n    </details>\n')
def write_header(open_file, title, level=1):
    """Write a Markdown heading of the given level (default h1)."""
    open_file.write(f"{'#' * level} {title}\n")
def make_filepath_link(filepath):
    """Return a Markdown link to `filepath`, relative to the parent directory."""
    return f'[{filepath}](../{filepath})'
def write_filepath(open_file, filepath, index=None):
    """Write a numbered Markdown list item linking to `filepath`.

    BUG FIX / generalization: the item number previously came from the
    module-level loop variable ``i``. An explicit `index` parameter is now
    accepted; when omitted it falls back to the global ``i`` so existing
    callers keep working unchanged.
    """
    if index is None:
        index = i  # legacy behaviour: module-level loop counter
    filepath_link = make_filepath_link(filepath)
    line = str(index) + '. ' + filepath_link + '\n'
    open_file.write(line)
# Parse todos/todos.txt lines of the form "<email> filepath # todo text"
# into d: {email: {filepath: [todo, ...]}}. Underscores are escaped for
# Markdown output.
d = {}
with open('todos/todos.txt', 'r') as open_file:
    for line in open_file:
        if 'TODO' not in line:
            continue
        email, filepath, todo = line.lstrip('<').replace('>', '#').split('#')
        email = email.strip().replace('_', '\_')
        filepath = filepath.strip().replace('_', '\_')
        todo = todo.strip().replace('_', '\_')
        try:
            filepath_to_todos = d[email]
        except KeyError:
            d[email] = {filepath:[todo]}
        else:
            filepath_to_todos.setdefault(filepath, []).append(todo)
# Render the collected todos as todos/README.md: one section per email,
# a numbered file list, and todos collapsed into <details> when a file
# has more than three of them.
with open('todos/README.md', 'w') as open_file:
    for email in d:
        write_header(open_file, email, level=1)
        for i, filepath in enumerate(d[email], start=1):
            write_filepath(open_file, filepath)
            todo_list = d[email][filepath]
            num_todos = len(todo_list)
            if num_todos > 3:
                format_as_details(open_file, todo_list)
            else:
                write_todos_for_module(open_file, todo_list)
            write_newline(open_file)
        write_newline(open_file)
        write_newline(open_file)
| StarcoderdataPython |
152680 | #!/usr/bin/python3
import sys
# stream processing
# streams have groups and garbage.
# groups are delimited by {}.
# groups can contain other groups and garbage.
# garbage is delimited by <>.
# garbage can't contain groups.
# any character followed by ! is cancelled.
# goal is to find total score for all groups.
# group score = 1 + score of containing group.
# step one, remove all the !. from the stream.
def cancelChars(chars):
    """Remove every '!' and the character it cancels from a char list.

    Returns a new list; '!!' cancels the second '!' (so neither survives).
    BUG FIX: the original indexed ``chars[k+1]`` and raised IndexError
    when the input ended with an uncancelled '!'; a trailing '!' is now
    simply dropped. Output is otherwise identical.
    """
    result = []
    i = 0
    while i < len(chars):
        if chars[i] == '!':
            i += 2  # skip the '!' and the character it cancels
        else:
            result.append(chars[i])
            i += 1
    return result
# step two, remove all the <[^>]> from the stream.
def removeGarbage(chars):
    """Count the characters inside garbage regions delimited by '<' and '>'.

    The opening '<' and closing '>' are not counted; a '<' that appears
    while already inside garbage is counted as an ordinary character.
    Input must already have cancelled ('!') characters removed.
    """
    in_garbage = False
    garbage_chars = 0
    for ch in chars:
        if in_garbage:
            if ch == '>':
                in_garbage = False
            else:
                garbage_chars += 1
        elif ch == '<':
            in_garbage = True
    return garbage_chars
def countGroups(chars):
    """Sum the scores of all groups: each group scores its nesting depth.

    Input must already have garbage and cancelled characters removed.
    """
    total = 0
    depth = 0
    for ch in chars:
        if ch == '{':
            depth += 1
        elif ch == '}':
            total += depth
            depth -= 1
    return total
# Unit tests for cancelChars + removeGarbage: each entry maps a raw stream
# to its expected garbage-character count (AoC 2017 day 9, part two).
tt = {
    'a': ('<>', 0),
    'b': ('<random characters>', 17),
    'c': ('<<<<>', 3),
    'd': ('<{!>}>', 2),
    'e': ('<!!>', 0),
    'f': ('<!!!>>', 0),
    'g': ('<{o"i!a,<{i<a>', 10),
}
for k, v in tt.items():
    chars = list(v[0])
    chars = cancelChars(chars)
    result = removeGarbage(chars)
    if result != v[1]:
        print("FAIL: input ", v[0], ": expected ", v[1], ", got ", result, sep="")
# The input is not checked for sanity, just existence.
stream = sys.stdin.readlines()
if len(stream) == 0:
    print("stream missing!")
    sys.exit(1)
# Only the first line of stdin is processed.
chars = list(stream[0])
chars = cancelChars(chars)
print(removeGarbage(chars))
| StarcoderdataPython |
3267512 | <reponame>w60083/SocialNetworkAPI
from django.urls import path, include
from . import views
# Route all requests at the site root to the User app's URLconf.
urlpatterns = [
    path('', include('User.urls')),
]
| StarcoderdataPython |
3293748 | from random import *
# Generates a random graph test case (Python 2 syntax: bare `print`
# statements and `xrange`).
N = 100
# n vertices in [2, N]; m edges in [1, n*(n+1)]
n = randrange(2,N+1)
m = randrange(1,1+(n*(n+1)))
print n,m
# Emit m random edges; endpoints uniform in [1, n] (self-loops and
# duplicate edges are possible).
for i in xrange(m):
    a = randrange(1,n+1)
    b = randrange(1,n+1)
    print a, b
1639028 | <reponame>qfgaohao/keras-io
"""
Title: Using pre-trained word embeddings
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/05/05
Last modified: 2020/05/05
Description: Text classification on the Newsgroup20 dataset using pre-trained GloVe word embeddings.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
"""
## Introduction
In this example, we show how to train a text classification model that uses pre-trained
word embeddings.
We'll work with the Newsgroup20 dataset, a set of 20,000 message board messages
belonging to 20 different topic categories.
For the pre-trained word embeddings, we'll use
[GloVe embeddings](http://nlp.stanford.edu/projects/glove/).
"""
"""
## Download the Newsgroup20 data
"""
# Download and extract the Newsgroup20 archive into ~/.keras/datasets.
data_path = keras.utils.get_file(
    "news20.tar.gz",
    "http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.tar.gz",
    untar=True,
)
"""
## Let's take a look at the data
"""
import os
import pathlib
# The archive unpacks to one sub-directory per newsgroup category;
# list them and peek at one category's files.
data_dir = pathlib.Path(data_path).parent / "20_newsgroup"
dirnames = os.listdir(data_dir)
print("Number of directories:", len(dirnames))
print("Directory names:", dirnames)
fnames = os.listdir(data_dir / "comp.graphics")
print("Number of files in comp.graphics:", len(fnames))
print("Some example filenames:", fnames[:5])
"""
Here's a example of what one file contains:
"""
print(open(data_dir / "comp.graphics" / "38987").read())
"""
As you can see, there are header lines that are leaking the file's category, either
explicitly (the first line is literally the category name), or implicitly, e.g. via the
`Organization` filed. Let's get rid of the headers:
"""
# Read every message, dropping the first 10 header lines (which leak the
# category), and collect parallel lists of texts and integer labels.
samples = []
labels = []
class_names = []
class_index = 0
for dirname in sorted(os.listdir(data_dir)):
    class_names.append(dirname)
    dirpath = data_dir / dirname
    fnames = os.listdir(dirpath)
    print("Processing %s, %d files found" % (dirname, len(fnames)))
    for fname in fnames:
        fpath = dirpath / fname
        # FIX: use a context manager so each file handle is closed promptly
        # (the original leaked one open handle per message).
        with open(fpath, encoding="latin-1") as f:
            content = f.read()
        lines = content.split("\n")
        lines = lines[10:]
        content = "\n".join(lines)
        samples.append(content)
        labels.append(class_index)
    class_index += 1
print("Classes:", class_names)
print("Number of samples:", len(samples))
"""
There's actually one category that doesn't have the expected number of files, but the
difference is small enough that the problem remains a balanced classification problem.
"""
"""
## Shuffle and split the data into training & validation sets
"""
# Shuffle the data
# Re-seeding with the same value shuffles samples and labels in the same
# order, keeping the two lists aligned.
seed = 1337
rng = np.random.RandomState(seed)
rng.shuffle(samples)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
# Extract a training & validation split
validation_split = 0.2
num_validation_samples = int(validation_split * len(samples))
train_samples = samples[:-num_validation_samples]
val_samples = samples[-num_validation_samples:]
train_labels = labels[:-num_validation_samples]
val_labels = labels[-num_validation_samples:]
"""
## Create a vocabulary index
Let's use the `TextVectorization` to index the vocabulary found in the dataset.
Later, we'll use the same layer instance to vectorize the samples.
Our layer will only consider the top 20,000 words, and will truncate or pad sequences to
be actually 200 tokens long.
"""
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Keep only the 20k most frequent tokens; pad/truncate samples to 200 tokens.
vectorizer = TextVectorization(max_tokens=20000, output_sequence_length=200)
text_ds = tf.data.Dataset.from_tensor_slices(train_samples).batch(128)
vectorizer.adapt(text_ds)
"""
You can retrieve the computed vocabulary used via `vectorizer.get_vocabulary()`. Let's
print the top 5 words:
"""
vectorizer.get_vocabulary()[:5]
"""
Let's vectorize a test sentence:
"""
# Vectorize a test sentence; the result is a padded sequence of token ids.
output = vectorizer([["the cat sat on the mat"]])
output.numpy()[0, :6]
"""
As you can see, "the" gets represented as "2". Why not 0, given that "the" was the first
word in the vocabulary? That's because index 0 is reserved for padding and index 1 is
reserved for "out of vocabulary" tokens.
Here's a dict mapping words to their indices:
"""
voc = vectorizer.get_vocabulary()
# NOTE(review): the offset of 2 assumes get_vocabulary() does NOT include
# the padding ('') and OOV ('[UNK]') tokens (older TF behaviour — this is
# consistent with `num_tokens = len(voc) + 2` below). Newer TF versions
# include them, in which case this should be range(len(voc)); verify
# against the installed TensorFlow version.
word_index = dict(zip(voc, range(2, len(voc))))
"""
As you can see, we obtain the same encoding as above for our test sentence:
"""
test = ["the", "cat", "sat", "on", "the", "mat"]
[word_index[w] for w in test]
"""
## Load pre-trained word embeddings
"""
"""
Let's download pre-trained GloVe embeddings (a 822M zip file).
You'll need to run the following commands:
```
!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip -q glove.6B.zip
```
"""
"""
The archive contains text-encoded vectors of various sizes: 50-dimensional,
100-dimensional, 200-dimensional, 300-dimensional. We'll use the 100D ones.
Let's make a dict mapping words (strings) to their NumPy vector representation:
"""
# Parse the GloVe text file: each line is "<word> <100 space-separated floats>".
path_to_glove_file = os.path.join(
    os.path.expanduser("~"), ".keras/datasets/glove.6B.100d.txt"
)
embeddings_index = {}
with open(path_to_glove_file) as f:
    for line in f:
        word, coefs = line.split(maxsplit=1)
        # NOTE(review): np.fromstring with sep is deprecated in newer NumPy.
        coefs = np.fromstring(coefs, "f", sep=" ")
        embeddings_index[word] = coefs
print("Found %s word vectors." % len(embeddings_index))
"""
Now, let's prepare a corresponding embedding matrix that we can use in a Keras
`Embedding` layer. It's a simple NumPy matrix where entry at index `i` is the pre-trained
vector for the word of index `i` in our `vectorizer`'s vocabulary.
"""
# Rows 0 and 1 of the matrix are reserved for padding and OOV and stay
# all-zeros, as do words that have no pre-trained GloVe vector.
num_tokens = len(voc) + 2
embedding_dim = 100
hits = 0
misses = 0
# Prepare embedding matrix
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # Words not found in embedding index will be all-zeros.
        # This includes the representation for "padding" and "OOV"
        embedding_matrix[i] = embedding_vector
        hits += 1
    else:
        misses += 1
print("Converted %d words (%d misses)" % (hits, misses))
"""
Next, we load the pre-trained word embeddings matrix into an `Embedding` layer.
Note that we set `trainable=False` so as to keep the embeddings fixed (we don't want to
update them during training).
"""
from tensorflow.keras.layers import Embedding
embedding_layer = Embedding(
num_tokens,
embedding_dim,
embeddings_initializer=keras.initializers.Constant(embedding_matrix),
trainable=False,
)
"""
## Build the model
A simple 1D convnet with global max pooling and a classifier at the end.
"""
from tensorflow.keras import layers
int_sequences_input = keras.Input(shape=(None,), dtype="int64")
embedded_sequences = embedding_layer(int_sequences_input)
x = layers.Conv1D(128, 5, activation="relu")(embedded_sequences)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation="relu")(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation="relu")(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)
preds = layers.Dense(len(class_names), activation="softmax")(x)
model = keras.Model(int_sequences_input, preds)
model.summary()
"""
## Train the model
First, convert our list-of-strings data to NumPy arrays of integer indices. The arrays
are right-padded.
"""
x_train = vectorizer(np.array([[s] for s in train_samples])).numpy()
x_val = vectorizer(np.array([[s] for s in val_samples])).numpy()
y_train = np.array(train_labels)
y_val = np.array(val_labels)
"""
We use categorical crossentropy as our loss since we're doing softmax classification.
Moreover, we use `sparse_categorical_crossentropy` since our labels are integers.
"""
model.compile(
loss="sparse_categorical_crossentropy", optimizer="rmsprop", metrics=["acc"]
)
model.fit(x_train, y_train, batch_size=128, epochs=20, validation_data=(x_val, y_val))
"""
## Export an end-to-end model
Now, we may want to export a `Model` object that takes as input a string of arbitrary
length, rather than a sequence of indices. It would make the model much more portable,
since you wouldn't have to worry about the input preprocessing pipeline.
Our `vectorizer` is actually a Keras layer, so it's simple:
"""
string_input = keras.Input(shape=(1,), dtype="string")
x = vectorizer(string_input)
preds = model(x)
end_to_end_model = keras.Model(string_input, preds)
probabilities = end_to_end_model.predict(
[["this message is about computer graphics and 3D modeling"]]
)
class_names[np.argmax(probabilities[0])]
| StarcoderdataPython |
1727758 | # -*- coding: utf-8 -*-
from datetime import datetime
try:
import requests
except ImportError:
from .packages import requests
# Module-level defaults used by Logplex when no explicit value is given.
DEFAULT_LOGPLEX_URL = 'https://east.logplex.io/logs'
DEFAULT_LOGPLEX_TOKEN = None
# Historical misspelling ("DETAULT") kept for backward compatibility with
# existing importers; new code should use the correctly-spelled alias below.
DETAULT_LOGPLEX_TIMEOUT = 2
DEFAULT_LOGPLEX_TIMEOUT = DETAULT_LOGPLEX_TIMEOUT
class Logplex(object):
    """A Logplex client.

    Formats each message as a length-prefixed, RFC 5424-style syslog packet
    and POSTs it to a Logplex HTTP endpoint.
    """

    def __init__(self, token=None, url=None, timeout=2):
        """
        token: Logplex auth token (falls back to DEFAULT_LOGPLEX_TOKEN).
        url: ingestion endpoint (falls back to DEFAULT_LOGPLEX_URL).
        timeout: per-request timeout in seconds.
        """
        super(Logplex, self).__init__()
        self.url = url or DEFAULT_LOGPLEX_URL
        self.token = token or DEFAULT_LOGPLEX_TOKEN
        # BUG FIX: the original re-assigned self.timeout to the module default
        # after storing the argument, silently discarding any caller-supplied
        # timeout. Honour the constructor argument instead.
        self.timeout = timeout
        # Syslog header fields; hostname/procid are fixed placeholders.
        self.hostname = 'myhost'
        self.procid = 'python-logplex'
        self.msgid = '-'
        self.structured_data = '-'
        # Reuse one HTTP session (connection pooling) for all sends.
        self.session = requests.session()

    def format_data(self, data):
        """Return *data* wrapped in a length-prefixed syslog-style packet.

        Layout: "<len> <PRI>1 <timestamp> <hostname> <token> <procid>
        <msgid> <structured-data> <message>".
        """
        pkt = "<190>1 "
        pkt += datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S+00:00 ")
        pkt += '{} '.format(self.hostname)
        pkt += '{} '.format(self.token)
        pkt += '{} '.format(self.procid)
        pkt += '{} '.format(self.msgid)
        pkt += '{} '.format(self.structured_data)
        pkt += data
        return '{} {}'.format(len(pkt), pkt)

    def puts(self, s):
        """Send a single log line."""
        self.send_data(s)

    def send_data(self, s):
        """Format *s* and POST it to the Logplex endpoint.

        Raises whatever requests raises on connection/timeout errors.
        """
        # BUG FIX: `auth` was previously assigned only inside `if self.token:`;
        # calling send_data without a token raised NameError. Fall back to an
        # unauthenticated request instead.
        auth = ('token', self.token) if self.token else None
        headers = {'Content-Type': 'application/logplex-1'}
        data = self.format_data(s)
        # NOTE(security): verify=False disables TLS certificate validation;
        # kept for backward compatibility -- consider removing it so the
        # server certificate is actually checked.
        self.session.post(self.url,
                          auth=auth,
                          headers=headers,
                          data=data,
                          timeout=self.timeout,
                          verify=False
                          )
| StarcoderdataPython |
3280980 | <filename>superset/databases/filters.py<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Set
from flask import g
from flask_babel import lazy_gettext as _
from sqlalchemy import or_
from sqlalchemy.orm import Query
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.sqltypes import JSON
from superset import app, security_manager
from superset.models.core import Database
from superset.views.base import BaseFilter
def can_access_databases(
    view_menu_name: str,
) -> Set[str]:
    """Collect the names of all databases reachable via *view_menu_name*.

    Walks the current user's view-menu permissions of the given kind and
    unpacks each one into its database component.
    """
    reachable = set()
    for permission in security_manager.user_view_menu_names(view_menu_name):
        database = security_manager.unpack_database_and_schema(permission).database
        reachable.add(database)
    return reachable
class DatabaseFilter(BaseFilter):  # pylint: disable=too-few-public-methods
    # TODO(bogdan): consider caching.
    def apply(self, query: Query, value: Any) -> Query:
        """Restrict *query* to databases the current user is allowed to see.

        Admin-like users (can_access_all_databases) see everything; everyone
        else is limited to databases they can reach either directly (matching
        `database_access` perm strings) or indirectly through schema- or
        datasource-level permissions.
        """
        if security_manager.can_access_all_databases():
            return query
        # Direct database-level permissions, matched on the perm string.
        database_perms = security_manager.user_view_menu_names("database_access")
        # Databases reachable indirectly via schema/datasource permissions,
        # matched on the database name.
        schema_access_databases = can_access_databases("schema_access")
        datasource_access_databases = can_access_databases("datasource_access")
        return query.filter(
            or_(
                self.model.perm.in_(database_perms),
                self.model.database_name.in_(
                    [*schema_access_databases, *datasource_access_databases]
                ),
            )
        )
class DatabaseUploadEnabledFilter(BaseFilter):  # pylint: disable=too-few-public-methods
    """
    Custom filter for the GET list that filters all databases based on allow_file_upload
    """

    name = _("Upload Enabled")
    arg_name = "upload_is_enabled"

    def apply(self, query: Query, value: Any) -> Query:
        """Keep only databases that accept file uploads for the current user."""
        filtered_query = query.filter(Database.allow_file_upload)
        datasource_access_databases = can_access_databases("datasource_access")
        if hasattr(g, "user"):
            allowed_schemas = [
                app.config["ALLOWED_USER_CSV_SCHEMA_FUNC"](db, g.user)
                for db in datasource_access_databases
            ]
            if len(allowed_schemas):
                return filtered_query
        # BUG FIX: the original wrote
        #   cast(Database.extra, JSON)["schemas_allowed_for_file_upload"] is not None
        # `is not` is a Python identity test evaluated eagerly -- it is always
        # True for a column expression, so or_() received a constant truthy
        # value and the JSON restriction was effectively disabled.
        # .isnot(None) emits the intended SQL `IS NOT NULL` comparison.
        return filtered_query.filter(
            or_(
                cast(Database.extra, JSON)["schemas_allowed_for_file_upload"].isnot(
                    None
                ),
                cast(Database.extra, JSON)["schemas_allowed_for_file_upload"] != [],
            )
        )
| StarcoderdataPython |
82624 | import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, num2date
from matplotlib import patches
import matplotlib.patches as mpatches
from matplotlib import ticker, cm, colors
import sys
sys.path.insert(0, "..")
sys.path.insert(0, "../utilities/")
def plot_acfs(rad="kap", fname="figs/acfs.png"):
    """Contour-plot the joint (spectral width, velocity) ACF occurrence map.

    rad: radar code used to locate ``<rad>.acfs.npy`` (a 2D count histogram
        on a 5 m/s grid).
    fname: path of the saved figure.

    NOTE(review): relies on a module/global ``equations`` -- a sequence of
    three callables drawing classification boundary curves -- which is not
    defined in this view; confirm it is in scope before calling.
    """
    # 5 m/s bins for width (w) and velocity (v), matching the stored histogram.
    w_range, v_range = np.arange(0, 1001, 5), np.arange(0, 1001, 5)
    w, v = np.meshgrid(w_range, v_range)
    cmap = cm.gray
    # NOTE: mutates the shared matplotlib colormap object (global side effect).
    cmap.set_bad(color="k")
    # Logarithmically spaced contour levels from 1e-6 to 1.
    levs = [10**c for c in np.linspace(-6, 0, 10, dtype=float)]
    fig = plt.figure(figsize=(5, 4), dpi=100)
    X = np.load("../../SuperDARN-Clustering/sd/data/%s.acfs.npy"%rad)
    # Normalise counts to a probability density P(w, v).
    count = np.nansum(X)
    X = X / count
    ax = fig.add_subplot(111)
    cs = ax.contour(w, v, X, levs, linewidths=0.5, colors='k', norm=colors.LogNorm())
    ax.clabel(cs, levels=levs, inline=1, fontsize=6, fmt=matplotlib.ticker.LogFormatterSciNotation())
    cntr = ax.contourf(w, v, X, levs, norm=colors.LogNorm(), cmap=cmap)
    # Zoom to the physically interesting 5-100 m/s region.
    ax.set_xlim(5, 100)
    ax.set_ylim(5, 100)
    cb = fig.colorbar(cntr, ax=ax, shrink=0.7)
    cb.set_label(r"$P(w,v), s^{2}m^{-2}$")
    ax.set_xlabel(r"Spectral Width (W), $ms^{-1}$")
    ax.set_ylabel(r"Velocity (V), $ms^{-1}$")
    # Overlay the three boundary curves (see NOTE above about `equations`).
    ax.plot(w_range, equations[0](w_range), ls="--", color="r", lw=1., label=r"$|v|+\frac{w}{3}\leq 30$")
    ax.plot(w_range, equations[1](w_range), ls="--", color="b", lw=1., label=r"$|v|+\frac{w}{4}\leq 60$")
    ax.plot(w_range, equations[2](w_range), ls="--", color="g", lw=1., label=r"$|v|-0.139w+0.00113w^2\leq 33.1$")
    ax.text(0.25, 1.05, "Rad:"+rad +"(2011-2015)", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes)
    ax.text(0.75, 1.05, r"ACFs~%.2f$\times 10^6$"%(count/1e6), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes)
    fig.savefig(fname, bbox_inches="tight")
    return
191915 | <reponame>scramjetorg/framework-python<gh_stars>10-100
from scramjet.pyfca import Pyfca, DropChunk
import asyncio
from scramjet.ansi_color_codes import *
from os import environ
import scramjet.utils as utils
from collections.abc import Iterable, AsyncIterable
import re
# Verbose tracing is enabled by either environment variable.
DEBUG = 'DATASTREAM_DEBUG' in environ or 'SCRAMJET_DEBUG' in environ
# Short alias used throughout this module to trim long chunk reprs in logs.
tr = utils.print_trimmed
def log(stream, *args):
    """Emit a timestamped debug line tagged with the stream's name.

    No-op unless DEBUG tracing is enabled.
    """
    if DEBUG: # pragma: no cover
        utils.LogWithTimer.log(f"{grey}{stream.name}{reset}", *args)
class UnsupportedOperation(Exception):
    """Raised when a stream source/argument combination cannot be handled
    (e.g. a chunk_size given for a source without a read() method)."""
    pass
class StreamAlreadyConsumed(Exception):
    """Raised on an attempt to read from or transform a stream that has
    already been consumed."""
    pass
class Stream():
    """Asynchronous data stream built on top of a ``Pyfca`` processing queue.

    Chaining methods (``map``/``filter``/``decode``) add transforms to the
    shared pyfca; restructuring methods (``flatmap``/``batch``/``sequence``/
    ``pipe``) spawn a consumer task that feeds a fresh downstream pyfca.
    Sink methods (``to_list``/``write_to``/``reduce``) consume the stream.
    A stream may only be consumed once; a second attempt raises
    ``StreamAlreadyConsumed``. Reading starts lazily: producers wait on
    ``_ready_to_start`` until a sink "uncorks" the chain.
    """
    def __init__(self, max_parallel=64, upstream=None, origin=None, name="datastream"):
        # upstream: stream whose pyfca we share (for chained transforms);
        # origin: first stream in the chain -- writes are routed to it.
        self._upstream = upstream
        self._origin = origin if origin else self
        self.name = name
        # whether we can write to the stream instance
        self._writable = True
        # whether the stream was already "used" (transformed/read from)
        self._consumed = False
        # Chained transforms reuse the upstream pyfca; otherwise create one.
        self._pyfca = upstream._pyfca if upstream else Pyfca(max_parallel)
        # Resolved by _uncork(); producer tasks await it before writing.
        self._ready_to_start = asyncio.Future()
        self._sinks = []
        log(self, f'INIT stream created with pyfca {self._pyfca}')
    def __await__(self):
        # Guard against the common mistake of awaiting the stream itself.
        raise TypeError(
            "Stream objects cannot be awaited on. To get data from a stream, "
            "use a sink method (such as .to_list()) and await on that."
        )
    async def __aiter__(self):
        """Async-iterate over chunks until the stream ends (None sentinel)."""
        self._uncork()
        while True:
            chunk = await self._pyfca.read()
            if chunk is None:
                break
            yield chunk
    def _uncork(self):
        """Resolve the start future and propagate upstream so producers run."""
        if not self._ready_to_start.done():
            self._ready_to_start.set_result(True)
            log(self, f'{green}uncorked{reset}')
        if self._upstream:
            log(self, f'uncorking upstream: {self._upstream.name}')
            self._upstream._uncork()
    def _mark_consumed(self):
        """Flag the stream as used; a second use raises StreamAlreadyConsumed."""
        if self._consumed: # cannot consume the same stream twice
            raise StreamAlreadyConsumed
        else:
            self._consumed = True
    def _as(self, target_class):
        """Create a stream of type target_class from current one."""
        return target_class(
            upstream=self,
            max_parallel=self._pyfca.max_parallel,
            name=f'{self.name}+_'
        )
    def use(self, func):
        """Perform a function on the whole stream and return the result."""
        return func(self)
    def write(self, chunk):
        """Write a single item to the datastream."""
        # Writes always target the origin's pyfca (head of the chain).
        return self._origin._pyfca.write(chunk)
    def end(self):
        """Mark the end of input to the datastream."""
        self._pyfca.end()
    async def read(self):
        """Read a single item from the datastream."""
        # cannot read from stream consumed by something else
        if self._consumed:
            raise StreamAlreadyConsumed
        self._uncork()
        return await self._pyfca.read()
    @classmethod
    def read_from(cls, source, max_parallel=64, chunk_size=None):
        """
        Create a new stream from specified source, which must be either
        an Iterable or implement .read() method.
        """
        if chunk_size:
            if hasattr(source, 'read'):
                return cls.from_callback(
                    max_parallel, source.read, chunk_size)
            else:
                msg = (f"chunk_size was specified, but source {source} "
                       "does not implement read() method.")
                raise UnsupportedOperation(msg)
        else:
            if isinstance(source, (Iterable, AsyncIterable)):
                return cls.from_iterable(
                    source, max_parallel=max_parallel)
            else:
                msg = (f"Source {source} is not iterable. It cannot be used "
                       "unless it exposes read() method and chunk_size "
                       "is specified.")
                raise UnsupportedOperation(msg)
    @classmethod
    def from_iterable(cls, iterable, max_parallel=64):
        """Create a new stream from an iterable object."""
        stream = cls(max_parallel)
        async def consume():
            # Wait until a sink uncorks the stream before producing.
            await stream._ready_to_start
            if isinstance(iterable, Iterable):
                for item in iterable:
                    await stream._pyfca.write(item)
            if isinstance(iterable, AsyncIterable):
                [await stream._pyfca.write(item) async for item in iterable]
            stream._pyfca.end()
        asyncio.create_task(consume())
        # The producer task owns the input side; external writes are disallowed.
        stream._writable = False
        return stream
    @classmethod
    def from_callback(cls, max_parallel, callback, *args):
        """Create a new stream using callback to get chunks."""
        stream = cls(max_parallel)
        async def consume():
            await stream._ready_to_start
            while True:
                chunk = callback(*args)
                if asyncio.iscoroutine(chunk):
                    chunk = await chunk
                # Empty str/bytes signals end-of-input (file-like .read()).
                if chunk == '' or chunk == b'':
                    break
                await stream._pyfca.write(chunk)
            stream._pyfca.end()
        asyncio.create_task(consume())
        stream._writable = False
        return stream
    def map(self, func, *args):
        """Transform each chunk using a function."""
        self._mark_consumed()
        new_stream = self.__class__(upstream=self, origin=self._origin, name=f'{self.name}+m')
        async def run_mapper(chunk):
            if args:
                log(new_stream, f'calling mapper {func} with args: {chunk, *args}')
            result = func(chunk, *args)
            # Support both sync and async mapper functions.
            if asyncio.iscoroutine(result):
                result = await result
            log(new_stream, f'mapper result: {tr(chunk)} -> {tr(result)}')
            return result
        log(new_stream, f'adding mapper: {func}')
        new_stream._pyfca.add_transform(run_mapper)
        return new_stream
    def each(self, func, *args):
        """Perform an operation on each chunk and return it unchanged."""
        async def mapper(chunk):
            result = func(chunk, *args)
            if asyncio.iscoroutine(result):
                await result
            return chunk
        return self.map(mapper)
    def decode(self, encoding):
        """Convert chunks of bytes into strings using specified encoding."""
        import codecs
        # Incremental decoders handle characters split across inputs.
        # Input with only partial data yields empty string - drop these.
        decoder = codecs.getincrementaldecoder(encoding)()
        return self._as(StringStream).map(
            lambda chunk: decoder.decode(chunk) or DropChunk
        )
    def filter(self, func, *args):
        """Keep only chunks for which func evaluates to True."""
        self._mark_consumed()
        new_stream = self.__class__(upstream=self, origin=self._origin, name=f'{self.name}+f')
        async def run_filter(chunk):
            if args:
                log(new_stream, f'calling filter {func} with args: {chunk, *args}')
            decision = func(chunk, *args)
            if asyncio.iscoroutine(decision):
                decision = await decision
            log(new_stream, f'filter result: {tr(chunk)} -> {cyan}{decision}{reset}')
            # DropChunk tells the pyfca to discard this item.
            return chunk if decision else DropChunk
        log(new_stream, f'adding filter: {func}')
        new_stream._pyfca.add_transform(run_filter)
        return new_stream
    def flatmap(self, func, *args):
        """Run func on each chunk and return all results as separate chunks."""
        self._mark_consumed()
        # New pyfca: one input chunk may fan out into many output chunks.
        new_stream = self.__class__(
            max_parallel=self._pyfca.max_parallel, origin=self._origin, name=f'{self.name}+fm'
        )
        async def consume():
            self._uncork()
            while True:
                chunk = await self._pyfca.read()
                log(self, f'got: {tr(chunk)}')
                if chunk is None:
                    break
                results = func(chunk, *args)
                if asyncio.iscoroutine(results):
                    results = await results
                log(self, f'{cyan}split:{reset} -> {repr(results)}')
                for item in results:
                    log(new_stream, f'put: {tr(item)}')
                    await new_stream._pyfca.write(item)
                log(new_stream, f'{blue}drained{reset}')
            log(new_stream, f'ending pyfca {new_stream._pyfca}')
            new_stream._pyfca.end()
        asyncio.create_task(consume(), name='flatmap-consumer')
        return new_stream
    def batch(self, func, *args):
        """
        Convert a stream of chunks into a stream of lists of chunks.
        func: called on each chunk to determine when the batch will end.
        """
        self._mark_consumed()
        new_stream = self.__class__(
            max_parallel=self._pyfca.max_parallel, origin=self._origin, name=f'{self.name}+b'
        )
        async def consume():
            self._uncork()
            batch = []
            while True:
                chunk = await self._pyfca.read()
                log(self, f'got: {tr(chunk)}')
                if chunk is None:
                    break
                batch.append(chunk)
                if args:
                    log(new_stream, f'calling {func} with args: {chunk, *args}')
                # func returning truthy closes the current batch.
                if func(chunk, *args):
                    log(new_stream, f'{pink}put batch:{reset} {tr(batch)}')
                    await new_stream._pyfca.write(batch)
                    batch = []
            # Flush a trailing, unterminated batch at end of stream.
            if len(batch):
                log(new_stream, f'{pink}put batch:{reset} {tr(batch)}')
                await new_stream._pyfca.write(batch)
            log(new_stream, f'ending pyfca {new_stream._pyfca}')
            new_stream._pyfca.end()
        asyncio.create_task(consume())
        return new_stream
    def sequence(self, sequencer, initialPartial=None):
        """
        Change how the data is chopped into chunks.
        sequencer: two-argument function taking partial result from previous
        operation and current chunk. It should return an iterable; all items
        from the iterable except the last one will become new chunks, and the
        last one will be fed to the next call of the sequencer.
        """
        self._mark_consumed()
        new_stream = self.__class__(
            max_parallel=self._pyfca.max_parallel, origin=self._origin, name=f'{self.name}+s'
        )
        async def consume():
            self._uncork()
            partial = initialPartial
            while True:
                chunk = await self._pyfca.read()
                log(self, f'got: {tr(chunk)}')
                if chunk is None:
                    break
                chunks = sequencer(partial, chunk)
                if asyncio.iscoroutine(chunks):
                    chunks = await chunks
                log(new_stream, f'{blue}{len(chunks)} chunks:{reset} {chunks}')
                # Emit all complete chunks; keep the last as the carry-over.
                for chunk in chunks[:-1]:
                    log(new_stream, f'put: {tr(chunk)}')
                    await new_stream._pyfca.write(chunk)
                log(new_stream, f'carrying over partial result: {tr(chunks[-1])}')
                partial = chunks[-1]
            log(new_stream, f'leftover: {tr(partial)}')
            # pytest claims that line #315 is not reacheable, cause of if statement is always True.
            # TODO: refactor code here or find exact reason for pytest problem
            if partial: # pragma: no cover
                log(new_stream, f'put: {tr(partial)}')
                await new_stream._pyfca.write(partial)
            log(new_stream, f'ending pyfca {new_stream._pyfca}')
            new_stream._pyfca.end()
        asyncio.create_task(consume())
        return new_stream
    def pipe(self, target):
        """Forward all chunks from current stream into target."""
        # Deliberately sets _consumed directly (not _mark_consumed) so that
        # pipe() may be called repeatedly to attach multiple sinks.
        self._consumed = True
        self._sinks.append(target)
        async def consume():
            self._uncork()
            while True:
                chunk = await self._pyfca.read()
                if chunk is None:
                    break
                # Broadcast each chunk to every attached sink concurrently.
                drains = [target._pyfca.write(chunk) for target in self._sinks]
                await asyncio.gather(*drains)
            for target in self._sinks:
                target._pyfca.end()
        # Only the first pipe() call schedules the consumer task; later calls
        # just register additional sinks for the already-running consumer.
        if len(self._sinks) == 1:
            asyncio.create_task(consume(), name='pipe-consumer')
        return target
    async def to_list(self):
        """Create a list with all resulting stream chunks."""
        self._mark_consumed()
        self._uncork()
        result = []
        log(self, f'sink: {repr(result)}')
        chunk = await self._pyfca.read()
        while chunk is not None:
            log(self, f'got: {tr(chunk)}')
            result.append(chunk)
            chunk = await self._pyfca.read()
        return result
    async def write_to(self, target):
        """
        Write all resulting stream chunks into target.
        target: object implementing .write() method
        """
        self._mark_consumed()
        self._uncork()
        log(self, f'sink: {repr(target)}')
        chunk = await self._pyfca.read()
        while chunk is not None:
            log(self, f'got: {tr(chunk)}')
            write = target.write(chunk)
            # Support both sync and async write targets.
            if asyncio.iscoroutine(write):
                await write
            chunk = await self._pyfca.read()
        return target
    async def reduce(self, func, initial=None):
        """
        Apply two-argument func to elements from the stream cumulatively,
        producing an awaitable that will resolve to a single value when the
        stream ends. For a stream of [1,2,3,4] the result will be
        func(func(func(1,2),3),4).
        """
        self._mark_consumed()
        self._uncork()
        # Without an explicit initial value, seed with the first chunk.
        if initial is None:
            accumulator = await self._pyfca.read()
            log(self, f'got: {tr(accumulator)}')
        else:
            accumulator = initial
            log(self, f'reducer: initialized accumulator with {initial}')
        while True:
            chunk = await self._pyfca.read()
            log(self, f'got: {tr(chunk)}')
            if chunk is None:
                break
            accumulator = func(accumulator, chunk)
            if asyncio.iscoroutine(accumulator):
                accumulator = await accumulator
            log(self, f'reduce - intermediate result: {accumulator}')
        return accumulator
class StringStream(Stream):
    """A Stream specialised for text chunks."""

    def __init__(self, max_parallel=64, upstream=None, origin=None, name="stringstream"):
        super().__init__(max_parallel=max_parallel, upstream=upstream, origin=origin, name=name)

    def parse(self, func, *args):
        """Transform StringStream into Stream."""
        return self._as(Stream).map(func, *args)

    def match(self, pattern):
        """Extract matching parts of chunk as new chunks."""
        compiled = re.compile(pattern)

        def extract(chunk):
            found = compiled.findall(chunk)
            if compiled.groups <= 1:
                # 0 or 1 capture group: findall already yields flat strings.
                return found
            # Multiple capture groups: findall yields tuples -- flatten them.
            return [part for groups in found for part in groups]

        return self.flatmap(extract)

    def split(self, separator=None):
        """Split each chunk into multiple new chunks."""

        def chop(carry, chunk):
            pieces = (carry + chunk).split(sep=separator)
            # Whitespace-mode split drops a trailing delimiter, which would
            # make the last complete word look like a partial result -- append
            # an empty sentinel so it is emitted instead of carried over.
            if not separator and chunk[-1].isspace():
                pieces.append("")
            return pieces

        return self.sequence(chop, "")
25232 | <gh_stars>0
test_index = ("2019-05-01", "2020-04-30")
train_index = ("2016-01-02", "2019-04-30")
val_index = ("2018-10-01", "2019-04-30")
| StarcoderdataPython |
1697171 | <reponame>saurabhchardereal/kernel-tracker
import json
from urllib.request import Request, urlopen
class TelegramUtils:
    """Thin wrapper around the Telegram Bot API for sending kernel-release
    notifications via the sendMessage endpoint."""

    def __init__(self, API: str) -> None:
        # API: bot token; baked into the sendMessage endpoint URL.
        self.API = API
        self.API_URL = f"https://api.telegram.org/bot{self.API}/sendMessage"
        # Request payload; reused (and mutated) across consecutive sends.
        self.url_data = {}

    def send_to_tg(self, chat_id: str, kvd: dict) -> None:
        """Sends a nicely formatted notification to given Telegram chat_id"""
        # kvd layout (as used below): {release: {version: {"date": ...,
        # <link-name>: <url>, ...}}} -- every non-"date" entry becomes an
        # inline keyboard button.
        self.url_data["chat_id"] = chat_id
        self.url_data["parse_mode"] = "Markdown"
        for rel in kvd:
            for ver in kvd[rel]:
                # Fresh keyboard per version; one message per (rel, ver) pair.
                self.url_data["reply_markup"] = {"inline_keyboard": []}
                message = f"""\
*New kernel release detected!*\
\nrelease: `{rel}`\
\nversion: `{ver}`\
\ndate: `{kvd[rel][ver]['date']}`
"""
                for i in kvd[rel][ver]:
                    if i == "date":
                        continue
                    self.url_data["reply_markup"]["inline_keyboard"].append(
                        [{"text": f"{i}", "url": f"{kvd[rel][ver][i]}"}]
                    )
                self.url_data["text"] = message
                self._make_send_request()

    def _make_send_request(self) -> None:
        """POST the current self.url_data payload to the Bot API."""
        req = Request(self.API_URL, data=json.dumps(self.url_data).encode())
        req.add_header("Content-Type", "application/json")
        with urlopen(req) as fp:
            # Response body is read (draining the connection) but not checked;
            # HTTP errors surface as urllib exceptions from urlopen().
            status = fp.read()
| StarcoderdataPython |
66609 | ################################################################################
# Copyright (C) 2016-2019 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import os
import sys
import argparse
import re
import pandas as pd
def RunMain():
    """Merge current and new rocBLAS benchmark CSVs and emit gain statistics.

    Reads two CSV files, inner-joins them on the problem-description columns
    (all headers except the last four measurement columns), computes per-row
    and weighted performance gains, and writes the combined result as both
    CSV and XLSX next to the requested output path.
    """
    userArgs = sys.argv[1:]
    argParser = argparse.ArgumentParser()
    argParser.add_argument("current_file", help="path where the current results are located")
    argParser.add_argument("new_file", help="path where the new files are located")
    argParser.add_argument("combined_file", help="path where the combined results are located")
    args = argParser.parse_args(userArgs)
    currentFileName = args.current_file
    newFileName = args.new_file
    combinedFileName = args.combined_file
    current_data = pd.read_csv(currentFileName)
    headers = current_data.columns.values.tolist()
    # Join keys = every column except the trailing four measurement columns.
    keys = headers[0:len(headers)-4]
    new_data = pd.read_csv(newFileName)
    result1 = pd.merge(current_data, new_data, on=keys, how='inner')
    # Disambiguate the merge's _x/_y suffixes into _current/_new names.
    result = result1.rename(columns={'eff_x':'eff_current','eff_y':'eff_new','rocblas-Gflops_x':'rocblas-Gflops_current', 'rocblas-Gflops_y':'rocblas-Gflops_new','us_x':'us_current','us_y':'us_new','counts_x':'counts_current','score_x':'score_current','counts_y':'counts_new','score_y':'score_new','wa_x':'wa_current','wa_y':'wa_new'})
    # Relative Gflops improvement per problem, in percent.
    result['percent gain'] = 100.0 * (result['rocblas-Gflops_new'] - result['rocblas-Gflops_current']) /result['rocblas-Gflops_current']
    # Gain weighted by wa_new relative to the new Gflops figure; 'call count'
    # then recovers the weight, so 'overall gain' is the weighted average
    # (a single scalar broadcast into every row).
    result['weighted gain'] = result['percent gain'] * result['wa_new'] / result['rocblas-Gflops_new']
    result['call count'] = result['weighted gain'] / result['percent gain']
    result['overall gain'] = sum(result['weighted gain']) / sum(result['call count'])
    result.to_csv(combinedFileName, header=True, index=False)
    # Mirror the CSV as an .xlsx with the same base name in the same directory.
    inputFileBaseName = os.path.basename(combinedFileName)
    outputDir = os.path.dirname(combinedFileName)
    namePart, _ = os.path.splitext(inputFileBaseName)
    excelFileName = os.path.join(outputDir, namePart + ".xlsx")
    result.to_excel(excelFileName)
if __name__ == "__main__":
    RunMain()
| StarcoderdataPython |
3225143 | <reponame>sumedhpb/testrunner<filename>pytests/upgrade/xdcr_upgrade_collections.py
import queue
import copy, json
from .newupgradebasetest import NewUpgradeBaseTest
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.rest_client import RestConnection, RestHelper
from membase.api.exception import RebalanceFailedException
from membase.helper.cluster_helper import ClusterOperationHelper
from memcached.helper.kvstore import KVStore
from fts.stable_topology_fts import StableTopFTS
from pytests.fts.fts_callable import FTSCallable
from couchbase.cluster import Cluster, PasswordAuthenticator
from security.rbac_base import RbacBase
from threading import Thread
from collection.collections_cli_client import CollectionsCLI
from collection.collections_rest_client import CollectionsRest
from collection.collections_stats import CollectionsStats
class XDCRUpgradeCollectionsTests(NewUpgradeBaseTest):
def setUp(self):
super(XDCRUpgradeCollectionsTests, self).setUp()
self.nodes_init = self.input.param('nodes_init', 2)
self.queue = queue.Queue()
self.rate_limit = self.input.param("rate_limit", 100000)
self.batch_size = self.input.param("batch_size", 1000)
self.doc_size = self.input.param("doc_size", 100)
self.loader = self.input.param("loader", "high_doc_ops")
self.instances = self.input.param("instances", 4)
self.threads = self.input.param("threads", 5)
self.use_replica_to = self.input.param("use_replica_to", False)
self.index_name_prefix = None
self.rest_src = RestConnection(self.servers[0])
def tearDown(self):
super(XDCRUpgradeCollectionsTests, self).tearDown()
def enable_migration_mode(self, src_bucket, dest_bucket):
setting_val_map = {"collectionsMigrationMode": "true",
"colMappingRules": '{"REGEXP_CONTAINS(META().id,\'0$\')":"scope1.mycollection_scope1"}'
}
self.rest_src.set_xdcr_params(src_bucket, dest_bucket, setting_val_map)
def verify_doc_counts(self):
des_master = self.servers[self.nodes_init]
src_cbver = RestConnection(self.master).get_nodes_version()
des_cbver = RestConnection(des_master).get_nodes_version()
src_items = RestConnection(self.master).get_buckets_itemCount()
des_items = RestConnection(des_master).get_buckets_itemCount()
if src_cbver[:3] < "7.0" and des_cbver[:3] >= "7.0":
des_items = self.get_col_item_count(des_master, "default", "_default",
"_default", self.des_stat_col)
if src_items["default"] != des_items:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items["default"], des_items))
elif src_cbver[:3] >= "7.0" and des_cbver[:3] < "7.0":
src_items = self.get_col_item_count(self.master, "default", "_default",
"_default", self.stat_col)
if src_items != des_items["default"]:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items, des_items["default"]))
elif src_cbver[:3] >= "7.0" and des_cbver[:3] >= "7.0":
if src_items["default"] != des_items["default"]:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items["default"], des_items["default"]))
def test_xdcr_upgrade_with_services(self):
after_upgrade_services_in = self.input.param("after_upgrade_services_in", False)
after_upgrade_buckets_in = self.input.param("after_upgrade_buckets_in", False)
after_upgrade_buckets_out = self.input.param("after_upgrade_buckets_out", False)
after_upgrade_buckets_flush = self.input.param("after_upgrade_buckets_flush", False)
# Install initial version on the specified nodes
self._install(self.servers[:self.nodes_init])
# Configure the nodes with services on cluster1
self.operations(self.servers[:self.nodes_init], services="kv,kv")
# get the n1ql node which will be used in pre,during and post upgrade for running n1ql commands
self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
# Run the pre upgrade operations, typically creating index
self.pre_upgrade(self.servers[:self.nodes_init])
if self.input.param("ddocs_num", 0) > 0:
self.create_ddocs_and_views()
self._install(self.servers[self.nodes_init:self.num_servers])
self.master = self.servers[self.nodes_init]
# Configure the nodes with services on the other cluster2
try:
self.operations(self.servers[self.nodes_init:self.num_servers], services="kv,kv")
self.sleep(timeout=10)
except Exception as ex:
if ex:
print("error: ", str(ex))
self.log.info("bucket is created")
# create a xdcr relationship between cluster1 and cluster2
self.rest_src.add_remote_cluster(self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port,
'Administrator', 'password', "C2")
repl_id = self.rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
# Run the post_upgrade operations
self._create_ephemeral_buckets()
self.post_upgrade(self.servers[:self.nodes_init])
# Add new services after the upgrade
for upgrade_version in self.upgrade_versions:
src_nodes = self.servers[:self.nodes_init]
for server in src_nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
src_upgrade_threads = self._async_update(upgrade_version, src_nodes)
for upgrade_thread in src_upgrade_threads:
upgrade_thread.join()
src_success_upgrade = True
while not self.queue.empty():
src_success_upgrade &= self.queue.get()
if not src_success_upgrade:
self.fail("Upgrade failed in source cluster. See logs above!")
else:
self.log.info("Upgrade source cluster success")
des_nodes = self.servers[self.nodes_init:self.num_servers]
self.master = self.servers[self.nodes_init]
for server in des_nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
des_upgrade_threads = self._async_update(upgrade_version, des_nodes)
for upgrade_thread in des_upgrade_threads:
upgrade_thread.join()
des_success_upgrade = True
while not self.queue.empty():
des_success_upgrade &= self.queue.get()
if not des_success_upgrade:
self.fail("Upgrade failed in des cluster. See logs above!")
else:
self.log.info("Upgrade des cluster success")
self.master = self.servers[0]
self.rest = RestConnection(self.master)
self.rest_col = CollectionsRest(self.master)
self.cli_col = CollectionsCLI(self.master)
self.stat_col = CollectionsStats(self.master)
self.log.info("Create scope collection at src cluster")
#self.rest_col.create_scope_collection_count()
self._create_scope_collection(self.rest_col, self.cli_col, self.buckets[0].name)
self.sleep(10)
self.des_rest = RestConnection(self.servers[self.nodes_init])
self.des_rest_col = CollectionsRest(self.servers[self.nodes_init])
self.des_cli_col = CollectionsCLI(self.servers[self.nodes_init])
self.des_stat_col = CollectionsStats(self.servers[self.nodes_init])
self.log.info("Create scope collection at des cluster")
self.buckets = RestConnection(self.servers[self.nodes_init]).get_buckets()
self._create_scope_collection(self.des_rest_col, self.des_cli_col, self.buckets[0].name)
self.load_to_collections_bucket()
self.enable_migration = self.input.param("enable_migration", False)
if self.enable_migration:
self.enable_migration_mode(self.buckets[0].name, self.buckets[0].name)
self.verify_doc_counts()
if after_upgrade_buckets_in is not False:
self.bucket_size = 100
self._create_sasl_buckets(self.master, 1)
self._create_standard_buckets(self.master, 1)
if self.input.param("ddocs_num", 0) > 0:
self.create_ddocs_and_views()
gen_load = BlobGenerator('upgrade', 'upgrade-', self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", self.expire_time,
flag=self.item_flag)
# deleting buckets after upgrade
if after_upgrade_buckets_out is not False:
self._all_buckets_delete(self.master)
# flushing buckets after upgrade
if after_upgrade_buckets_flush is not False:
self._all_buckets_flush()
    def run_view_queries(self):
        """Build (but do not start) a Thread that runs ``self.view_queries``
        for ``self.run_view_query_iterations`` iterations.

        NOTE(review): the thread is returned unstarted -- presumably the
        caller invokes ``.start()``/``.join()``; confirm against callers.
        """
        view_query_thread = Thread(target=self.view_queries, name="run_queries",
                                   args=(self.run_view_query_iterations,))
        return view_query_thread
    def view_queries(self, iterations):
        """Query every default view ``iterations`` times.

        For each iteration, issues one query per view index in
        ``range(self.view_num)`` against the first design doc on the
        ``default`` bucket, retrying every 2 seconds on failure.
        """
        query = {"connectionTimeout": 60000}  # milliseconds
        for count in range(iterations):
            for i in range(self.view_num):
                self.cluster.query_view(self.master, self.ddocs[0].name,
                                        self.default_view_name + str(i), query,
                                        expected_rows=None, bucket="default", retry_time=2)
    def create_user(self, node):
        """Create the built-in 'cbadminbucket' user on *node* and grant it
        the full 'admin' role via the project's RBAC helpers.

        NOTE(review): the plaintext password is hard-coded; acceptable only
        for test environments.
        """
        self.log.info("inside create_user")
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                     'password': 'password'}]
        rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                     'roles': 'admin'}]
        self.log.info("before create_user_source")
        RbacBase().create_user_source(testuser, 'builtin', node)
        self.log.info("before add_user_role")
        RbacBase().add_user_role(rolelist, RestConnection(node), 'builtin')
# ==== dataset file boundary (sample 56085) ====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import regex as re
fontmap_directory = os.path.dirname(__file__) + '/fontmaps/'

# font filename (*.ttf) -> {hex codepoint -> space-separated replacement
# codepoints}, loaded from the JSON maps shipped next to this module.
fontmaps = {}
for font in ['JG_Pahawh_Third_Version', 'JG_Pahawh_Final_Version']:
    # Open via a context manager so the handle is closed promptly; the
    # original json.load(open(...)) leaked the file object.
    with open(fontmap_directory + '{}.json'.format(font)) as fontmap_file:
        fontmaps['{}.ttf'.format(font)] = json.load(fontmap_file)
def _decode_Myanmar1(string):
string = string.replace('\u1039\u101a', '\u103b')
string = string.replace('\u1039\u101b', '\u103c')
string = string.replace('\u1039\u101d', '\u103d')
string = string.replace('\u1039\u101f', '\u103e')
string = re.sub(r'\u1004\u1039([\u1000-\u1021\u1025])', '\u1004\u103a\u1039\g<1>', string)
string = string.replace('\u101e\u1039\u101e', '\u103f')
string = re.sub(r'([\u1036-\u1038])(\u1039)', '\g<2>\g<1>', string)
string = re.sub(r'\u1039(?![\u1000-\u1003\u1005-\u1008\u100b\u100c\u100f-\u1019\u101c])', '\u103a', string)
string = re.sub(r'([\u1001\u1002\u1004\u1012\u1015\u101d])\u102c', '\g<1>\u102b', string)
string = re.sub(r'([\u102f\u1030])([\u102d\u102e\u1032])', '\g<2>\g<1>', string)
string = re.sub(r'(\u1036)(\u1037)', '\g<2>\g<1>', string)
string = re.sub(r'[\u200c\u200d]', '', string)
return string
def decode(string, font):
    """Decode *string* typed for *font* into standard Unicode.

    Myanmar1 text goes through the rule-based converter; every other font
    is decoded character-by-character via its JSON codepoint map.  Any
    character without a mapping (or an unknown font) passes through
    unchanged.
    """
    if font == 'Myanmar1.ttf':
        return _decode_Myanmar1(string)
    pieces = []
    for original_char in string:
        try:
            # A missing font or a missing codepoint both raise KeyError;
            # in either case the character is kept as-is.
            codepoints = fontmaps[font][hex(ord(original_char))].split()
        except KeyError:
            pieces.append(original_char)
        else:
            pieces.extend(chr(int(cp, 16)) for cp in codepoints)
    return ''.join(pieces)
# ==== dataset file boundary (sample 4814028) ====
from tkinter import *
from tkinter.filedialog import askopenfilename
import xlrd
import pandas as pd
from tkinter.ttk import Combobox
from PIL import Image,ImageTk
import clusters1
dendogram_file_name='clusters.jpg'
class PoliCluster:
    """Tkinter front end for clustering election results.

    Wires a window with controls to load an Excel results file (via the
    injected ``data_center``), run district clustering at a chosen
    threshold, and display the resulting dendrogram on a scrollable canvas.
    """

    def __init__(self, data_center):
        self.data_center = data_center
        self.root = Tk()
        self.frame1 = None
        self.frame2 = None
        # Fixed: the original assigned ``self.Frame3`` (capital F), a stray
        # attribute never read anywhere; ``self.frame3`` is what initUI uses.
        self.frame3 = None
        self.canvas = None
        # Fixed: ``file_path`` is now always defined, so pressing the
        # cluster button before loading data no longer raises AttributeError.
        self.file_path = ''
        self.initUI()

    def interface(self):
        """Show the main window and enter the Tk event loop (blocking)."""
        self.root.title('Clustering')
        self.root.geometry('730x600+420+70')
        self.root.deiconify()
        Tk.mainloop(self.root)

    def initUI(self):
        """Build and lay out all widgets."""
        self.frame1 = Frame(self.root)
        self.frame2 = Frame(self.root)
        self.frame3 = Frame(self.root)
        self.frame1.pack(fill=BOTH)
        self.main_label = Label(self.frame1, text='Election Data Analysis Tool v.1.0', bg='red', fg='white', font=('Times', 14, 'bold'))
        self.loadDataB = Button(self.frame1, text='Load Election Data', height=2, width=27, command=self.load_data_button)
        self.clusDisB = Button(self.frame1, text='Cluster Districts', height=4, width=16, command=self.cluster_distrcits_but)
        self.clusPolB = Button(self.frame1, text='Cluster Political Parties', height=4, width=18)
        self.frame2.pack(expand=True, fill=BOTH)
        self.x_scroll = Scrollbar(self.frame2, orient=HORIZONTAL)
        self.y_scroll = Scrollbar(self.frame2, orient=VERTICAL)
        self.canvas = Canvas(self.frame2, xscrollcommand=self.x_scroll.set, yscrollcommand=self.y_scroll.set)
        self.frame3.pack(expand=True)
        self.dist_lab = Label(self.frame3, text='Districts')
        self.dist_scroll = Scrollbar(self.frame3)
        self.dist_listb = Listbox(self.frame3, yscrollcommand=self.dist_scroll.set, height=10, selectmode=EXTENDED)
        self.combox_label = Label(self.frame3, text='Threshold')
        self.combox = Combobox(self.frame3, values=['0%', '1%', '10%', '20%', '30%', '40%', '50%'], width=6, state="readonly")
        self.refine_but = Button(self.frame3, text='Refine Analysis')
        self.main_label.pack(fill=X, expand=True, anchor=N)
        self.loadDataB.pack(expand=True, anchor=N)
        self.clusDisB.pack(side=LEFT, expand=True, anchor=NE)
        self.clusPolB.pack(side=LEFT, expand=True, anchor=NW)
        self.x_scroll.pack(side=BOTTOM, fill=X)
        self.y_scroll.pack(side=RIGHT, fill=Y)
        self.canvas.pack(fill=BOTH, expand=True)
        self.x_scroll.configure(command=self.canvas.xview)
        self.y_scroll.configure(command=self.canvas.yview)
        self.dist_lab.pack(side=LEFT)
        self.dist_listb.pack(side=LEFT)
        self.dist_scroll.pack(side=LEFT, fill=Y)
        self.dist_scroll.configure(command=self.dist_listb.yview)
        self.combox_label.pack(side=LEFT)
        self.combox.pack(side=LEFT)
        self.combox.current(0)
        self.refine_but.pack(side=LEFT)

    def load_data_button(self):
        """Prompt for an .xlsx file, parse it, and list its districts."""
        self.file_path = askopenfilename(initialdir='/', title='Select file', filetypes=(('excel files', '*.xlsx'), ('all files', '*.*')))
        if self.file_path != '':
            self.data_center.parse_data(self.file_path)
            for name in self.data_center.districts:
                self.dist_listb.insert(END, name)

    def cluster_distrcits_but(self):
        """Cluster districts at the chosen threshold and show the dendrogram."""
        self.dist_listb.selection_clear(0, END)
        if self.file_path != '':
            try:
                self.data_center.cluster_dists([], threshold=int(self.combox.get()[:-1]))
                # Fixed: the original referenced the undefined name
                # DENDROGRAM_FILE_NAME (NameError on every click); the
                # module-level constant is ``dendogram_file_name``.
                dendrogram_image = Image.open(dendogram_file_name)
                self.dendrogram_image = ImageTk.PhotoImage(dendrogram_image)
                self.canvas.create_image(0, 0, image=self.dendrogram_image, anchor='nw')
            except (ZeroDivisionError, IndexError):
                self.canvas.delete("all")
            finally:
                self.canvas.config(scrollregion=self.canvas.bbox(ALL))
                self.place_analysis_on_grid()

    def place_analysis_on_grid(self):
        """Placeholder hook for laying out per-cluster analysis widgets.

        Fixed: the original called this method without defining it, so
        every clustering run ended in AttributeError.  Kept as a no-op so
        the UI flow completes; fill in when the grid layout is designed.
        """
        pass
class Data_Center:
    """Loads election results from an Excel sheet and clusters districts.

    Attributes:
        whole: tuple of per-party {row -> votes} dicts built by parse_data.
        districts: district names read from column 2, rows 11..49.
        parties: party names read from row 10, columns 9..20.
        political_party_vote_percentages: party name -> percentage figure
            (see NOTE(review) in parse_data about how this is computed).
    """
    def __init__(self):
        self.whole={}
        self.districts=[]
        self.parties=[]
        self.political_party_vote_percentages={}
        # NOTE(review): local variable, not an attribute -- it has no effect
        # and was probably meant to be self.vote or removed entirely.
        vote=0
    def parse_data(self,file_path):
        """Read party/district names and per-party tallies from the first
        worksheet of *file_path*, then load per-party vote columns via pandas.

        NOTE(review): inside each tally loop ``vote += vote`` doubles the
        cell just read instead of accumulating a running total (a separate
        accumulator such as ``total += vote`` was presumably intended), and
        ``8569494 / vote`` divides the magic constant (total valid votes?
        -- confirm) by the tally rather than the other way round.  Both
        look inverted; verify against the intended percentage definition.
        """
        workbook = xlrd.open_workbook(file_path)
        worksheet = workbook.sheet_by_index(0)
        try:
            # Party names: row 10, columns 9..20.
            for i in range(9, 21):
                party = worksheet.cell_value(10, i)
                self.parties.append(party)
            # District names: column 2, rows 11..49.
            for i in range(11,50):
                district=worksheet.cell_value(i,2)
                self.districts.append(district)
            # One copy-pasted block per party below (columns 9..20).
            for i in range(11,50):
                vote=worksheet.cell_value(i,9)
                vote+=vote
            self.political_party_vote_percentages['SAADET']=float((8569494/vote))*100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 10)
                vote += vote
            self.political_party_vote_percentages['BTP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 11)
                vote += vote
            self.political_party_vote_percentages['TKP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 12)
                vote += vote
            self.political_party_vote_percentages['VATAN'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 13)
                vote += vote
            self.political_party_vote_percentages['BBP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 14)
                vote += vote
            self.political_party_vote_percentages['AK PARTİ'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 15)
                vote += vote
            self.political_party_vote_percentages['CHP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 16)
                vote += vote
            self.political_party_vote_percentages['DP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 17)
                vote += vote
            self.political_party_vote_percentages['MHP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 18)
                vote += vote
            self.political_party_vote_percentages['İYİ PARTİ'] = float((8569494 / vote))* 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 19)
                vote += vote
            self.political_party_vote_percentages['HDP'] = float((8569494 / vote)) * 100.0
            for i in range(11, 50):
                vote = worksheet.cell_value(i, 20)
                vote += vote
            self.political_party_vote_percentages['DSP'] = float((8569494 / vote)) * 100.0
        except ZeroDivisionError:
            # NOTE(review): silently truncates the percentage table when a
            # column sums to zero; later lookups may then raise KeyError.
            vote=0.0
        # Re-read each party column with pandas (header rows skipped).
        saadet = pd.read_excel(file_path, usecols=[9], skiprows=10)
        btp = pd.read_excel(file_path, usecols=[10], skiprows=10)
        tkp = pd.read_excel(file_path, usecols=[11], skiprows=10)
        vatan = pd.read_excel(file_path, usecols=[12], skiprows=10)
        bbp = pd.read_excel(file_path, usecols=[13], skiprows=10)
        chp = pd.read_excel(file_path, usecols=[14], skiprows=10)
        ak = pd.read_excel(file_path, usecols=[15], skiprows=10)
        dp = pd.read_excel(file_path, usecols=[16], skiprows=10)
        mhp = pd.read_excel(file_path, usecols=[17], skiprows=10)
        iyi = pd.read_excel(file_path, usecols=[18], skiprows=10)
        hdp = pd.read_excel(file_path, usecols=[19], skiprows=10)
        dsp = pd.read_excel(file_path, usecols=[20], skiprows=10)
        saadet_dict = saadet.to_dict()
        btp_dict = btp.to_dict()
        tkp_dict = tkp.to_dict()
        vatan_dict = vatan.to_dict()
        bbp_dict = bbp.to_dict()
        chp_dict = chp.to_dict()
        ak_dict = ak.to_dict()
        dp_dict = dp.to_dict()
        mhp_dict = mhp.to_dict()
        iyi_dict = iyi.to_dict()
        hdp_dict = hdp.to_dict()
        dsp_dict = dsp.to_dict()
        # NOTE(review): column order here (chp before ak) differs from the
        # percentage loops above (ak before chp); confirm which is right.
        self.whole = (
            saadet_dict, btp_dict, tkp_dict, vatan_dict, bbp_dict, chp_dict, ak_dict, dp_dict, mhp_dict, iyi_dict, hdp_dict,
            dsp_dict)
    def cluster_dists(self,selected_districts, threshold=0):
        """Hierarchically cluster districts on per-party percentages.

        NOTE(review): ``hcluster``, ``sim_distance`` and ``drawdendrogram``
        are referenced unqualified although only ``clusters1`` is imported
        at module level -- this raises NameError unless they are provided
        elsewhere (probably meant ``clusters1.hcluster`` etc.).  Also,
        ``self.districts`` is a list of name strings, so indexing it with
        ``selected_districts[i]`` (another name string) raises TypeError,
        and the trailing comma makes each matrix cell a 1-tuple.
        """
        assert self.whole != {}
        if not selected_districts:
            selected_districts = self.districts
        political_party_names=[political_party for political_party in self.parties
                               if self.political_party_vote_percentages[political_party] >= threshold]
        cluster_matrix = [[0.0] * len(political_party_names) for i in range(len(selected_districts))]
        for i in range(len(selected_districts)):
            for j in range(len(political_party_names)):
                cluster_matrix[i][j] = self.districts[selected_districts[i]] \
                    .get_political_party_percentage(political_party_names[j]),
        cluster = hcluster(cluster_matrix, distance=sim_distance)
        drawdendrogram(cluster, selected_districts)
def main():
    """Wire a Data_Center to the GUI and start the Tk event loop."""
    data_center = Data_Center()
    g = PoliCluster(data_center)
    g.interface()


# Fixed: guard the entry point so importing this module no longer launches
# the GUI as a side effect; behaviour when run as a script is unchanged.
if __name__ == '__main__':
    main()
#di['CHP']['Adalar']=43
# ==== dataset file boundary (sample 3302628) ====
import logging
from datetime import datetime, timezone
from queue import Queue
from threading import Lock
from dateutil import parser as dateparser
from google.api_core.exceptions import BadRequest
from google.cloud import bigquery, storage
LOG = logging.getLogger("smart_archiver." + __name__)
def event_is_fresh(data, context):
    """Ensure a background Cloud Function only executes within a certain
    time period after the triggering event.

    Args:
        data (dict): The event payload.
        context (google.cloud.functions.Context): The event metadata.

    Returns:
        bool: True when the event should be processed, False when it is
        stale and ought to be dropped.
    """
    if data is None:
        LOG.debug("Running outside of Cloud Functions.")
        return True

    LOG.debug(context)
    event_time = dateparser.parse(context.timestamp)
    event_age_ms = (datetime.now(timezone.utc) - event_time).total_seconds() * 1000

    # Ignore events that are too old
    # TODO: Should this be configurable?
    max_age_ms = 10000
    if event_age_ms <= max_age_ms:
        return True
    LOG.info('Event is too old. Dropping {} (age {}ms)'.format(
        context.event_id, event_age_ms))
    return False
def load_config_file(filepath, required=None, defaults=None):
    """Load ``KEY = VALUE`` pairs from *filepath* into a dict.

    Lines starting with ``#`` are comments.  Values may contain ``=``
    themselves (only the first ``=`` splits key from value).

    Args:
        filepath (str): Path of the configuration file.
        required (iterable): Keys that must be present and not left at the
            "CONFIGURE_ME" placeholder; the process exits with status 1
            otherwise.
        defaults (dict): Fallback values.  Copied, so the caller's dict is
            never mutated (the original aliased a shared mutable default
            and polluted it across calls).

    Returns:
        dict: Parsed configuration merged over the defaults.
    """
    required = [] if required is None else required
    # Copy instead of aliasing the caller's (or the shared default) dict.
    config = dict(defaults) if defaults else {}
    # Context manager closes the file; the original leaked the handle.
    with open(filepath, "r") as config_file:
        for line in config_file:
            # ignore comments
            if line.startswith('#'):
                continue
            # parse the line; split only on the first '=' so values such
            # as URLs with query strings survive intact
            tokens = line.split('=', 1)
            if len(tokens) != 2:
                LOG.info("Error parsing config tokens: %s", tokens)
                continue
            k, v = tokens
            config[k.strip()] = v.strip()
    # quick validation
    for r in required:
        if r not in config.keys() or config[r] == "CONFIGURE_ME":
            LOG.error('Missing required config item: {}'.format(r))
            exit(1)
    return config
def get_bq_client(config):
    """Get a BigQuery client.

    Jobs run in BQ_JOB_PROJECT when configured, otherwise in PROJECT.

    Returns: google.cloud.bigquery.Client -- A BigQuery client.
    """
    if "BQ_JOB_PROJECT" in config:
        project = config["BQ_JOB_PROJECT"]
    else:
        project = config["PROJECT"]
    return bigquery.Client(project=project)
def get_gcs_client(config):
    """Get a GCS client for the configured PROJECT.

    Returns: google.cloud.storage.Client -- A GCS client.
    """
    project = config["PROJECT"]
    return storage.Client(project=project)
def get_bucket_and_object(resource_name):
    """Given an audit log resourceName, parse out the bucket name and the
    object path within the bucket.

    Returns: (str, str) -- ([bucket name], [object name])
    """
    after_bucket = resource_name.split("buckets/", 1)[1]
    bucket_name, _, remainder = after_bucket.partition("/")
    object_name = remainder.split("objects/", 1)[1]
    return (bucket_name, object_name)
def initialize_table(config, name, schema):
    """Creates, if not found, a table.

    Arguments: name {string} -- The fully qualified table name.
        schema {string} -- The schema portion of a BigQuery CREATE TABLE DDL
        query. For example: "resourceName STRING"

    Returns:
        google.cloud.bigquery.table.RowIterator -- Result of the query. Since
        this is a DDL query, this will always be empty if it succeeded.

    Raises: google.cloud.exceptions.GoogleCloudError -- If the job
        failed. concurrent.futures.TimeoutError -- If the job did not
        complete in the given timeout.

    NOTE(review): *name* and *schema* are interpolated directly into the
    DDL text -- callers must only pass trusted identifiers/schemas.
    """
    bq = get_bq_client(config)
    LOG.info("Creating table %s if not found.", name)
    querytext = """
    CREATE TABLE IF NOT EXISTS `{}` (
        {}
    )""".format(name, schema)
    LOG.debug("Query: \n{}".format(querytext))
    # result() blocks until the DDL job completes.
    query_job = bq.query(querytext)
    return query_job.result()
class BigQueryOutput():
    """Buffered writer that batches rows into a BigQuery table.

    Rows accumulate in memory and are flushed (inserted via the streaming
    JSON API) once the configured batch size is reached.
    """

    def __init__(self, config, tablename, schema):
        self.lock = Lock()
        self.client = get_bq_client(config)
        self.rows = list()
        self.tablename = tablename
        self.batch_size = int(config["BQ_BATCH_WRITE_SIZE"])
        self.insert_count = 0
        initialize_table(config, tablename, schema)

    def put(self, row):
        """Buffer *row*, flushing when the batch size is reached."""
        # Fixed: the buffer mutation and the flush decision now both happen
        # under the lock (the original appended outside it, racing other
        # writers), and the context manager releases the lock even if
        # flush() raises -- the original acquire()/release() pair leaked
        # the lock on exception, deadlocking every later caller.
        with self.lock:
            self.rows.append(row)
            if len(self.rows) >= self.batch_size:
                self.flush()

    def flush(self):
        """Write buffered rows to BigQuery and reset the buffer.

        ``insert_count`` counts rows *attempted*, whether or not the insert
        succeeded (accounting preserved from the original).
        """
        LOG.debug("Flushing %s rows to %s.", len(self.rows), self.tablename)
        try:
            insert_errors = self.client.insert_rows_json(
                self.tablename, self.rows)
            if insert_errors:
                LOG.error("Insert errors! %s",
                          [x for x in flatten(insert_errors)])
        except BadRequest as error:
            # An empty batch is not an error worth propagating.
            if not error.message.endswith("No rows present in the request."):
                LOG.error("Insert error! %s", error.message)
                raise error
        finally:
            self.insert_count += len(self.rows)
            self.rows = list()

    def stats(self):
        """Human-readable summary of rows attempted so far."""
        return "{} rows inserted into {}".format(self.insert_count, self.tablename)
def flatten(iterable, iter_types=(list, tuple)):
    """Yield the leaves of *iterable*, recursively descending into any
    element whose type is in *iter_types*."""
    for element in iterable:
        if isinstance(element, iter_types):
            yield from flatten(element, iter_types)
        else:
            yield element
# ==== dataset file boundary (sample 1785410) ====
#!/usr/bin/env python
#
# textpanel.py - A panel for displaying horizontal or vertical text.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`TextPanel` class, for displaying
some text, oriented either horizontally or vertically.
"""
import wx
class TextPanel(wx.Panel):
    """A :class:`wx.Panel` which may be used to display a string of
    text, oriented either horizontally or vertically.
    """
    def __init__(self, parent, text=None, orient=wx.HORIZONTAL, **kwargs):
        """Create a ``TextPanel``.

        :arg parent: The :mod:`wx` parent object.

        :arg text:   The text to display. This can be changed via
                     :meth:`SetLabel`.

        :arg orient: Text orientation - either ``wx.HORIZONTAL`` (the
                     default) or ``wx.VERTICAL``. This can be changed
                     later via :meth:`SetOrient`.

        All other arguments are passed through to ``wx.Panel.__init__`` .
        """
        super().__init__(parent, **kwargs)
        self.Bind(wx.EVT_PAINT, self.Draw)
        self.Bind(wx.EVT_SIZE, self.__onSize)
        # __size caches the rotated text extent (w, h); it is recomputed by
        # SetLabel and reported by DoGetBestClientSize.
        self.__text = text
        self.__size = None
        self.__orient = None
        # SetOrient also triggers the initial SetLabel/extent calculation.
        self.SetOrient(orient)
    def SetOrient(self, orient):
        """Sets the orientatino of the text on this ``TextPanel``.

        :arg orient: Either ``wx.HORIZONTAL`` or ``wx.VERTICAL``.
        """
        if orient not in (wx.HORIZONTAL, wx.VERTICAL):
            raise ValueError('TextPanel orient must be '
                             'wx.HORIZONTAL or wx.VERTICAL')
        self.__orient = orient
        # trigger re-calculation of
        # text extents and a refresh
        self.SetLabel(self.__text)
    def DoGetBestClientSize(self):
        """Returns the best (minimum) size for this ``TextPanel``. """
        # NOTE(review): if SetLabel was last called with text=None,
        # __size may be stale (or still None) -- confirm callers always set
        # a label before layout queries this.
        size = wx.Size(self.__size)
        self.CacheBestSize(size)
        return size
    def SetLabel(self, text):
        """Sets the text shown on this ``TextPanel``."""
        dc = wx.ClientDC(self)
        self.__text = text
        if text is None:
            self.SetMinSize((0, 0))
            return
        width, height = dc.GetTextExtent(text)
        # Vertical text swaps the extent axes (drawn rotated 90 degrees).
        if self.__orient == wx.VERTICAL:
            width, height = height, width
        self.__size = (width, height)
        self.SetMinSize((width, height))
        self.Refresh()
    def __onSize(self, ev):
        """Called when this ``TextPanel`` is resized. Triggers a refresh. """
        self.Refresh()
        ev.Skip()
    def Draw(self, ev=None):
        """Draws this ``TextPanel``. """
        bg = self.GetBackgroundColour()
        fg = self.GetForegroundColour()
        self.ClearBackground()
        if self.__text is None or self.__text == '':
            return
        # Paint events must use wx.PaintDC; direct calls use wx.ClientDC.
        if ev is None: dc = wx.ClientDC(self)
        else:          dc = wx.PaintDC( self)
        if not dc.IsOk():
            return
        dc.SetBackground(wx.Brush(bg))
        dc.SetTextForeground(fg)
        dc.Clear()
        paneW, paneH = dc.GetSize().Get()
        textW, textH = self.__size
        # Centre the text within the panel.
        x = int((paneW - textW) / 2.0)
        y = int((paneH - textH) / 2.0)
        if self.__orient == wx.VERTICAL:
            # Rotated text is anchored at its bottom-left corner.
            dc.DrawRotatedText(self.__text, x, paneH - y, 90)
        else:
            dc.DrawText(self.__text, x, y)
# ==== dataset file boundary (sample 52572) ====
import hammer as h
# Hammer grammar fragment: a parser that accepts exactly one of the three
# literal HMI signal tokens.
signals = h.choice(
    h.token("hmi.signal1"),
    h.token("hmi.signal2"),
    h.token("hmi.signal3"))
# ==== dataset file boundary (sample 143518, repo lyskevin/cpbook-code) ====
import math
# Large sentinel used as the "slope" of vertical lines in pointsToLine2.
INF = 10**9
# Absolute tolerance for floating-point comparisons against zero.
EPS = 1e-9
def DEG_to_RAD(d):
    """Convert an angle *d* from degrees to radians."""
    # Standard library over hand-rolled d*pi/180.
    return math.radians(d)


def RAD_to_DEG(r):
    """Convert an angle *r* from radians to degrees."""
    return math.degrees(r)
class point_i:
    """Plain 2D point record (by convention, integer coordinates)."""

    def __init__(self, x=0, y=0):
        self.x, self.y = x, y
class point:
    """2D point with lexicographic ordering and tolerance-based equality."""

    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

    def __lt__(self, other):
        # Order by x first, breaking ties on y (same as tuple comparison).
        if self.x != other.x:
            return self.x < other.x
        return self.y < other.y

    def __eq__(self, other):
        # Equal when both coordinates agree within math.isclose tolerances.
        same_x = math.isclose(self.x, other.x)
        same_y = math.isclose(self.y, other.y)
        return same_x and same_y
def dist(p1, p2):
    """Euclidean distance between p1 and p2 (any objects with x/y)."""
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    return math.hypot(dx, dy)
def rotate(p, theta):
    """Return p rotated theta degrees counter-clockwise about the origin."""
    rad = DEG_to_RAD(theta)
    cos_t, sin_t = math.cos(rad), math.sin(rad)
    return point(p.x * cos_t - p.y * sin_t,
                 p.x * sin_t + p.y * cos_t)
class line:
    """Line in the form ax + by + c = 0; filled in by pointsToLine."""

    def __init__(self):
        self.a = self.b = self.c = 0
def pointsToLine(p1, p2, l):
    """Fill line l (ax + by + c = 0) through p1 and p2.

    b is normalised to 1 unless the line is vertical, in which case
    a = 1, b = 0 and c = -x.
    """
    if abs(p1.x - p2.x) < EPS:
        # Vertical line: x = p1.x  ->  1*x + 0*y - p1.x = 0
        l.a, l.b, l.c = 1.0, 0.0, -p1.x
    else:
        slope = -(p1.y - p2.y) / (p1.x - p2.x)
        l.a = slope
        l.b = 1.0
        l.c = -(slope * p1.x) - p1.y
class line2:
    """Line y = m*x + c (m == INF encodes the vertical line x = c)."""

    def __init__(self):
        self.m = self.c = 0
def pointsToLine2(p1, p2, l):
    """Fill slope-intercept line l through p1 and p2.

    Returns 0 and stores m = INF, c = x for a vertical line; otherwise
    returns 1 with the usual slope/intercept.
    """
    if p1.x == p2.x:
        # Vertical: slope undefined, keep the x-coordinate in c.
        l.m = INF
        l.c = p1.x
        return 0
    l.m = (p1.y - p2.y) / (p1.x - p2.x)
    l.c = p1.y - l.m * p1.x
    return 1
def areParallel(l1, l2):
    """True when l1 and l2 have (close to) identical a and b coefficients."""
    return math.isclose(l1.a, l2.a) and math.isclose(l1.b, l2.b)


def areSame(l1, l2):
    """True when l1 and l2 describe the same line."""
    if not areParallel(l1, l2):
        return False
    return math.isclose(l1.c, l2.c)


def areIntersect(l1, l2, p):
    """Intersect l1 and l2, storing the intersection point in p.

    Returns False (p untouched) for parallel lines, True otherwise.
    """
    if areParallel(l1, l2):
        return False
    p.x = (l2.b * l1.c - l1.b * l2.c) / (l2.a * l1.b - l1.a * l2.b)
    # Solve for y using whichever line has a usable b coefficient
    # (b is normalised to 1 by pointsToLine for non-vertical lines).
    reference = l1 if not math.isclose(l1.b, 0.0) else l2
    p.y = -(reference.a * p.x + reference.c)
    return True
class vec:
    """2D vector with x/y components."""

    def __init__(self, x=0, y=0):
        self.x, self.y = x, y


def toVec(a, b):
    """Vector from point a to point b."""
    return vec(b.x - a.x, b.y - a.y)


def scale(v, s):
    """Vector v scaled by factor s."""
    return vec(v.x * s, v.y * s)


def translate(p, v):
    """Point p translated by vector v."""
    return point(p.x + v.x, p.y + v.y)
def pointSlopeToLine(p, m, l):
    """Fill line l (ax + by + c = 0, b fixed at 1) through p with slope m."""
    l.a = -m
    l.b = 1
    l.c = -((l.a * p.x) + (l.b * p.y))
def closestPoint(l, p, ans):
    """Store in ans the point on line l closest to p."""
    if math.isclose(l.b, 0.0):
        # Vertical line x = -c: project horizontally.
        ans.x, ans.y = -l.c, p.y
        return
    if math.isclose(l.a, 0.0):
        # Horizontal line y = -c: project vertically.
        ans.x, ans.y = p.x, -l.c
        return
    # General case: intersect l with the perpendicular through p.
    perpendicular = line()
    pointSlopeToLine(p, 1.0 / l.a, perpendicular)
    areIntersect(l, perpendicular, ans)
def reflectionPoint(l, p, ans):
    """Store in ans the mirror image of p across line l."""
    foot = point()
    closestPoint(l, p, foot)
    v = toVec(p, foot)
    # Walk twice the vector from p to the foot of the perpendicular.
    ans.x, ans.y = p.x + 2 * v.x, p.y + 2 * v.y
def dot(a, b):
    """Dot product of 2D vectors a and b."""
    return a.x * b.x + a.y * b.y


def norm_sq(v):
    """Squared length of vector v."""
    return dot(v, v)
def angle(a, o, b):
    """Angle a-o-b (at vertex o), in radians."""
    oa = toVec(o, a)
    ob = toVec(o, b)
    cosine = dot(oa, ob) / math.sqrt(norm_sq(oa) * norm_sq(ob))
    return math.acos(cosine)
def distToLine(p, a, b, c):
    """Distance from p to the infinite line through a and b.

    The closest point (the projection of p) is stored in c, in place.
    """
    ap = toVec(a, p)
    ab = toVec(a, b)
    u = dot(ap, ab) / norm_sq(ab)
    step = scale(ab, u)
    c.x, c.y = a.x + step.x, a.y + step.y
    return dist(p, c)
def distToLineSegment(p, a, b, c):
    """Distance from p to segment ab; nearest segment point stored in c."""
    ap = toVec(a, p)
    ab = toVec(a, b)
    u = dot(ap, ab) / norm_sq(ab)
    if u < 0.0:
        # Projection falls before a: a itself is the closest point.
        c.x, c.y = a.x, a.y
        return dist(p, a)
    if u > 1.0:
        # Projection falls past b: b itself is the closest point.
        c.x, c.y = b.x, b.y
        return dist(p, b)
    return distToLine(p, a, b, c)
def cross(a, b):
    """2D cross product (z-component) of vectors a and b."""
    return a.x * b.y - a.y * b.x


def ccw(p, q, r):
    """True when p->q->r turns counter-clockwise (collinear counts, via EPS)."""
    return cross(toVec(p, q), toVec(p, r)) > -EPS


def collinear(p, q, r):
    """True when p, q, r lie on a single line (within EPS)."""
    return abs(cross(toVec(p, q), toVec(p, r))) < EPS
if __name__ == '__main__':
    # Smoke-test / demo of the routines above; prints values for manual
    # inspection when the module is run as a script.

    # Sorting and tolerance-based equality of points.
    P = [point(2e-9, 0), point(0, 2), point(1e-9, 1)]
    P = sorted(P)
    for pt in P:
        print('%.9lf, %.9lf' % (pt.x, pt.y))
    P1 = point()
    P2 = point()
    P3 = point(0, 1)
    print('%d' % (P1 == P2))
    print('%d' % (P1 == P3))
    P = [point(2, 2), point(4, 3), point(2, 4), point(6, 6), point(2, 6), point(6, 5)]
    P = sorted(P)
    for p in P:
        print('(%.2lf, %.2lf)' % (p.x, p.y))
    P = [point(2, 2), point(4, 3), point(2, 4), point(6, 6), point(2, 6), point(6, 5), point(8, 6)]
    d = dist(P[0], P[5])
    print('Euclidean distance between P[0] and P[5] = %.2lf' % d)
    # Line construction and relationship checks.
    l1 = line()
    l2 = line()
    l3 = line()
    l4 = line()
    pointsToLine(P[0], P[1], l1)
    print('%.2lf * x + %.2lf * y + %.2lf = 0.00' % (l1.a, l1.b, l1.c))
    pointsToLine(P[0], P[2], l2);
    print('%.2lf * x + %.2lf * y + %.2lf = 0.00' % (l2.a, l2.b, l2.c))
    pointsToLine(P[2], P[3], l3)
    print('l1 & l2 are parallel? %d' % areParallel(l1, l2))
    print('l1 & l3 are parallel? %d' % areParallel(l1, l3))
    pointsToLine(P[2], P[4], l4)
    print('l1 & l2 are the same? %d' % areSame(l1, l2))
    print('l2 & l4 are the same? %d' % areSame(l2, l4))
    p12 = point()
    res = areIntersect(l1, l2, p12)
    print('l1 & l2 are intersect? %d, at (%.2lf, %.2lf)' % (res, p12.x, p12.y))
    # Distances, projections and reflection.
    ans = point()
    d = distToLine(P[0], P[2], P[3], ans)
    print('Closest point from P[0] to line (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    closestPoint(l3, P[0], ans)
    print('Closest point from P[0] to line V2 (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, dist(P[0], ans)))
    d = distToLineSegment(P[0], P[2], P[3], ans)
    print('Closest point from P[0] to line SEGMENT (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    d = distToLineSegment(P[1], P[2], P[3], ans)
    print('Closest point from P[1] to line SEGMENT (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    d = distToLineSegment(P[6], P[2], P[3], ans)
    print('Closest point from P[6] to line SEGMENT (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    reflectionPoint(l4, P[1], ans)
    print('Reflection point from P[1] to line (P[2]-P[4]): (%.2lf, %.2lf)' % (ans.x, ans.y))
    # Angles, turn direction and collinearity.
    print('Angle P[0]-P[4]-P[3] = %.2lf' % RAD_to_DEG(angle(P[0], P[4], P[3])))
    print('Angle P[0]-P[2]-P[1] = %.2lf' % RAD_to_DEG(angle(P[0], P[2], P[1])))
    print('Angle P[4]-P[3]-P[6] = %.2lf' % RAD_to_DEG(angle(P[4], P[3], P[6])))
    print('P[0], P[2], P[3] form A left turn? %d' % ccw(P[0], P[2], P[3]))
    print('P[0], P[3], P[2] form A left turn? %d' % ccw(P[0], P[3], P[2]))
    print('P[0], P[2], P[3] are collinear? %d' % collinear(P[0], P[2], P[3]))
    print('P[0], P[2], P[4] are collinear? %d' % collinear(P[0], P[2], P[4]))
    p = point(3, 7)
    q = point(11, 13)
    r = point(35, 30)
    print('r is on the %s of line p-q (direction p->q)' % ('left' if ccw(p, q, r) else 'right'))
    # Vector translate / scale / rotate demo.
    A = point(2.0, 2.0)
    B = point(4.0, 3.0)
    v = toVec(A, B)
    C = point(3.0, 2.0)
    D = translate(C, v)
    print('D = (%.2lf, %.2lf)' % (D.x, D.y))
    E = translate(C, scale(v, 0.5))
    print('E = (%.2lf, %.2lf)' % (E.x, E.y))
    print('B = (%.2lf, %.2lf)' % (B.x, B.y))
    F = rotate(B, 90)
    print('F = (%.2lf, %.2lf)' % (F.x, F.y))
    G = rotate(B, 180)
    print('G = (%.2lf, %.2lf)' % (G.x, G.y))
# ==== dataset file boundary (sample 3350102) ====
class Properties:
    """
    Keys to access structure properties in `schnetpack.data.AtomsData`
    """
    # geometry
    Z = "_atomic_numbers"
    charge = "_charge"
    atom_mask = "_atom_mask"
    position = "_positions"
    R = position  # alias for position
    cell = "_cell"
    pbc = "_pbc"
    neighbors = "_neighbors"
    neighbor_mask = "_neighbor_mask"
    cell_offset = "_cell_offset"
    # pairwise (triple) neighbor indices/masks for angular terms
    neighbor_pairs_j = "_neighbor_pairs_j"
    neighbor_pairs_k = "_neighbor_pairs_k"
    neighbor_pairs_mask = "_neighbor_pairs_mask"
    neighbor_offsets_j = "_neighbor_offsets_j"
    neighbor_offsets_k = "_neighbor_offsets_k"
    # chemical properties
    energy = "energy"
    forces = "forces"
    stress = "stress"
    dipole_moment = "dipole_moment"
    total_dipole_moment = "total_dipole_moment"
    polarizability = "polarizability"
    iso_polarizability = "iso_polarizability"
    at_polarizability = "at_polarizability"
    charges = "charges"
    energy_contributions = "energy_contributions"
    shielding = "shielding"
    hessian = "hessian"
    dipole_derivatives = "dipole_derivatives"
    polarizability_derivatives = "polarizability_derivatives"
    electric_field = "electric_field"
    magnetic_field = "magnetic_field"
    dielectric_constant = "dielectric_constant"
    magnetic_moments = "magnetic_moments"
    # canonical list of supported target properties
    properties = [
        energy,
        forces,
        stress,
        dipole_moment,
        polarizability,
        shielding,
        hessian,
        dipole_derivatives,
        polarizability_derivatives,
        electric_field,
        magnetic_field,
    ]
    external_fields = [electric_field, magnetic_field]
    # NOTE(review): dipole_derivatives appears twice in this list; one of
    # the duplicates is probably meant to be another property (or should
    # be removed).  Verify before relying on the list's contents/length.
    electric_properties = [
        dipole_moment,
        dipole_derivatives,
        dipole_derivatives,
        polarizability_derivatives,
        polarizability,
    ]
    magnetic_properties = [shielding]
    # property -> inputs that require gradients to compute it
    required_grad = {
        energy: [],
        forces: [position],
        hessian: [position],
        dipole_moment: [electric_field],
        polarizability: [electric_field],
        dipole_derivatives: [electric_field, position],
        polarizability_derivatives: [electric_field, position],
        shielding: [magnetic_field, magnetic_moments],
    }
from local.src.schnetpack.atomistic import AtomisticModel
from local.src.schnetpack.data import *
from local.src.schnetpack import atomistic
from local.src.schnetpack import data
from local.src.schnetpack import datasets
from local.src.schnetpack import environment
from local.src.schnetpack import interfaces
from local.src.schnetpack import md
from local.src.schnetpack.train import metrics, hooks
from local.src.schnetpack import nn
from local.src.schnetpack import representation
from local.src.schnetpack import train
from local.src.schnetpack import utils
from local.src.schnetpack.representation import SchNet
from local.src.schnetpack.utils import __init__
# ==== dataset file boundary (sample 112079) ====
import os
import pytest
from torchvision.transforms import Resize, ToTensor
from continuum.datasets import CUB200
from continuum.scenarios import ClassIncremental
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
'''
Test task_set.plot visualization with a ClassIncremental scenario on CUB200.
'''
@pytest.mark.slow
def test_scenario_CUB200_ClassIncremental():
    """Build a ClassIncremental scenario on CUB200 (100 new classes per
    task) and plot 100 samples per task to Archives/Samples/CUB200/CI.

    NOTE(review): the output filename says "InstanceIncremental" although
    the scenario is ClassIncremental -- confirm the intended naming.
    """
    dataset = CUB200(DATA_PATH, train=True, transform=None)
    scenario = ClassIncremental(dataset, increment=100, transformations=[Resize((224, 224)), ToTensor()])
    print(f"Nb classes : {scenario.nb_classes} ")
    print(f"Nb tasks : {scenario.nb_tasks} ")
    for task_id, task_set in enumerate(scenario):
        print(f"Task {task_id} : {task_set.nb_classes} classes")
        task_set.plot(path="Archives/Samples/CUB200/CI",
                      title="CUB200_InstanceIncremental_{}.jpg".format(task_id),
                      nb_samples=100)
# ==== dataset file boundary (sample 1670598, gh_stars 1-10) ====
from foo import Foo
print(Foo().scope)

# ==== dataset file boundary (sample 105378) ====
from oauth.oauth import OAuthRequest, OAuthServer, build_authenticate_header,\
    OAuthSignatureMethod_PLAINTEXT, OAuthSignatureMethod_HMAC_SHA1
from django.conf import settings
from django.http import HttpResponse
from stores import DataStore
import ast
# Realm advertised in the WWW-Authenticate header ('' disables a realm).
OAUTH_REALM_KEY_NAME = getattr(settings, 'OAUTH_REALM_KEY_NAME', '')
# Signature methods the OAuth server will accept.
OAUTH_SIGNATURE_METHODS = getattr(settings, 'OAUTH_SIGNATURE_METHODS', ['plaintext', 'hmac-sha1'])
def initialize_server_request(request):
    """Build (OAuthServer, OAuthRequest) from a Django request.

    Returns (None, oauth_request) when the request cannot be turned into
    an OAuth request at all.
    """
    # OAuth change
    # Django converts Authorization header in HTTP_AUTHORIZATION
    # Warning: it doesn't happen in tests but it's useful, do not remove!
    auth_header = {}
    if 'Authorization' in request.META:
        auth_header = {'Authorization': request.META['Authorization']}
    elif 'HTTP_AUTHORIZATION' in request.META:
        auth_header = {'Authorization': request.META['HTTP_AUTHORIZATION']}
    # Don't include extra parameters when request.method is POST and
    # request.MIME['CONTENT_TYPE'] is "application/x-www-form-urlencoded"
    # (See http://oauth.net/core/1.0a/#consumer_req_param).
    # But there is an issue with Django's test Client and custom content types
    # so an ugly test is made here, if you find a better solution...
    parameters = {}
    if request.method == "POST" and request.META.get('CONTENT_TYPE') != "application/json" \
            and (request.META.get('CONTENT_TYPE') == "application/x-www-form-urlencoded" \
            or request.META.get('SERVER_NAME') == 'testserver'):
        # lou -w -When POST statement data, the actual data is a dict key and has a value of ''
        # have to parse it out correctly...
        p = dict(request.REQUEST.items())
        # NOTE(review): Python 2-era code -- dict views are not indexable
        # on Python 3, and request.REQUEST was removed in Django 1.9; this
        # branch needs porting before reuse on a modern stack.
        if p.values()[0] == '':
            # literal eval is putting them in differnt order
            parameters = ast.literal_eval(p.keys()[0])
        else:
            parameters = p
    oauth_request = OAuthRequest.from_request(request.method,
                                              request.build_absolute_uri(),
                                              headers=auth_header,
                                              parameters=parameters,
                                              query_string=request.META.get('QUERY_STRING', ''))
    if oauth_request:
        oauth_server = OAuthServer(DataStore(oauth_request))
        # Register only the signature methods enabled in settings.
        if 'plaintext' in OAUTH_SIGNATURE_METHODS:
            oauth_server.add_signature_method(OAuthSignatureMethod_PLAINTEXT())
        if 'hmac-sha1' in OAUTH_SIGNATURE_METHODS:
            oauth_server.add_signature_method(OAuthSignatureMethod_HMAC_SHA1())
    else:
        oauth_server = None
    return oauth_server, oauth_request
def send_oauth_error(err=None):
    """Build a 401 response carrying the OAuth error message and the
    WWW-Authenticate challenge header.

    Accepts a plain string, an exception-like object with a ``.message``
    attribute, or None (generic message).  The original implementation
    crashed with AttributeError when called with its own default of None.
    """
    # send a 401 error
    # lou w - be able to send plain error messages
    if err is None:
        message = 'OAuth error'
    elif isinstance(err, str):
        message = err
    else:
        message = err.message.encode('utf-8')
    response = HttpResponse(message, mimetype="text/plain")
    response.status_code = 401
    # return the authenticate header
    header = build_authenticate_header(realm=OAUTH_REALM_KEY_NAME)
    for k, v in header.iteritems():
        response[k] = v
    return response
# ==== dataset file boundary (sample 3278057) ====
# filename: order_fulfillment/order_fulfillment_multi_item.py
# This code contains all heuristics for multi-item orders, namely LSC, SPS and Greedy.
# It takes the data as input and returns the cost and store assignment as output.
import itertools
from functools import reduce
import operator
from pyomo.environ import *
from pyomo.opt import SolverFactory
def prod(iterable):
    """Product of all elements of *iterable* (1 for an empty iterable)."""
    result = 1
    for factor in iterable:
        result *= factor
    return result
def power_set(s):
    """Yield every subset of *s* as a list, in bitmask (counting) order:
    subset k contains s[i] exactly when bit i of k is set."""
    n = len(s)
    for bits in range(1 << n):
        yield [element for position, element in enumerate(s)
               if bits >> position & 1]
# Parcel Select - Ground commercial rates, taken directly from
# https://pe.usps.com/text/dmm300/Notice123.htm (look for Commercial
# rates in multiples of 4 lb).  Rows index the item count 1..16,
# columns index the USPS zone.
ship_cost = [[7.84, 8.35, 8.97, 10.13, 13.91, 15.76, 17.79, 17.79],
             [8.35, 10.07, 11.39, 17.15, 22.22, 26.33, 30.59, 30.59],
             [11.9, 14.29, 16.79, 23.98, 31.56, 38.1, 44.38, 44.38],
             [14.29, 17.87, 20.68, 29.83, 39.7, 45.54, 52.78, 52.78],
             [16.00, 20.04, 23.64, 34.2, 47.13, 54.63, 64.04, 64.04],
             [18.49, 22.5, 27.06, 37.59, 49.11, 57.43, 67.63, 67.63],
             [22.25, 26.47, 34.7, 45.81, 52.46, 63.18, 76.43, 76.43],
             [24.57, 27.98, 37.67, 48.22, 56.37, 68.99, 83.52, 83.52],
             [25.78, 31.09, 40.69, 52.09, 61.95, 74.32, 89.66, 89.66],
             [27.84, 35.24, 44.44, 59.73, 73.19, 84.77, 100.41, 100.41],
             [28.72, 36.26, 46.00, 63.56, 76.59, 88.45, 105.17, 105.17],
             [29.93, 37.28, 47.54, 67.07, 80.59, 93.21, 109.65, 109.65],
             [31.85, 38.29, 49.09, 69.16, 83.08, 99.01, 115.3, 115.3],
             [33.74, 39.3, 50.63, 70.83, 85.11, 103.15, 119, 119],
             [35.65, 40.31, 52.18, 72.2, 86.87, 105.19, 126.41, 126.41],
             [37.63, 41.33, 54.53, 73.21, 88.95, 107.02, 133.4, 133.4]]


def shipping_cost(zone, numItems):
    """Cost of shipping ``numItems`` items to ``zone`` (0-based column).

    An empty shipment costs nothing.  The tabulated rates are scaled down
    by 4 to keep the magnitudes realistic, then rounded to cents.
    """
    if not numItems:
        return 0
    rate = ship_cost[numItems - 1][zone]
    return round(rate / 4, 2)
# Given a sequence of stores for each item, this function calculates the exact expected cost (in exponential time)
def compute_exact_cost(_assignment, _data, _nStores, _nItems, _nStages, _penaltyCost):
    """Exact expected fulfillment cost of a store assignment.

    _assignment[stage][item] is the store index tried for *item* at
    *stage* (-1 means the item is not assigned at that stage).

    NOTE(review): _data[0][store] appears to be a per-store record with
    'p' (per-item probability of the item NOT shipping, e.g. stockout --
    confirm the exact semantics), 'c' (per-item picking cost) and 's'
    (the store's shipping zone); verify against the data builder.

    Runs in time exponential in the number of items per store, since it
    enumerates the power set of items (and of shipped subsets) per store.
    """
    ub = 0
    # probabilityOfTrying[i]: probability item i is still unfulfilled and
    # therefore attempted at the current stage (starts at certainty).
    probabilityOfTrying = [1 for _item in range(_nItems)]
    for stage in range(_nStages):
        # Shipping Cost
        for store in range(_nStores):
            itemsInStore = set()
            for _item in range(_nItems):
                if _assignment[stage][_item] == store:
                    itemsInStore.add(_item)
            # Condition on which items are actually attempted here, then on
            # which of those the store succeeds in shipping.
            for itemsToTry in power_set(itemsInStore):
                shipCost = 0
                for itemsToShip in power_set(itemsToTry):
                    itemsNotToShip = set(itemsToTry) - set(itemsToShip)
                    probShipping = 1
                    for it in itemsToShip:
                        probShipping *= 1 - _data[0][store]['p'][it]
                    for it in itemsNotToShip:
                        probShipping *= _data[0][store]['p'][it]
                    shipCost += probShipping * shipping_cost(_data[0][store]['s'], len(itemsToShip))
                probTryingInSTore = prod(probabilityOfTrying[_it] for _it in itemsToTry)
                itemsNotToTry = itemsInStore - set(itemsToTry)
                probTryingInSTore *= prod(1 - probabilityOfTrying[_it] for _it in itemsNotToTry)
                ub += probTryingInSTore * shipCost
        # Picking Cost & Updating Trying Probabilities
        for _item in range(_nItems):
            if _assignment[stage][_item] != -1:
                ub += probabilityOfTrying[_item] * _data[0][_assignment[stage][_item]]['c'][_item]
                probabilityOfTrying[_item] *= _data[0][_assignment[stage][_item]]['p'][_item]
    # Last Level
    # Items still unfulfilled after all stages incur the penalty cost.
    for _item in range(_nItems):
        ub += probabilityOfTrying[_item] * _penaltyCost
    return ub
# Given a sequence of stores for each item, this function calculates the exact expected cost (in exponential time)
# This one is a bit faster than compute_exact_cost()
# used for computations in draft
def compute_exact_cost_2(_assignment, _data, _nStores, _nItems, _nStages, _penaltyCost):
    """Exact expected cost of *_assignment*, one power set per store.

    Same result as compute_exact_cost(), but folds the probability that an
    item is still being tried directly into the shipping probability, so
    only a single power set per store needs to be enumerated.
    """
    ub = 0
    # probabilityOfTrying[i]: probability item i is still unfulfilled at the
    # start of the current stage.
    probabilityOfTrying = [1 for _item in range(_nItems)]
    for stage in range(_nStages):
        # Shipping Cost
        for store in range(_nStores):
            itemsInStore = set()
            for _item in range(_nItems):
                if _assignment[stage][_item] == store:
                    itemsInStore.add(_item)
            for itemsToShip in power_set(itemsInStore):
                itemsNotToShip = itemsInStore - set(itemsToShip)
                probShipping = 1
                for it in itemsToShip:
                    # Item is shipped iff it is still being tried AND the
                    # pick here succeeds.
                    probShipping *= probabilityOfTrying[it] * (1 - _data[0][store]['p'][it])
                for it in itemsNotToShip:
                    probShipping *= 1 - probabilityOfTrying[it] * (1 - _data[0][store]['p'][it])
                ub += probShipping * shipping_cost(_data[0][store]['s'], len(itemsToShip))
        # Picking Cost & Updating Trying Probabilities
        for _item in range(_nItems):
            if _assignment[stage][_item] != -1:
                ub += probabilityOfTrying[_item] * _data[0][_assignment[stage][_item]]['c'][_item]
                probabilityOfTrying[_item] *= _data[0][_assignment[stage][_item]]['p'][_item]
    # Last Level
    for _item in range(_nItems):
        ub += probabilityOfTrying[_item] * _penaltyCost
    return ub
# Given a sequence of stores for each item, this function calculates an upper bound to exact expected cost
# in polynomial time
# Not used in draft
def compute_upper_bound_polytime(_assignment, _data, _nStores, _nItems, _nStages, _penaltyCost):
    """Polynomial-time upper bound on the expected cost of *_assignment*.

    NOTE(review): shipProbability is a float, but shipping_cost() indexes
    the rate table with ``numItems - 1``, which raises TypeError for any
    non-zero float argument.  Confirm this function really is unused
    ("Not used in draft") or convert to an integer item count before the
    lookup.
    """
    ub = 0
    probabilityOfTrying = [1 for _item in range(_nItems)]
    for stage in range(_nStages):
        for store in range(_nStores):
            # Expected number of items shipped from this store at this stage.
            shipProbability = 0
            for _itemInStore in range(_nItems):
                if _assignment[stage][_itemInStore] == store:
                    shipProbability += probabilityOfTrying[_itemInStore] * (1 - _data[0][store]['p'][_itemInStore])
            ub += shipping_cost(_data[0][store]['s'], shipProbability)
        for _itemI in range(_nItems):
            if _assignment[stage][_itemI] != -1:
                ub += probabilityOfTrying[_itemI] * _data[0][_assignment[stage][_itemI]]['c'][_itemI]
                # Update the probability of trying at store at the next stage
                probabilityOfTrying[_itemI] *= _data[0][_assignment[stage][_itemI]]['p'][_itemI]
    # Adding the last failure cost after last stage
    for __item in range(_nItems):
        ub += probabilityOfTrying[__item] * _penaltyCost
    return ub
# For a single item this function computes the opt fulfillment policy and cost under linear shipping cost
def solve_dp_single_item(_item, _data, _nStores, _nItems, _nStages, _penaltyCost):
    """Optimal store sequence and expected cost for one item.

    Shipping is linearized: each store's parcel rate is split evenly over
    the items assignable to it (perUnitShippingCost).  Eligible stores are
    sorted by a cost-to-success ratio; if there are more stores than stages
    a DP over (stage, position in sorted list) picks the best subsequence,
    otherwise all eligible stores are used in ratio order.  Returns a pair
    (list of _nStages store indices, -1 meaning "no store", expected cost).
    """
    nItemsPerStore = [0 for _ in range(_nStores)]
    for [_, _store] in _data[1]:
        nItemsPerStore[_store] += 1
    perUnitShippingCost = [shipping_cost(_data[0][_store]['s'], nItemsPerStore[_store]) / nItemsPerStore[_store]
                           for _store in range(_nStores)]
    def ratio(_store):
        # Expected cost of one visit, divided by its success probability.
        return (_data[0][_store]['c'][_item]
                + (1 - _data[0][_store]['p'][_item]) * perUnitShippingCost[_store]) / (1 - _data[0][_store]['p'][_item])
    # Eligible Stores for Item
    eligibleStores = set()
    for [__item, __store] in _data[1]:
        if __item == _item:
            eligibleStores.add(__store)
    # Sort the Stores into a Sorted List
    indexSorted = list(sorted(eligibleStores, key=lambda k: ratio(k)))
    if len(indexSorted) < _nStages + 1:
        # No need to solve the DP: with at most _nStages stores, the best
        # policy just tries them all in ratio order.
        _cost = _data[0][indexSorted[0]]['c'][_item] + (1 - _data[0][indexSorted[0]]['p'][_item]) \
                * perUnitShippingCost[indexSorted[0]]
        product = _data[0][indexSorted[0]]['p'][_item]
        for stage in range(len(indexSorted) - 1):
            _cost += product * (
                    _data[0][indexSorted[stage + 1]]['c'][_item] + (1 - _data[0][indexSorted[stage + 1]]['p'][_item]) *
                    perUnitShippingCost[indexSorted[stage + 1]])
            product *= _data[0][indexSorted[stage + 1]]['p'][_item]
        _cost += product * _penaltyCost
        if len(indexSorted) == _nStages:
            return indexSorted, _cost
        else:
            # Pad the sequence with -1 ("no store") up to _nStages entries.
            emptyLevels = [-1 for _ in range(_nStages - len(indexSorted))]
            indexSorted.extend(emptyLevels)
            return indexSorted, _cost
    else:
        # Solve the DP
        # Initializing the value Function
        valueFunction = [[50 * _penaltyCost for _ in range(1 + len(indexSorted))] for _ in range(1 + _nStages)]
        # One extra stage to handle the penalty
        # the "1+" in 1+len(indexSorted) is to handle #stores = #stages
        for _store in range(1 + len(indexSorted)):
            valueFunction[_nStages][_store] = _penaltyCost
        selectedStoresLocalIndex = [[-1 for _ in range(1 + len(indexSorted))] for _ in range(_nStages)]
        # Writing the recursion
        for stage in reversed(range(_nStages)):
            # s is position
            for s in range(1 + len(indexSorted) - _nStages + stage):
                # Initializing with huge cost
                theCost = _nStores * _penaltyCost
                for ss in range(s, 1 + len(indexSorted) - _nStages + stage):
                    # if _item == 3:
                    #     print('ss', ss)
                    currentCost = _data[0][indexSorted[ss]]['c'][_item] + (1 - _data[0][indexSorted[ss]]['p'][_item]) \
                                  * perUnitShippingCost[indexSorted[ss]] + _data[0][indexSorted[ss]]['p'][_item] \
                                  * valueFunction[stage + 1][ss + 1]
                    if currentCost < theCost:
                        theCost = currentCost
                        selectedStoresLocalIndex[stage][s] = ss
                valueFunction[stage][s] = theCost
        # Recover the optimal sequence by following the argmin pointers.
        selectedStore = [-1 for _ in range(_nStages)]
        currentStoreIndex = 0
        for stage in range(_nStages):
            selectedStore[stage] = indexSorted[selectedStoresLocalIndex[stage][currentStoreIndex]]
            currentStoreIndex = selectedStoresLocalIndex[stage][currentStoreIndex] + 1
        return selectedStore, valueFunction[0][0]
# A lower bound to optimal cost
def solve_lb_multi_item_no_repeat(_data, _nStores, _nItems, _nStages, _penaltyCost):
    """Sum of per-item optimal single-item costs.

    A lower bound on the multi-item optimum: each item is solved with its
    own DP, ignoring shipping consolidation across items.  Returns the
    bound and the corresponding stage -> store assignment.
    """
    lbAssignment = {stage: [-1] * _nItems for stage in range(_nStages)}
    lb = 0
    for item in range(_nItems):
        stores, item_cost = solve_dp_single_item(item, _data, _nStores, _nItems, _nStages, _penaltyCost)
        lb += item_cost
        for stage in range(_nStages):
            lbAssignment[stage][item] = stores[stage]
    return lb, lbAssignment
# Linear Underestimator Heuristic that breaks multi-item order into single-item orders
def solve_lu(_data, _nStores, _nItems, _nStages, _penaltyCost):
    """LSC heuristic: solve each item's single-item DP independently, then
    evaluate the combined assignment with the exact cost computation."""
    assignment = {stage: [-1] * _nItems for stage in range(_nStages)}
    for item in range(_nItems):
        stores, _ = solve_dp_single_item(item, _data, _nStores, _nItems, _nStages, _penaltyCost)
        for stage in range(_nStages):
            assignment[stage][item] = stores[stage]
    exact_cost = compute_exact_cost_2(assignment, _data, _nStores, _nItems, _nStages, _penaltyCost)
    return exact_cost, assignment
# Greedy heuristic that chooses L stores with lowest labor and shipping costs
def greedy(_data, _nStores, _nItems, _nStages, _penaltyCost):
    """Greedy heuristic: for each item, rank its eligible stores by
    single-unit shipping plus picking cost and assign the cheapest ones to
    successive stages.  Returns the exact expected cost of the resulting
    assignment and the assignment itself."""
    assignment = {stage: [-1] * _nItems for stage in range(_nStages)}
    for item in range(_nItems):
        # Eligible Stores for Item
        eligible = {store for [other_item, store] in _data[1] if other_item == item}
        # Sort stores for each item based on pick cost only
        ranked = sorted(eligible,
                        key=lambda store: shipping_cost(_data[0][store]['s'], 1) + _data[0][store]['c'][item])
        for stage in range(min(len(ranked), _nStages)):
            assignment[stage][item] = ranked[stage]
    return compute_exact_cost_2(assignment, _data, _nStores, _nItems, _nStages, _penaltyCost), assignment
# This heuristic allows k stores to be tried at each stage.
def solve_k_sps(k, _data, _nStores, _nItems, _nStages, _penaltyCost):
    """k-SPS heuristic: at each stage, open at most *k* stores and assign
    every remaining item to one of them via a small Pyomo MIP minimizing
    the cost-to-success ratios.

    Each tried (item, store) pair is removed from `ratio` so later stages
    use different stores.  If the stage MIP is infeasible, true_k is
    increased and the stage re-solved.  Requires a CPLEX installation
    visible to Pyomo's SolverFactory.
    """
    k = min(k, _nItems)
    nItemsPerStore = [0 for _ in range(_nStores)]
    for [_, _store] in _data[1]:
        nItemsPerStore[_store] += 1
    perUnitShippingCost = [shipping_cost(_data[0][_store]['s'], nItemsPerStore[_store]) / nItemsPerStore[_store]
                           for _store in range(_nStores)]
    # ratio[item][store]: cost-to-success ratio for the still-untried pairs.
    ratio = [{} for _ in range(_nItems)]
    for _item in range(_nItems):
        for _store in range(_nStores):
            if [_item, _store] in _data[1]:
                ratio[_item][_store] = (_data[0][_store]['c'][_item] + (1 - _data[0][_store]['p'][_item])
                                        * perUnitShippingCost[_store]) / (1 - _data[0][_store]['p'][_item])
    # Now we need to have an assignment policy based on 'k' stores per stage
    kAssignment = {stage: [-1 for _ in range(_nItems)] for stage in range(_nStages)}
    true_k = k
    stageAssigned = [False for _ in range(_nStages)]
    for stage in range(_nStages):
        while not stageAssigned[stage]:
            allEligibleStores = set()
            itemsHavingStores = set(range(_nItems))
            for _item in range(_nItems):
                allEligibleStores |= set(ratio[_item].keys())
                if len(ratio[_item]) == 0:
                    itemsHavingStores.remove(_item)
            true_k = min(true_k, len(itemsHavingStores), len(allEligibleStores))
            if not len(itemsHavingStores):
                # No item has an untried store left; nothing to assign.
                stageAssigned[stage] = True
                break
            def varLinkConstraint_rule(_model, _item, _store):
                # An item may only use a store that is opened this stage.
                return _model.x[_item, _store] <= _model.y[_store]
            def oneStorePerItem_rule(_model, _item):
                return sum(_model.x[_item, store] for store in ratio[_item].keys()) == 1
            def numStoresConstraint_rule(_model):
                # NOTE(review): closes over the outer `model` instead of
                # `_model` — the same object when Pyomo builds the
                # constraint, so harmless, but worth confirming.
                return sum(_model.y[store] for store in model.stores) == true_k
            def obj_rule(_model):
                return sum(_model.ratio[i, j] * _model.x[i, j] for [i, j] in _model.itemStorePairs)
            model = ConcreteModel()
            model.products = Set(initialize=list(itemsHavingStores))
            model.stores = Set(initialize=list(allEligibleStores))
            model.itemStorePairs = Set(initialize=list((i, j) for i in model.products for j in ratio[i].keys()))
            model.ratio = Param(model.itemStorePairs,
                                initialize={(i, j): ratio[i][j] for [i, j] in model.itemStorePairs})
            model.x = Var(model.itemStorePairs, within=Binary)
            model.y = Var(model.stores, within=Binary)
            model.obj = Objective(rule=obj_rule, sense=minimize)
            model.varLinkConstraint = Constraint(model.itemStorePairs, rule=varLinkConstraint_rule)
            model.oneStorePerItem = Constraint(model.products, rule=oneStorePerItem_rule)
            model.numStoresConstraint = Constraint(rule=numStoresConstraint_rule)
            # model.pprint()
            opt = SolverFactory('cplex')
            results = opt.solve(model)
            if results.solver.status == SolverStatus.ok and \
                    results.solver.termination_condition == TerminationCondition.optimal:
                # Do something when the solution in optimal and feasible
                stageAssigned[stage] = True
                for [_item, _store] in model.itemStorePairs:
                    if model.x[_item, _store].value > 0.5:
                        kAssignment[stage][_item] = _store
                        # Consume the pair so later stages try other stores.
                        del ratio[_item][_store]
                # NOTE(review): usedStores is collected but never used
                # afterwards — confirm it can be dropped.
                usedStores = []
                for _store in model.stores:
                    if model.y[_store].value > 0.5:
                        usedStores.append(_store)
            elif results.solver.termination_condition == TerminationCondition.infeasible:
                # Do something when model in infeasible
                true_k += 1
                if true_k > len(itemsHavingStores):
                    print('Warning Something Wrong!')
                    input('pause')
            else:
                # Something else is wrong
                print('Solver Status: ', results.solver.status)
                input('pause')
    # print('kAssignment', kAssignment)
    # input('pause')
    return compute_exact_cost_2(kAssignment, _data, _nStores, _nItems, _nStages, _penaltyCost), kAssignment
# OPT using enumeration, too slow
# Not used at all in draft
def enumerate_opt(_data, _nStores, _nItems, _nStages, _penaltyCost):
    """Brute-force the optimal fulfillment policy by enumerating store
    sequences for every item.

    Starts from the LSC policy (solve_lu) and only evaluates alternative
    policies in which at least two items share an eligible store at the
    same stage — otherwise LSC is already optimal, since items cannot
    consolidate shipping.  Exponential in the number of items; debug only.
    Returns (optimal cost, stage -> per-item store assignment).
    """
    # Eligible stores per item, from the (item, store) pairs in _data[1].
    eligibleStores = [set() for _ in range(_nItems)]
    for _item in range(_nItems):
        for [__item, __store] in _data[1]:
            if __item == _item:
                eligibleStores[_item].add(__store)
    print('Eligible Stores:', eligibleStores)
    commonStores = {(a, b): eligibleStores[a] & eligibleStores[b]
                    for (a, b) in list(itertools.combinations(range(_nItems), 2))}
    c, pol = solve_lu(_data, _nStores, _nItems, _nStages, _penaltyCost)
    if not any(commonStores.values()):
        print('No Common Stores')
        return c, pol
    else:
        print('Common Stores: ', commonStores)
    combEligibleStores = [[] for _ in range(_nItems)]
    for _item in range(_nItems):
        if len(eligibleStores[_item]) == 0:
            combEligibleStores[_item] = [tuple([-1 for _ in range(_nStages)])]
        elif len(eligibleStores[_item]) < _nStages:
            # BUG FIX: the original padded with `store_permutations += (1,)`
            # inside a for loop.  Tuples are immutable, so `+=` only rebound
            # the loop variable and the stored permutations were never
            # padded (causing IndexError in the policy loop below); the
            # placeholder should also be -1 ("no store"), not store index 1.
            padding = (-1,) * (_nStages - len(eligibleStores[_item]))
            combEligibleStores[_item] = [
                perm + padding
                for perm in itertools.permutations(eligibleStores[_item],
                                                   len(eligibleStores[_item]))]
        else:
            combEligibleStores[_item] = list(itertools.permutations(eligibleStores[_item], _nStages))
    # print('Eligible Items ', eligibleStores)
    # print('combEligible Items : ', combEligibleStores)
    optCost = c
    optPolicy = tuple([tuple([pol[stage][_item] for stage in range(_nStages)]) for _item in range(_nItems)])
    print('Policy:', optPolicy)
    count = 0
    print('Total # Policies: ', len(list(itertools.product(*combEligibleStores))))
    for policy in itertools.product(*combEligibleStores):
        # print('Policy', policy)
        count += 1
        print(count, end=' ')
        # Only policies where two items can meet at a common store at the
        # same stage can improve on the per-item LSC policy.
        checkPolicy = False
        for stage in range(_nStages):
            for (i1, i2) in itertools.combinations(range(_nItems), 2):
                if policy[i1][stage] in commonStores[(i1, i2)] and policy[i2][stage] in commonStores[(i1, i2)]:
                    checkPolicy = True
                    break
        if checkPolicy:
            storesAssignment = [[policy[_item][_stage] for _item in range(_nItems)] for _stage in range(_nStages)]
            costOfPolicy = compute_exact_cost_2(storesAssignment, _data, _nStores, _nItems, _nStages, _penaltyCost)
            # print(costOfPolicy)
            if costOfPolicy < optCost:
                optCost = costOfPolicy
                optPolicy = policy
    print('OptPolicy: ', optPolicy, optCost)
    optAssignment = {stage: [optPolicy[_item][stage] for _item in range(_nItems)] for stage in range(_nStages)}
    return optCost, optAssignment
def solve_multi_item(algorithm, data):
    """Run the named heuristic on *data* with fixed stage count and penalty.

    Supported names: 'LB', 'LSC', 'Greedy', '1-/2-/4-/6-SPS', 'OPT'.
    Unknown names print a message, wait for input and return (-1, -1).
    """
    nStores = len(data[0])
    nItems = len(data[0][0]['p'])
    nStages = 3
    penaltyCost = 25
    common_args = (data, nStores, nItems, nStages, penaltyCost)
    dispatch = {
        'LB': solve_lb_multi_item_no_repeat,
        'LSC': solve_lu,
        'Greedy': greedy,
        '1-SPS': lambda *args: solve_k_sps(1, *args),
        '2-SPS': lambda *args: solve_k_sps(2, *args),
        '4-SPS': lambda *args: solve_k_sps(4, *args),
        '6-SPS': lambda *args: solve_k_sps(6, *args),
        'OPT': enumerate_opt,
    }
    solver = dispatch.get(algorithm)
    if solver is None:
        print('No Algorithm')
        input()
        return -1, -1
    cost, assignment = solver(*common_args)
    return cost, assignment
| StarcoderdataPython |
1650236 | <reponame>zalanborsos/coresets
from __future__ import division, absolute_import
from coresets.coreset import Coreset
from coresets.k_means_coreset import KMeansLightweightCoreset, KMeansCoreset, KMeansUniformCoreset
from coresets.sensitivity import kmeans_sensitivity | StarcoderdataPython |
4826178 | <gh_stars>10-100
#!/usr/bin/python3
import requests
import argparse
from pprint import pprint
from time import sleep
def get_ticker(sym_pair, init=False):
    """Poll the arbitrage-logger Firebase feed for *sym_pair* forever.

    Every 3 seconds, fetch the latest logged ticker entry and append one
    "time,bid,ask" CSV row per exchange to <exchange>_<sym_pair>.csv.
    When *init* is True the files are (re)created with a header row for the
    first batch of rows written; afterwards rows are appended.  Entries
    with a zero bid or ask are skipped.
    NOTE(review): non-200 responses are silently ignored and retried on
    the same 3 s cadence — confirm no backoff/logging is wanted.
    """
    url = 'https://arbitrage-logger.firebaseio.com/log_{}.json?orderBy="$key"&limitToLast=1'.format(sym_pair)
    while True:
        r = requests.get(url)
        if r.status_code == 200:
            data = r.json()
            for t, val in data.items():
                ticker = data[t]['ticker']
                for x in ticker:
                    exch = x['exchange']
                    bid = x['bid']
                    ask = x['ask']
                    if bid != 0.0 and ask != 0.0:
                        if init == True:
                            # First successful write in init mode: truncate
                            # the file and emit the CSV header.
                            with open(exch + '_' + sym_pair + '.csv', 'w') as f:
                                print('time,bid,ask', file=f)
                                print('{},{},{}'.format(t, bid, ask), file=f)
                        else:
                            with open(exch + '_' + sym_pair + '.csv', 'a') as f:
                                print('{},{},{}'.format(t, bid, ask), file=f)
            # After one successful fetch, switch to append mode for good.
            init = False
        sleep(3)
if __name__ == "__main__":
    # Parsing command line arguments
    parser = argparse.ArgumentParser(prog="python get_ticker.py")
    # BUG FIXES vs. the original:
    #  * argparse's `type=bool` does not parse flags correctly
    #    (bool("False") is True), so --init is now a proper
    #    store_true flag: pass `-i` to enable it.
    #  * parse_args() previously received a hard-coded argument list
    #    (['-i', 'True', 'btc_usd']) instead of reading sys.argv, so the
    #    real command line was ignored; it now parses the actual arguments.
    parser.add_argument('-i', '--init',
                        action='store_true',
                        help="if set, creates new files with given pair of symbols for every exchange")
    parser.add_argument('symbol',
                        help="currency pair")
    args = parser.parse_args()
    get_ticker(args.symbol, init=args.init)
4823053 | #!/usr/bin/python3.6
"""Calculations for a single TIMD.
TIMD stands for Team In Match Data. TIMD calculations include
consolidation of (up to) 3 tempTIMDs (temporary TIMDs) into a single
TIMD, and the calculation of data points that are reflective of a team's
performance in a single match.
Consolidation is the process of determining the actions a robot
performed in a match by using data from (up to) 3 tempTIMDs. One
tempTIMD is created per scout per match. Ideally, 18 scouts are
distributed evenly across the 6 robots per match, resulting in 3
tempTIMDs per robot per match (aka 3 tempTIMDs per TIMD). However, the
number of tempTIMDs per TIMD may be less than 3, depending on scout
availability, incorrect scout distribution, or missing data.
Called by server.py with the name of the TIMD to be calculated."""
# External imports
import json
import os
import sys
import subprocess
# Internal imports
import consolidation
import decompressor
import utils
def percent_success(actions):
    """Finds the percent of times didSucceed is true in a list of actions.

    actions is the list of actions that can either succeed or fail."""
    # utils.avg of a boolean list is the fraction of True values; scale to
    # an integer percentage.  Example: [True, True, False, True] -> 75.
    outcomes = [action.get('didSucceed') for action in actions]
    return round(utils.avg(outcomes) * 100)
def filter_cycles(cycle_list, **filters):
    """Return the cycles whose placement/drop action satisfies every filter.

    cycle_list is a list of (intake, placement-or-drop) tuples; filters map
    data-field names to required values (e.g. level=1) and are checked
    against the second action of each tuple.  A requirement of level=1 also
    accepts actions without a 'level' key, because cargo ship placements
    are recorded with no level.
    """
    def _matches(action):
        for data_field, requirement in filters.items():
            # Handling for the cargo ship in level 1 placements.
            if data_field == 'level' and requirement == 1:
                if action.get('level', 1) != 1:
                    return False
            elif action.get(data_field) != requirement:
                return False
        return True

    return [cycle for cycle in cycle_list if _matches(cycle[1])]
def calculate_avg_cycle_time(cycles):
    """Average duration of the given (intake, placement/drop) pairs.

    The match clock counts down, so each duration is the intake's time
    minus the placement's time.  Delegates the empty case to utils.avg,
    which is given None as its fallback."""
    durations = [intake.get('time') - outcome.get('time')
                 for intake, outcome in cycles]
    return utils.avg(durations, None)
def calculate_total_action_duration(cycles):
    """Total time covered by (start, end) action pairs.

    Used for both defense and incap periods.  The match clock counts down,
    so each duration is the start action's time minus the end action's."""
    return sum(start.get('time') - end.get('time') for start, end in cycles)
def filter_timeline_actions(timd, **filters):
    """Return the timeline actions of *timd* that satisfy every filter.

    filters map data-field names to required values.  Special cases:
    - level=1 also accepts actions with no 'level' key (cargo ship);
    - zone='loadingStation' accepts either loading station zone;
    - time='sand' keeps sandstorm actions (time > 135.0 on the countdown
      clock), time='tele' keeps teleop actions (time <= 135.0); any other
      time requirement is ignored, matching the original behavior.
    """
    #TODO: Rename 'sand' and 'tele'
    def _meets(action, data_field, requirement):
        # Handling for the cargo ship in level 1 placements.
        if data_field == 'level' and requirement == 1:
            # If no level is specified, it is a cargo ship placement.
            return action.get('level', 1) == 1
        if data_field == 'zone' and requirement == 'loadingStation':
            return action['zone'] in ['leftLoadingStation',
                                      'rightLoadingStation']
        if data_field == 'time':
            if requirement == 'sand':
                return action['time'] > 135.0
            if requirement == 'tele':
                return action['time'] <= 135.0
            # Any other time requirement never excluded an action.
            return True
        return action.get(data_field) == requirement

    return [action for action in timd.get('timeline', [])
            if all(_meets(action, field, req) for field, req in filters.items())]
def make_paired_cycle_list(cycle_list):
    """Pair consecutive actions: [a, b, c, d] -> [(a, b), (c, d)].

    A trailing unpaired action is dropped, like zip on the even/odd
    slices of the list."""
    pairs = []
    for start_index in range(0, len(cycle_list) - 1, 2):
        pairs.append((cycle_list[start_index], cycle_list[start_index + 1]))
    return pairs
def calculate_timd_data(timd):
    """Calculates data in a timd and adds it to 'calculatedData' in the TIMD.

    timd is the TIMD that needs calculated data.  Returns a dict of counts,
    success percentages, average cycle times, climb results, and incap and
    defense durations, all derived from the consolidated timeline.
    """
    calculated_data = {}
    # Adds counting data points to calculated data, does this by setting
    # the key to be the sum of a list of ones, one for each time the
    # given requirements are met. This creates the amount of times those
    # requirements were met in the timeline.
    calculated_data['cargoScored'] = len(filter_timeline_actions(
        timd, type='placement', didSucceed=True, piece='cargo'))
    calculated_data['panelsScored'] = len(filter_timeline_actions(
        timd, type='placement', didSucceed=True, piece='panel'))
    calculated_data['cargoFouls'] = len(filter_timeline_actions(
        timd, shotOutOfField=True))
    calculated_data['pinningFouls'] = len(filter_timeline_actions(
        timd, type='pinningFoul'))
    calculated_data['cargoCycles'] = len(filter_timeline_actions(
        timd, type='intake', piece='cargo'))
    calculated_data['panelCycles'] = len(filter_timeline_actions(
        timd, type='intake', piece='panel'))
    cycle_actions = [action for action in timd.get('timeline', []) if \
        action['type'] in ['placement', 'intake', 'drop']]
    if len(cycle_actions) > 0:
        # If the last action is an intake, it shouldn't count as a
        # cycle, so it is subtracted from its cycle data field.
        if cycle_actions[-1]['type'] == 'intake':
            piece = cycle_actions[-1]['piece']
            # HACK: Subtracts the extra intake from the already
            # calculated number of cycles. Should be included in that
            # calculation.
            calculated_data[f'{piece}Cycles'] -= 1
    calculated_data['cargoDrops'] = len(filter_timeline_actions(
        timd, type='drop', piece='cargo'))
    calculated_data['panelDrops'] = len(filter_timeline_actions(
        timd, type='drop', piece='panel'))
    calculated_data['cargoFails'] = len(filter_timeline_actions(
        timd, type='placement', didSucceed=False, piece='cargo'))
    calculated_data['panelFails'] = len(filter_timeline_actions(
        timd, type='placement', didSucceed=False, piece='panel'))
    # Per-period and per-level scoring counts ('sand' = sandstorm,
    # 'tele' = teleop; level 1 also matches cargo ship placements).
    calculated_data['cargoScoredSandstorm'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        didSucceed=True, time='sand'))
    calculated_data['panelsScoredSandstorm'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        didSucceed=True, time='sand'))
    calculated_data['cargoScoredTeleL1'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        level=1, didSucceed=True, time='tele'))
    calculated_data['cargoScoredTeleL2'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        level=2, didSucceed=True, time='tele'))
    calculated_data['cargoScoredTeleL3'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        level=3, didSucceed=True, time='tele'))
    calculated_data['panelsScoredTeleL1'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        level=1, didSucceed=True, time='tele'))
    calculated_data['panelsScoredTeleL2'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        level=2, didSucceed=True, time='tele'))
    calculated_data['panelsScoredTeleL3'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        level=3, didSucceed=True, time='tele'))
    calculated_data['cargoScoredL1'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        level=1, didSucceed=True))
    calculated_data['cargoScoredL2'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        level=2, didSucceed=True))
    calculated_data['cargoScoredL3'] = len(
        filter_timeline_actions(timd, type='placement', piece='cargo', \
        level=3, didSucceed=True))
    calculated_data['panelsScoredL1'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        level=1, didSucceed=True))
    calculated_data['panelsScoredL2'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        level=2, didSucceed=True))
    calculated_data['panelsScoredL3'] = len(
        filter_timeline_actions(timd, type='placement', piece='panel', \
        level=3, didSucceed=True))
    calculated_data['totalFailedCyclesCaused'] = sum([
        action['failedCyclesCaused'] for action in
        filter_timeline_actions(timd, type='endDefense')])
    # The next set of calculated data points are the success
    # percentages, these are the percentages (displayed as an integer)
    # of didSucceed for certain actions, such as the percentage of
    # success a team has loading panels.
    calculated_data['panelLoadSuccess'] = percent_success(
        filter_timeline_actions(timd, type='intake', piece='panel',
                                zone='loadingStation'))
    calculated_data['cargoSuccessAll'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='cargo'))
    calculated_data['cargoSuccessDefended'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='cargo',
                                wasDefended=True))
    calculated_data['cargoSuccessUndefended'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='cargo',
                                wasDefended=False))
    calculated_data['cargoSuccessL1'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='cargo',
                                level=1))
    calculated_data['cargoSuccessL2'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='cargo',
                                level=2))
    calculated_data['cargoSuccessL3'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='cargo',
                                level=3))
    calculated_data['panelSuccessAll'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='panel'))
    calculated_data['panelSuccessDefended'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='panel',
                                wasDefended=True))
    calculated_data['panelSuccessUndefended'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='panel',
                                wasDefended=False))
    calculated_data['panelSuccessL1'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='panel',
                                level=1))
    calculated_data['panelSuccessL2'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='panel',
                                level=2))
    calculated_data['panelSuccessL3'] = percent_success(
        filter_timeline_actions(timd, type='placement', piece='panel',
                                level=3))
    # Creates the cycle_list, a list of tuples where the intake is the
    # first item and the placement or drop is the second. This is used
    # when calculating cycle times.
    cycle_list = []
    for action in timd.get('timeline', []):
        if action.get('type') in ['intake', 'placement', 'drop']:
            # If the action is a failed loading station intake, it
            # shouldn't play a part in cycles, so it is filtered out.
            if not (action.get('type') == 'intake' and
                    action.get('didSucceed') is False):
                cycle_list.append(action)
    # There must be at least 2 actions to have a cycle.
    if len(cycle_list) > 1:
        # If the first action in the list is a placement, it is a
        # preload, which doesn't count when calculating cycle times.
        if cycle_list[0].get('type') in ['placement', 'drop']:
            cycle_list.pop(0)
        # If the last action in the list is an intake, it means the
        # robot finished with a game object, in which the cycle was
        # never completed.
        if cycle_list[-1].get('type') == 'intake':
            cycle_list.pop(-1)
        paired_cycle_list = make_paired_cycle_list(cycle_list)
        calculated_data['cargoCycleAll'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='cargo'))
        calculated_data['cargoCycleL1'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='cargo', level=1))
        calculated_data['cargoCycleL2'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='cargo', level=2))
        calculated_data['cargoCycleL3'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='cargo', level=3))
        calculated_data['panelCycleAll'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='panel'))
        calculated_data['panelCycleL1'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='panel', level=1))
        calculated_data['panelCycleL2'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='panel', level=2))
        calculated_data['panelCycleL3'] = calculate_avg_cycle_time(
            filter_cycles(paired_cycle_list, piece='panel', level=3))
    # Calculates if a team is incap throughout the entirety of the match
    # by checking if they have any actions in the match other than incap
    # and unincap. If they don't have any other actions, they were incap
    # the entire match.
    for action in timd.get('timeline', []):
        if action.get('type') not in ['incap', 'unincap'] and \
                action.get('time') <= 135.0:
            calculated_data['isIncapEntireMatch'] = False
            break
    else:
        calculated_data['isIncapEntireMatch'] = True
    # Creates a list of the climb dictionary or nothing if there is no
    # climb. If there is a climb, the time of the climb is the amount
    # of time they spent climbing.
    # NOTE(review): 'timeClimbing' stores the climb action's countdown
    # timestamp, not an elapsed duration — confirm downstream consumers
    # interpret it that way.
    for action in timd.get('timeline', []):
        if action['type'] == 'climb':
            calculated_data['timeClimbing'] = action['time']
            calculated_data['selfClimbLevel'] = action['actual']['self']
            calculated_data['robot1ClimbLevel'] = action['actual']['robot1']
            calculated_data['robot2ClimbLevel'] = action['actual']['robot2']
    # Creates a list of all the incap and unincap actions in the timeline.
    incap_items = []
    for action in timd.get('timeline', []):
        if action.get('type') in ['incap', 'unincap']:
            incap_items.append(action)
    if len(incap_items) > 0:
        # If the last action in the list is an incap, it means they
        # finished the match incap, so it adds an unincap at the end of
        # the timeline.
        if incap_items[-1]['type'] == 'incap':
            incap_items.append({'type': 'unincap', 'time': 0.0})
        paired_incap_list = make_paired_cycle_list(incap_items)
        # Calculates the timeIncap by calculating the total amount of
        # time the robot spent incap during the match.
        calculated_data['timeIncap'] = calculate_total_action_duration(
            paired_incap_list)
    else:
        # Otherwise, the time that the robot spent incap is naturally 0.
        calculated_data['timeIncap'] = 0.0
    # Creates a list of all the startDefense and endDefense actions in
    # the timeline.
    defense_items = []
    for action in timd.get('timeline', []):
        if action['type'] in ['startDefense', 'endDefense']:
            defense_items.append(action)
    if len(defense_items) > 0:
        paired_defense_list = make_paired_cycle_list(defense_items)
        # 'timeDefending' is the total amount of time the robot spent
        # defending during the match.
        calculated_data['timeDefending'] = calculate_total_action_duration(
            paired_defense_list)
    else:
        # Otherwise, the time that the robot spent defending is naturally 0.
        calculated_data['timeDefending'] = 0.0
    return calculated_data
# Check to ensure TIMD name is being passed as an argument
if len(sys.argv) == 2:
    # Extract TIMD name from system argument
    TIMD_NAME = sys.argv[1]
else:
    print('Error: TIMD name not being passed as an argument. Exiting...')
    # BUG FIX: exit with a non-zero status so callers (e.g. server.py) can
    # detect the failure; the original used sys.exit(0), which reports
    # success to the shell despite the error.
    sys.exit(1)
COMPRESSED_TIMDS = []
TEMP_TIMDS = {}
# Goes into the temp_timds folder to get the names of all the tempTIMDs
# that correspond to the given TIMD. Afterwards, the tempTIMDs are
# decompressed and addded them to the TEMP_TIMDS dictionary with the
# scout name as the key and the decompressed tempTIMD as the value.
# This is needed for the consolidation function
for temp_timd in os.listdir(utils.create_file_path('data/cache/temp_timds')):
    if temp_timd.split('-')[0] == TIMD_NAME:
        file_path = utils.create_file_path(
            f'data/cache/temp_timds/{temp_timd}')
        with open(file_path, 'r') as file:
            compressed_temp_timd = file.read()
        decompressed_temp_timd = list(decompressor.decompress_temp_timd(
            compressed_temp_timd).values())[0]
        scout_name = decompressed_temp_timd.get('scoutName')
        TEMP_TIMDS[scout_name] = decompressed_temp_timd
# After the TEMP_TIMDS are decompressed, they are fed into the
# consolidation script where they are returned as one final TIMD.
FINAL_TIMD = consolidation.consolidate_temp_timds(TEMP_TIMDS)
# Adds the matchNumber and teamNumber necessary for later team calcs.
# (TIMD names look like '<team>Q<match>'.)
FINAL_TIMD['matchNumber'] = int(TIMD_NAME.split('Q')[1])
FINAL_TIMD['teamNumber'] = int(TIMD_NAME.split('Q')[0])
# Adds calculatedData to the FINAL_TIMD using the
# calculate_timd_data function at the top of the file.
FINAL_TIMD['calculatedData'] = calculate_timd_data(FINAL_TIMD)
# Save data in local cache
with open(utils.create_file_path(f'data/cache/timds/{TIMD_NAME}.json'),
          'w') as file:
    json.dump(FINAL_TIMD, file)
# Save data in Firebase upload queue
with open(utils.create_file_path(
        f'data/upload_queue/timds/{TIMD_NAME}.json'), 'w') as file:
    json.dump(FINAL_TIMD, file)
# TODO: Make 'forward_temp_super' more efficient (call it less often)
subprocess.call(f'python3 forward_temp_super.py', shell=True)
# After the timd is calculated, the team is calculated.
TEAM = TIMD_NAME.split('Q')[0]
subprocess.call(f'python3 calculate_team.py {TEAM}', shell=True)
| StarcoderdataPython |
1601436 | """
*A - Level 2*
"""
from .._pitch import Pitch
__all__ = ["A_2"]
class A_2(
    Pitch,
):
    """Concrete :class:`Pitch` for the note A at level 2 ("A - Level 2")."""

    pass
| StarcoderdataPython |
1619622 | from dialog_bot_sdk.bot import DialogBot
from config.config import bot
import db.db as core_db
def add_event(peer, command: str):
    """Persist *command* as a new event for this conversation and confirm.

    NOTE: *peer* is a bot peer object exposing ``.id`` (the previous
    ``peer: str`` annotation was wrong); the stray ``print("!", command)``
    debug line has been removed.
    """
    core_db.add_event(int(peer.id), command)
    send_message(
        peer,
        'added event'
    )
def add_review(peer, command: str):
    """Store a review; *command* is '<subject> <review text>'.

    Raises ValueError when *command* contains no space (no review text).
    NOTE: *peer* is a bot peer object exposing ``.id``; the previous
    ``peer: str`` annotation was wrong.
    """
    subject, review_text = command.split(" ", 1)
    core_db.add_review(int(peer.id), subject, review_text)
    send_message(
        peer,
        'added review'
    )
def show_review(peer, command: str):
    """Fetch all reviews about *command* and send them back to the chat.

    NOTE: *peer* is a bot peer object exposing ``.id``; the previous
    ``peer: str`` annotation was wrong.
    """
    res = core_db.show_review(int(peer.id), command)
    send_message(
        peer,
        "Reviews about " + command + ':' + '\n' + res + "\n-------------------------\nEnd of reviews"
    )
# Maps the first word of an incoming message to its handler function.
command_list = {
    'add_event': add_event,
    'show_review': show_review,
    # NOTE(review): the exposed command is 'add_reviews' (plural) while the
    # handler is add_review -- confirm this mismatch is intentional.
    'add_reviews': add_review
}
def send_message(peer, msg: str) -> None:
    """Deliver the text *msg* to *peer* through the bot's messaging API."""
    bot.messaging.send_message(peer, msg)
def get(string: str):
    """Split *string* at the first space: [command] or [command, params]."""
    head_and_rest = string.split(" ", 1)
    return head_and_rest
def check(peer, command: str) -> bool:
    """Parse *command*, dispatch it to its handler, and report success.

    The previous version unpacked ``get(command)`` into two names, which
    raised ValueError for a message without a space (e.g. just
    'add_event'); such messages now dispatch with empty params instead.
    """
    parts = get(command)
    name = parts[0]
    params = parts[1] if len(parts) > 1 else ''
    if name not in command_list:
        send_message(
            peer,
            'Invalid command'
        )
        return False
    command_list[name](peer, params)
    return True
| StarcoderdataPython |
1706430 | #!/usr/bin/env python3
import datetime
import json
import os
import pathlib
import re
from typing import Any, Callable, Dict, List, Optional, cast
from urllib.request import urlopen
def get_disabled_issues() -> List[str]:
    """Collect issue numbers referenced by GitHub's auto-close keywords in
    the PR body and commit messages (both read from the environment)."""
    combined = os.getenv("PR_BODY", "") + os.getenv("COMMIT_MESSAGES", "")
    # The below regex is meant to match all *case-insensitive* keywords that
    # GitHub has delineated would link PRs to issues, more details here:
    # https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue.
    # E.g., "Close #62851", "fixES #62851" and "RESOLVED #62851" would all match, but not
    # "closes  #62851" --> extra space, "fixing #62851" --> not a keyword, nor "fix 62851" --> no #
    regex = "(?i)(Close(d|s)?|Resolve(d|s)?|Fix(ed|es)?) (#|https://github.com/pytorch/pytorch/issues/)([0-9]+)"
    # Group 6 of the pattern is the issue number itself.
    issue_numbers = [match.group(6) for match in re.finditer(regex, combined)]
    print("Ignoring disabled issues: ", issue_numbers)
    return issue_numbers
IGNORE_DISABLED_ISSUES: List[str] = get_disabled_issues()
SLOW_TESTS_FILE = ".pytorch-slow-tests.json"
DISABLED_TESTS_FILE = ".pytorch-disabled-tests.json"
# Cached files younger than this (3 hours) are served without re-downloading.
FILE_CACHE_LIFESPAN_SECONDS = datetime.timedelta(hours=3).seconds


def fetch_and_cache(
    dirpath: str,
    name: str,
    url: str,
    process_fn: Callable[[Dict[str, Any]], Dict[str, Any]],
) -> Dict[str, Any]:
    """
    This fetch and cache utils allows sharing between different process.

    Downloads JSON from *url*, post-processes it with *process_fn*, writes
    the result to ``dirpath/name`` and returns it. A fresh, readable cached
    copy short-circuits the download; if all download retries fail an empty
    dict is returned. A corrupt/unreadable cache file is now treated as a
    cache miss instead of raising out of this function.
    """
    path = os.path.join(dirpath, name)

    def is_cached_file_valid() -> bool:
        # Check if the file is new enough (see: FILE_CACHE_LIFESPAN_SECONDS). A real check
        # could make a HEAD request and check/store the file's ETag
        fname = pathlib.Path(path)
        now = datetime.datetime.now()
        mtime = datetime.datetime.fromtimestamp(fname.stat().st_mtime)
        diff = now - mtime
        return diff.total_seconds() < FILE_CACHE_LIFESPAN_SECONDS

    if os.path.exists(path) and is_cached_file_valid():
        # Another test process already download the file, so don't re-do it
        try:
            with open(path, "r") as f:
                return cast(Dict[str, Any], json.load(f))
        except Exception as e:
            # Truncated/corrupt cache (e.g. a concurrent writer): fall
            # through and re-download rather than crashing the caller.
            print(f"Could not read cached file {path} because: {e}.")

    for _ in range(3):
        try:
            contents = urlopen(url, timeout=5).read().decode("utf-8")
            processed_contents = process_fn(json.loads(contents))
            with open(path, "w") as f:
                f.write(json.dumps(processed_contents))
            return processed_contents
        except Exception as e:
            print(f"Could not download {url} because: {e}.")
    print(f"All retries exhausted, downloading {url} failed.")
    return {}
def get_slow_tests(
    dirpath: str, filename: str = SLOW_TESTS_FILE
) -> Optional[Dict[str, float]]:
    """Fetch (or load from cache) the table of known-slow tests; on any
    failure fall back to an empty dict so every test stays enabled."""
    url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/slow-tests.json"

    def passthrough(stats: Dict[str, Any]) -> Dict[str, Any]:
        # The slow-test file needs no post-processing.
        return stats

    try:
        return fetch_and_cache(dirpath, filename, url, passthrough)
    except Exception:
        print("Couldn't download slow test set, leaving all tests enabled...")
        return {}
def get_disabled_tests(
    dirpath: str, filename: str = DISABLED_TESTS_FILE
) -> Optional[Dict[str, Any]]:
    """Fetch (or load from cache) the table of disabled tests.

    Returns a mapping ``test name -> (issue url, [platforms to skip])``,
    skipping issues listed in IGNORE_DISABLED_ISSUES. On any failure an
    empty dict is returned so all tests stay enabled.
    """
    title_prefix = "DISABLED "
    platform_prefix = "platforms:"
    # Strips leading/trailing whitespace and splits on commas.
    separator = re.compile(r"^\s+|\s*,\s*|\s+$")

    def process_disabled_test(the_response: Dict[str, Any]) -> Dict[str, Any]:
        disabled = {}
        for issue in the_response["items"]:
            title = issue["title"]
            issue_number = issue["html_url"].split("/")[-1]
            if not title.startswith(title_prefix) or issue_number in IGNORE_DISABLED_ISSUES:
                continue
            platforms_to_skip = []
            for line in issue["body"].splitlines():
                line = line.lower()
                if line.startswith(platform_prefix):
                    platforms_to_skip.extend(
                        token
                        for token in separator.split(line[len(platform_prefix):])
                        if token
                    )
            disabled[title[len(title_prefix):].strip()] = (
                issue["html_url"],
                platforms_to_skip,
            )
        return disabled

    try:
        url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/disabled-tests.json"
        return fetch_and_cache(dirpath, filename, url, process_disabled_test)
    except Exception:
        print("Couldn't download test skip set, leaving all tests enabled...")
        return {}
| StarcoderdataPython |
3266264 | <filename>calc_hpi.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
calc_hpi.py
===========
Compute the auroral hemispheric power from an SWMF IDL file.
Created on Wed Dec 2 03:45:37 2020
@author: <NAME>
University of Michigan
Ann Arbor, MI
"""
import numpy as np
from spacepy.pybats import rim
def calc_hpi(a, debug=False):
    '''
    Integrate auroral energy and number fluxes to get the following
    values:

    tot_numflux: The total number flux over one hemisphere.
    tot_aur_hpi: The total energy flux over one hemisphere (in GW).

    Values are stored in the object with the prefix 'n_' or 's_',
    to indicate the hemisphere for four different sources of precipitation:

    'diff': Electron Diffuse
    'idif': Ion Diffuse
    'mono': Monoenergetic
    'bbnd': Broadband

    and may be accessed via a['n_diff']['tot_aur_hpi'], etc.

    Parameters
    ==========
    a : spacepy.pybats.rim.Iono
        Ionosphere object to integrate over; it is modified in place.
    debug : bool
        If True, print each hemisphere/source total as it is computed.

    Returns
    =======
    obj
        The same object *a*, with the per-source result dicts attached.

    Examples
    ========
    >>> a = rim.Iono('spacepy/tests/data/pybats_test/it000321_104510_000.idl.gz')
    >>> a = calc_hpi(a)
    >>> print(a['n_diff']['tot_numflux'])
    '''
    # Calculate some physically meaningful values/units
    units_eflux = 1E-9  # Watts to GigaWatts
    units_numflux = 1E+04  # cm^-2 to m^-2
    R = (6371.0+110.0)*1000.0  # Radius of Earth + iono altitude
    # Grid cell sizes in radians (dlat/dlon are in degrees).
    dTheta = np.pi * a.dlat/180.
    dPhi = np.pi * a.dlon/180.
    hemi = ['n', 's']
    for h in hemi:
        # Get relevant values:
        colat = a[h+'_theta']*np.pi/180.
        # -----DIFFUSE PRECIP-----
        a[h+'_diff'] = {}
        # Number Flux
        # Average energy: keV -> eV (1E03) -> Kelvin (11604 K/eV) --
        # presumably; confirm units against the model output.
        t = a[h+'_diff_ave-e'] * 1E03 * 11604
        # NOTE(review): 1553.5632/1.66E-21 is an undocumented empirical
        # thermal-flux coefficient -- confirm its derivation/source.
        diff_numflux = a[h+'_rt rho'] * t**0.5 * 1553.5632/1.66E-21
        integrand = (diff_numflux * np.sin(colat) * dTheta*dPhi)
        a[h+'_diff']['tot_numflux'] = units_numflux*R**2 * np.sum(integrand)
        # Energy Flux
        integrand = (a[h+'_diff_e-flux'] * np.sin(colat) * dTheta*dPhi)
        a[h+'_diff']['tot_aur_hpi'] = units_eflux*R**2 * np.sum(integrand)
        if debug:
            print((h+'-Diffuse: ' +
                   'Total Number Flux = {:4.3E} ' +
                   'Auroral HPI = {:.3f}').format(a[h+'_diff']['tot_numflux'],
                                                  a[h+'_diff']['tot_aur_hpi']))
        # -----ION DIFF PRECIP-----
        a[h+'_idif'] = {}
        # Number Flux
        # The extra factor of 5 and the 36.26531 coefficient differ from the
        # electron-diffuse case -- presumably ion mass/temperature scaling;
        # confirm.
        t = a[h+'_idif_ave-e'] * 1E03 * 11604 * 5
        idif_numflux = a[h+'_rt rho'] * t**0.5 * 36.26531/1.66E-21
        integrand = (idif_numflux * np.sin(colat) * dTheta*dPhi)
        a[h+'_idif']['tot_numflux'] = units_numflux*R**2 * np.sum(integrand)
        # Energy Flux
        integrand = (a[h+'_idif_e-flux'] * np.sin(colat) * dTheta*dPhi)
        a[h+'_idif']['tot_aur_hpi'] = units_eflux*R**2 * np.sum(integrand)
        if debug:
            print((h+'-IonDiff: ' +
                   'Total Number Flux = {:4.3E} ' +
                   'Auroral HPI = {:.3f}').format(a[h+'_idif']['tot_numflux'],
                                                  a[h+'_idif']['tot_aur_hpi']))
        # -----MONOENERGETIC PRECIP-----
        a[h+'_mono'] = {}
        # Number Flux
        loc_up = a[h+'_jr']>0  # Upward FAC signify downward electrons
        # Field-aligned current converted to particle flux via the
        # elementary charge 1.6e-19 C.
        integrand = a[h+'_jr']*np.sin(colat)*dTheta*dPhi/1.6e-19
        # NOTE(review): 1E-06 scale differs from units_numflux used above
        # (presumably uA/m^2 -> A/m^2 for jr) -- confirm.
        a[h+'_mono']['tot_numflux'] = 1E-06*R**2 * np.sum(integrand[loc_up])
        # Energy Flux
        integrand = (a[h+'_mono_e-flux'] * np.sin(colat) * dTheta*dPhi)
        a[h+'_mono']['tot_aur_hpi'] = units_eflux*R**2 * np.sum(integrand)
        if debug:
            print((h+'-Mono: ' +
                   'Total Number Flux = {:4.3E} ' +
                   'Auroral HPI = {:.3f}').format(a[h+'_mono']['tot_numflux'],
                                                  a[h+'_mono']['tot_aur_hpi']))
        # -----BROADBAND PRECIP-----
        a[h+'_bbnd'] = {}
        # Number Flux (broadband provides its own number flux directly).
        integrand = (a[h+'_bbnd_n-flux'] * np.sin(colat) *
                     dTheta*dPhi)
        a[h+'_bbnd']['tot_numflux'] = units_numflux*R**2 * np.sum(integrand)
        # Energy Flux
        integrand = (a[h+'_bbnd_e-flux'] * np.sin(colat) * dTheta*dPhi)
        a[h+'_bbnd']['tot_aur_hpi'] = units_eflux*R**2 * np.sum(integrand)
        if debug:
            print((h+'-Broadband: ' +
                   'Total Number Flux = {:4.3E} ' +
                   'Auroral HPI = {:.3f}').format(a[h+'_bbnd']['tot_numflux'],
                                                  a[h+'_bbnd']['tot_aur_hpi']))
    return a
1622043 | <filename>finite-mdp/setup.py
from setuptools import setup, find_packages
# Package metadata for the finite-mdp Gym environment.
setup(
    name='finite-mdp',
    version='1.0.dev0',
    description='Gym environment for MDPs with finite state and action spaces',
    url='https://github.com/eleurent/finite-mdp',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        # 'Intended Audience :: Researchers' is not a valid trove classifier
        # (PyPI rejects uploads with unknown classifiers); the official
        # equivalent is 'Science/Research'.
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='finite mdp',
    packages=find_packages(exclude=['docs', 'scripts', 'tests*']),
    install_requires=['gym', 'numpy', 'matplotlib', 'torch>=1.2.0', 'networkx'],
    tests_require=['pytest'],
    extras_require={
        'dev': ['scipy'],
    },
    entry_points={
        'console_scripts': [],
    },
)
| StarcoderdataPython |
1755002 | <reponame>steingabelgaard/reportlab
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
# Emits the "Graphics" chapter introduction of the ReportLab user guide
# using the doc-building helpers (heading1/heading2/disc) imported below.
__version__='3.3.0'
from tools.docco.rl_doc_utils import *
heading1("Graphics")
heading2("Introduction")
disc("""
ReportLab Graphics is one of the sub-packages to the ReportLab
library. It started off as a stand-alone set of programs, but is now a
fully integrated part of the ReportLab toolkit that allows you to use
its powerful charting and graphics features to improve your PDF forms
and reports.
""")
| StarcoderdataPython |
3398200 | #!/usr/bin/env python3
import re
import json
import os
import numpy as np
import matplotlib.pyplot as plt
from itertools import tee
def parse_data(folder):
    """Load and concatenate the JSON lists stored in every ``*.txt`` file
    directly under *folder*; other files are ignored."""
    records = []
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if not path.endswith('.txt'):
            continue
        with open(path, 'r') as handle:
            records.extend(json.loads(handle.read()))
    return records
def filter_data(data, visa_type=None, location=None, status=None, new_renewal=None,
                major=(), since=None, fields=()):
    """Filter visa-check records and project the selected fields.

    :param data: list of record dicts (as produced by parse_data).
    :param visa_type: lowercase visa type to match (None disables the check);
        location/status/new_renewal behave the same for their keys.
    :param major: iterable of lowercase majors to accept (empty accepts all).
    :param since: string date; keep records whose check_date compares greater.
    :param fields: iterable of (converter, key) pairs; the result contains
        converter(record[key]) for each pair, grouped field-by-field.
    :returns: flat list of projected values.

    Fixes two defects of the previous version: the filtered iterator was
    exhausted by the first entry of *fields* (so every additional field
    projected nothing), and the mutable default arguments ``major=[]`` /
    ``fields=[]`` are now immutable tuples.
    """
    status_filter = lambda x: x["status"].lower() == status if status else True
    loc_filter = lambda x: x["loc"].lower() == location if location else True
    visa_filter = lambda x: x["visa"].lower() == visa_type if visa_type else True
    type_filter = lambda x: x["type"].lower() == new_renewal if new_renewal else True
    major_filter = lambda x: x["major"].lower() in major if major else True
    since_filter = lambda x: x["check_date"] > since if since else True
    filter_func = lambda x: (status_filter(x) and loc_filter(x) and visa_filter(x)
                             and type_filter(x) and major_filter(x) and since_filter(x))
    # Materialize once so every field projection sees all matches.
    matched = [x for x in data if filter_func(x)]
    if __debug__:
        with open('debug.log', 'w') as debug_file:
            debug_file.write(json.dumps(matched))
    return [convert(x[key]) for convert, key in fields for x in matched]
if __name__ == "__main__":
    data = parse_data('../retriever/data/')
    # Report criteria; all comparisons in filter_data are lowercase.
    visa_type = 'h1'
    location = 'beijing'
    status = 'clear'
    new_renewal = 'new'
    since = '2018-02-01'
    filtered = filter_data(data,
                           visa_type = visa_type,
                           location = location,
                           status = status,
                           new_renewal = new_renewal,
                           since = since,
                           fields = [(lambda x:int(x), 'waiting_days')])
    if len(filtered) == 0:
        print('Nothing filtered')
        exit(0)
    print('Total ' + str(len(filtered)) + ' samples filtered')
    print('Average waiting days: ' + str(sum(filtered) / float(len(filtered))))
    # Histogram of waiting days in 2-day buckets up to the longest wait.
    step = 2
    bins = np.arange(0, max(filtered) + step, step = step)
    plt.hist(filtered, bins = bins)
    title = "Histogram for " + visa_type + " visa at " + location + " consulate " + status + " since " + since
    plt.title(title)
    plt.xlabel("number of days in waiting")
    plt.ylabel("number of applications")
    plt.xticks(bins)
    print(title)
    plt.show()
#`for` repetition structure;
#Defined a function with no parameters;
#Declared a variable with the FOR statement to place it within an interval;
#To create an interval, use the range function;
#The printed values go from 5 up to 9, because the upper bound of the interval is excluded.
def estruturaFor():
    """Print the integers 5 through 9 (range's upper bound is exclusive)."""
    for value in range(5, 10):
        print(value)


# Run the demonstration.
estruturaFor()
| StarcoderdataPython |
9677 | <reponame>piotr-karon/realworld-starter-kit
#!/usr/bin/env python3
import json
import os
from pathlib import Path
import numpy as np
from natsort import natsorted
try:
from docopt import docopt
from marko.ext.gfm import gfm
import pygal
from pygal.style import Style, DefaultStyle
except ImportError as e:
raise Exception('Some external dependencies not found, install them using: pip install -r requirements.txt') from e
def render():
    """Load all *.avg.checks.bench.json suites from the current directory and
    render them into bench-results.md plus a standalone bench-results.html.

    Fixes the <img> line, whose f-string placeholders had degraded into the
    literal text "(unknown)"; it now interpolates each figure filename.
    """
    suffix = '.avg.checks.bench.json'
    suites = {}
    for filepath in Path('').glob(f'*{suffix}'):
        name = filepath.name[:-len(suffix)]
        print(f'Loading {filepath} as {name}.')
        with open(filepath) as fp:
            suites[name] = json.load(fp)
    names = natsorted(suites.keys())
    figure_filenames = render_figures(names, suites)
    out_filename = Path('bench-results.md')
    with open(out_filename, 'w') as out:
        # Last two path components identify the benchmark directory.
        cwd = os.getcwd().split(os.sep)[-2:]
        print(f'# Benchmark of {", ".join(names)} in {cwd}', file=out)
        notes_file = Path('notes.md')
        if notes_file.exists():
            print(f'Including {notes_file} in resulting Markdown.')
            with notes_file.open() as fp:
                out.write(fp.read())
        else:
            print(f'File {notes_file} does not exist, create it to include it in resulting Markdown.')
        print('## Graphs', file=out)
        print('*The graphs are interactive, view the rendered HTML locally to enjoy it.*\n', file=out)
        for filename in figure_filenames:
            # Use HTML instead of Markdown image to specify the width
            print(f'<img type="image/svg+xml" src="{filename}" alt="{filename}" width="49%"/>', file=out)
    print(f'Markdown output written to {out_filename}.')
    render_html(out_filename, Path('bench-results.html'))
def render_checks(names, suites, out):
    """Write a Markdown table comparing the 'checks' section of every suite;
    rows whose values differ between implementations are bolded."""
    print("|Check|" + "|".join(names) + "|", file=out)
    print("|" + "|".join(["---"] * (len(names) + 1)) + "|", file=out)
    checks_by_impl = {name: suite['checks'] for name, suite in suites.items()}
    all_check_names = sorted(set().union(*(c.keys() for c in checks_by_impl.values())))

    def sanitize(value):
        # Floats get 3 significant figures; values >= 100 drop the decimals.
        if type(value) is not float:
            return str(value)
        value = float(f'{value:.3g}')
        return str(int(value)) if value >= 100 else str(value)

    for check_name in all_check_names:
        row = [sanitize(checks_by_impl[name].get(check_name)) for name in names]
        if len(row) > 1 and len(set(row)) > 1:
            row = ["**" + cell + "**" for cell in row]
        print("|" + check_name + "|" + "|".join(row) + "|", file=out)
# Registry of chart-building functions, populated via the @figure decorator.
FIGURE_FUNCS = []


def figure(func):
    """Decorator: register *func* as a figure generator, return it unchanged."""
    FIGURE_FUNCS.append(func)
    return func
def render_figures(names, suites):
    """Render every registered @figure function to an SVG file.

    Returns the list of written filenames, in registration order.
    """
    filenames = []
    # Base chart configuration; each figure mutates its own copy.
    config = pygal.Config(legend_at_bottom=True, style=DefaultStyle)
    for figure_func in FIGURE_FUNCS:
        chart = figure_func(names, suites, config.copy())
        filename = f'bench-results.{figure_func.__name__}.svg'
        chart.render_to_file(filename)
        filenames.append(filename)
    return filenames
@figure
def startup_time_figure(names, suites, config):
    """Bar chart of average startup time (s) with min/max whiskers.

    (Leftover commented-out debug code removed.)
    """
    # Scale the y axis to the largest observed startup time.
    all_vals = [suites[name]['startup_max'] for name in names]
    mx = np.max(all_vals)
    config.range = (0, mx + 0.1)
    chart = pygal.Bar(config, value_formatter=lambda x: "{:0.2f}s".format(x))
    chart.title = 'Czas uruchomienia (s)'
    for name in names:
        vals = [{'value': suites[name]['startup_avg'],
                 'ci': {'low': suites[name]['startup_min'], 'high': suites[name]['startup_max']}}]
        chart.add(name, vals)
    return chart
@figure
def errors_vs_connections_figure(names, suites, config):
    """Line chart of the request error rate (%) versus connection count.

    (Stray `print(flat)` debug output removed.)
    """
    all_vals = [suites[name]['stats'] for name in names]
    flat = [item for sublist in all_vals for item in sublist]
    # Error rate = errors / (errors + successes), in percent; div_or_none
    # yields None for empty rounds (NOTE(review): np.max over a list
    # containing None would fail -- presumably never happens in practice).
    all_rates = [
        div_or_none(s['request_errors_new_avg'], s['request_errors_new_avg'] + s['requests_new_avg'], scale=100) for s
        in flat]
    mx = np.max(all_rates)
    config.range = (0, mx + mx * 0.1)
    chart = pygal.Line(config, value_formatter=lambda x: "{:0.2f}%".format(x))
    chart.title = 'Współczynnik liczby błędów względem liczby połączeń (%)'
    connections_x_labels(chart, suites, skip=0)
    for name in names:
        chart.add(name, [
            div_or_none(s['request_errors_new_avg'], s['request_errors_new_avg'] + s['requests_new_avg'], scale=100)
            for s in suites[name]['stats'][0:]])
    return chart
@figure
def requests_vs_connections_figure(names, suites, config):
    """Line chart of successful requests per second versus connection count.

    (Stray `print(vals)` debug output and commented-out code removed.)
    """
    vals = [[x['requests_per_s_avg'] for x in suites[name]['stats']] for name in names]
    mx = np.max(vals)
    config.range = (0, mx + mx * 0.1)
    config.min_scale = 6
    chart = pygal.Line(config, value_formatter=lambda x: "{:0.0f}".format(x))
    chart.title = 'Liczba sukcesów na sekundę względem liczby połączeń (Zapytań/s)'
    connections_x_labels(chart, suites, skip=0)
    for name in names:
        series = [{'value': x['requests_per_s_avg']} for x in suites[name]['stats']]
        chart.add(name, series)
    return chart
@figure
def latency_vs_connections_50_figure(names, suites, config):
    """Median (50th percentile) latency chart."""
    return latency_vs_connections_figure(50, names, suites, config)


@figure
def latency_vs_connections_90_figure(names, suites, config):
    """90th-percentile latency chart."""
    return latency_vs_connections_figure(90, names, suites, config)


@figure
def latency_vs_connections_99_figure(names, suites, config):
    """99th-percentile latency chart."""
    return latency_vs_connections_figure(99, names, suites, config)
def latency_vs_connections_figure(percentile, names, suites, config):
    """Logarithmic line chart of the given latency percentile (ms) versus
    connection count. Not registered with @figure itself; called by the
    per-percentile wrapper functions.
    """
    all_vals = [[s[f'latency_{percentile}p_ms_avg'] for s in suites[name]['stats'][0:]] for name in names]
    mx = np.max(all_vals)
    mn = np.min(all_vals)
    # Pad the range by 50% at both ends for the log scale.
    config.range = (mn - mn * .5, mx + mx * .5)
    chart = pygal.Line(config, logarithmic=True, value_formatter=lambda x: "{:0.0f}".format(x))
    chart.title = f'{percentile}. centyl czasu odpowiedzi względem liczby połączeń (ms)'
    connections_x_labels(chart, suites, skip=0)
    for name in names:
        chart.add(name, [s[f'latency_{percentile}p_ms_avg']
                         for s in suites[name]['stats'][0:]])
    return chart
@figure
def max_mem_usage_figure(names, suites, config):
    """Line chart of peak memory usage (MiB) versus connection count."""
    all_vals = [[s['mem_usage_mb_avg'] for s in suites[name]['stats']] for name in names]
    mx = np.max(all_vals)
    config.range = (0, mx + .1 * mx)
    chart = pygal.Line(config, value_formatter=lambda x: "{:0.0f}".format(x))
    chart.title = 'Maksymalne zużycie pamięci względem liczby połączeń (MiB)'
    connections_x_labels(chart, suites)
    for name in names:
        chart.add(name, [s['mem_usage_mb_avg'] for s in suites[name]['stats']])
    return chart
@figure
def max_mem_usage_per_requests_figure(names, suites, config):
    """Line chart of peak memory divided by successful requests per second
    (MiB-second per request) versus connection count."""
    all_vals = [[div_or_none(s['mem_usage_mb_avg'], s['requests_per_s_avg']) for s in suites[name]['stats'][0:]] for name in names]
    mx = np.max(all_vals)
    config.range = (0, mx + .1 * mx)
    config.min_scale = 6
    chart = pygal.Line(config, value_formatter=lambda x: "{:0.3f}".format(x))
    chart.title = 'Maksymalne zużycie pamięci per liczba sukcesów na sekundę (MiB-sekunda/Zapytanie)'
    connections_x_labels(chart, suites, skip=0)
    for name in names:
        chart.add(name,
                  [div_or_none(s['mem_usage_mb_avg'], s['requests_per_s_avg']) for s in suites[name]['stats'][0:]])
    return chart
@figure
def cpu_figure(names, suites, config):
    """Line chart of CPU time consumed per test round (CPU seconds)."""
    mx = np.max([[s['cpu_new_s_avg'] for s in suites[name]['stats'][0:]] for name in names])
    config.range = (0, mx + mx * 0.1)
    chart = pygal.Line(config, value_formatter=lambda x: "{:0.3f}".format(x))
    chart.title = 'Wykorzystanie czasu procesora w czasie rundy testów (sekundy CPU)'
    connections_x_labels(chart, suites, skip=0)
    for name in names:
        chart.add(name, [s['cpu_new_s_avg'] for s in suites[name]['stats'][0:]])
    return chart
@figure
def cpu_per_request_figure(names, suites, config):
    """Line chart of CPU time per successful response (CPU ms per request;
    scale=1000 converts seconds to milliseconds)."""
    mx = np.max([[div_or_none(s['cpu_new_s_avg'], s['requests_new_avg'], scale=1000) for s in
                  suites[name]['stats'][0:]] for name in names])
    config.range = (0, mx + mx * 0.1)
    chart = pygal.Line(config, value_formatter=lambda x: "{:0.3f}".format(x))
    chart.title = 'Wykorzystanie czasu procesora per poprawna odpowiedź (milisekundy CPU/Req)'
    connections_x_labels(chart, suites, skip=0)
    for name in names:
        chart.add(name, [div_or_none(s['cpu_new_s_avg'], s['requests_new_avg'], scale=1000) for s in
                         suites[name]['stats'][0:]])
    return chart
@figure
def cpu_vs_requests_figure(names, suites, config):
    """XY chart of cumulative successful responses versus cumulative CPU
    seconds; each point is labelled with its connection-count round."""
    all_vls = [[s['requests_total_avg'] for s in suites[name]['stats']] for name in names]
    mx = np.max(all_vls)
    config.range = (0, mx + mx * 0.1)
    config.min_scale = 6
    chart = pygal.XY(config, value_formatter=lambda x: "{:0.0f}".format(x), series_formatter=lambda x: "{:0.2f}".format(x))
    chart.title = 'Skumulowana liczba poprawnych odpowiedzi względem skumulowanego czasu CPU'
    chart.x_title = 'sekundy CPU'
    chart.y_title = 'skumulowana liczba poprawnych odpowiedzi'
    for name in names:
        chart.add(name, [
            {'value': (s['cpu_total_s_avg'], s['requests_total_avg']),
             'label': f'After {s["connections"]} connections round.'}
            for s in suites[name]['stats']
        ])
    return chart
def connections_x_labels(chart, suites, skip=0):
    """Label the chart's x axis with the connection counts of the first
    suite's rounds; rounds with a falsy 'connections' use their 'message'."""
    first_suite = next(iter(suites.values()))
    labels = []
    for stat in first_suite['stats']:
        if stat['connections']:
            labels.append(f"{stat['connections']} conn's")
        else:
            labels.append(stat['message'])
    chart.x_labels = labels[skip:]
    chart.x_label_rotation = -30
def div_or_none(numerator, denominator, scale=1):
    """Return ``scale * numerator / denominator``, or None for a falsy
    denominator (avoids ZeroDivisionError on empty rounds)."""
    if denominator:
        return scale * numerator / denominator
    return None
# Static scaffolding for the standalone HTML report; render_html() writes
# the converted Markdown body between these two fragments.
HTML_PREFIX = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Benchmark Report</title>
</head>
<body>
'''

HTML_SUFFIX = ''' </body>
</html>
'''
def render_html(md_file, html_file):
    """Convert the generated Markdown report into a standalone HTML file and
    print its file:// URI."""
    with open(md_file) as in_fp, open(html_file, 'w') as out_fp:
        rs = in_fp.read()
        # GitHub-flavored Markdown -> HTML via marko's gfm renderer.
        html = gfm(rs)
        # Replace <img> by <embed> for pygal interactivity, http://www.pygal.org/en/latest/documentation/web.html
        html = html.replace('<img', '<embed')
        # Replace link to md with link to .html for better browsability at HTML level.
        html = html.replace('/README.md">full benchmark', '/README.html">full benchmark')
        out_fp.write(HTML_PREFIX)
        out_fp.write(html)
        out_fp.write(HTML_SUFFIX)
    print(f'HTML output written to {html_file.resolve().as_uri()}.')
if __name__ == '__main__':
    # CLI argument parsing via docopt is currently disabled.
    # args = docopt(__doc__)
    render()
| StarcoderdataPython |
3327415 | <gh_stars>0
from base64 import b64encode
import json
import requests
import sys
__docformat__ = 'sphinx en'
class Pyrate(object):
    """Minimal base class for simple REST API clients.

    Subclasses describe a concrete web service by overriding the class
    attributes below and, where needed, the hook methods
    (``check_response_success``, ``parse_errors``, ``get_oauth``).

    :param list http_methods: List of available HTTP methods for this service
    :param list return_formats: List of available return formats for this service
    :param default_header_content: Default content for the request header
    :param default_body_content: Default content for the request body
    :param string default_http_method: Default HTTP method (will be used if none else is specified in request)
    :param string default_return_format: Default return format (will be used if none else is specified in request)
    :param string connection_check_method: Used by :func:`check_connection`
    :param string auth_type: The authentification type. Obsolete.
    :param string base_url: The base url for all api requests
    :param bool send_json: Whether the request body should be encoded with json
    """

    http_methods = ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']
    return_formats = []
    default_header_content = None
    default_body_content = None
    default_http_method = None
    default_return_format = None
    connection_check_method = None
    auth_type = None
    base_url = None
    send_json = False

    def __init__(self):
        self.default_http_method = self.http_methods[0]
        try:
            self.default_return_format = self.return_formats[0]
        except IndexError:
            # No return formats declared: requests get no format suffix.
            self.default_return_format = ''

    def create_basic_auth(self, user, password):
        """Creates the header content for HTTP Basic Authentification.

        :param user: Username
        :param password: Password
        :rtype: Base64-encoded auth string
        """
        # Messing around with Python3's strictness about strings
        if sys.version_info >= (3, 0):
            if not isinstance(user, str):
                user = user.decode('utf-8')
            if not isinstance(password, str):
                password = password.decode('utf-8')
            return 'Basic ' + b64encode((user + ":" + password).encode('utf-8')).decode('utf-8')
        else:
            return 'Basic ' + b64encode(user + ":" + password).rstrip()

    def get_oauth(self):
        """Hook for OAuth; subclasses that set auth_type must implement it."""
        raise NotImplementedError("OAuth methods need to be implemented by subclasses!")

    def check_connection(self):
        """Hit the configured check endpoint and verify the expected reply.

        ``connection_check_method`` is a 4-tuple:
        (http_method, api_method, expected_key, expected_value); a falsy
        expected_value means "key must merely be present".
        :raises Exception: when the check fails.
        """
        res = self.do(self.connection_check_method[1], http_method=self.connection_check_method[0])
        if res and self.connection_check_method[2] in res:
            if self.connection_check_method[3]:
                if res[self.connection_check_method[2]] == self.connection_check_method[3]:
                    return True
            else:
                return True
        raise Exception("Check connection failed:\n%s" % res)

    def build_content(self, args):
        """Build a request-parameter dict from *args* (often ``locals()``).

        Drops the 'self' entry and every falsy value. Unlike the previous
        version, the input mapping is no longer mutated.
        """
        return {key: value for key, value in args.items()
                if key != 'self' and value}

    def check_response_success(self, response):
        """Hook: report whether *response* indicates success (subclass duty)."""
        raise NotImplementedError('Please implement in subclass')

    def parse_errors(self, response):
        """Hook: extract error details from *response* (subclass duty)."""
        raise NotImplementedError('Please implement in subclass')

    def do(self, method, content=None, headers=None, http_method=None, return_format=None):
        """Perform an API call against ``base_url + method``.

        :param method: API method path appended to base_url.
        :param content: extra body parameters merged over the defaults.
        :param headers: extra header entries merged over the defaults.
        :param http_method: HTTP verb; falls back to default_http_method.
        :param return_format: format suffix; falls back to the default.
        """
        # Work on copies: the previous implementation aliased the shared
        # class-level default dicts and update()d them in place, leaking one
        # request's parameters into every later request -- and crashed with
        # AttributeError when a default was None.
        request_body = dict(self.default_body_content) if self.default_body_content else {}
        if content is not None:
            request_body.update(content)
        request_headers = dict(self.default_header_content) if self.default_header_content else {}
        if headers is not None:
            request_headers.update(headers)
        if http_method is None:
            http_method = self.default_http_method
        if return_format is None:
            if self.default_return_format:
                return_format = "." + self.default_return_format
            else:
                return_format = ''
        request_url = self.base_url + method + return_format
        return self.do_request(http_method, request_url, request_headers, request_body, return_format)

    def do_request(self, http_method, url, headers, body, return_format):
        """Dispatch the HTTP request via ``requests`` and decode the reply."""
        if self.auth_type == 'OAUTH1':
            auth_data = self.get_oauth()
        else:
            auth_data = None
        if self.send_json:
            # We need to make sure that body is jsonified. The previous
            # `except TypeError or ValueError` only ever caught TypeError.
            try:
                body = json.dumps(body)
            except (TypeError, ValueError):
                pass
        if http_method.upper() == 'GET':
            r = requests.get(url, headers=headers, auth=auth_data)
        elif http_method.upper() == 'POST':
            r = requests.post(url, data=body, headers=headers, auth=auth_data)
        elif http_method.upper() == 'PUT':
            r = requests.put(url, data=body, headers=headers, auth=auth_data)
        elif http_method.upper() == 'DELETE':
            r = requests.delete(url, data=body, headers=headers, auth=auth_data)
        elif http_method.upper() == 'OPTIONS':
            r = requests.options(url, data=body, headers=headers, auth=auth_data)
        else:
            raise Exception("Invalid request method")
        return self.handle_response(r, return_format)

    def handle_response(self, response, return_format):
        """Return the JSON-decoded body when possible, else the raw content."""
        try:
            return response.json()
        except (ValueError, TypeError):
            return response.content

    # Proxy functions for usability
    def get(self, method, content=None, headers=None, return_format=None):
        return self.do(method, content, headers, 'GET', return_format)

    def post(self, method, content=None, headers=None, return_format=None):
        return self.do(method, content, headers, 'POST', return_format)

    def put(self, method, content=None, headers=None, return_format=None):
        return self.do(method, content, headers, 'PUT', return_format)

    def delete(self, method, content=None, headers=None, return_format=None):
        return self.do(method, content, headers, 'DELETE', return_format)

    def options(self, method, content=None, headers=None, return_format=None):
        return self.do(method, content, headers, 'OPTIONS', return_format)
| StarcoderdataPython |
3253589 | <gh_stars>1-10
import os
import shutil
import time
def get_metadata_paths(CONFIG, ARGS):
    """Create a fresh experiment directory tree and return its paths.

    Layout: ``<metadata.path>/<model_name>/<DD-MM-YYYY>/<n>/`` containing
    ``checkpoints/`` and ``logs/``; the run's config file is copied into it.
    ``<n>`` is one more than today's highest existing run number.

    :param CONFIG: config exposing CONFIG.network.metadata.path and
        CONFIG.network.parameters.model_name.
    :param ARGS: parsed CLI args exposing ARGS.config_file.
    :returns: (metadata_path, checkpoint_file_path, logs_path)
    """
    print("Creating experiment enviroment")
    metadata_path = "{}/{}/{}".format(CONFIG.network.metadata.path,
                                      CONFIG.network.parameters.model_name,
                                      time.strftime("%d-%m-%Y"))
    n_experiment = "1"
    if os.path.exists(metadata_path):
        filenames = os.listdir(metadata_path)
        if len(filenames) > 0:
            # Compare run numbers numerically: the previous string sort
            # picked '9' over '10' and silently reused an existing run dir.
            last_name = max(int(f) for f in filenames)
            n_experiment = str(last_name + 1)
    metadata_path = "{}/{}".format(metadata_path, n_experiment)
    checkpoint_path = '{}/{}'.format(metadata_path, "checkpoints")
    logs_path = "{}/{}".format(metadata_path, "logs")
    os.makedirs(checkpoint_path, exist_ok=True)
    os.makedirs(logs_path, exist_ok=True)
    checkpoint_path = "{}/{}".format(checkpoint_path, "model.h5")
    shutil.copy(ARGS.config_file, "{}/config.json".format(metadata_path))
    return metadata_path, checkpoint_path, logs_path
1617227 | <reponame>Davy-71993/MySchool<gh_stars>0
from django.contrib import admin
from .models import Calendar, Event, Term
# Expose the scheduling models in the Django admin UI.
admin.site.register(Calendar)
admin.site.register(Event)
admin.site.register(Term)
| StarcoderdataPython |
1624336 | <reponame>movermeyer/django-contact-form-site<filename>django_contact/__init__.py
from django_contact.forms import ContactForm
# Public API of the package.
__all__ = ['ContactForm']
# Legacy (pre-Django-3.2) way to select the AppConfig for this app.
default_app_config = 'django_contact.apps.ContactFormConfig'
1702107 | from GameElementBase import GameElementBase
class LivingThings(GameElementBase):
    """Base class for movable entities (player, NPCs) on the grid map."""

    def __init__(self, position, beingid):
        """
        :param position: [row, col] starting cell, mutated in place by move().
        :param beingid: identifier; "PLAYER" additionally triggers the
            room-tracking updates in StaticController on every move.
        """
        self.beingid = beingid
        self.position = position
        # Map dimensions as [rows, cols]; valid positions are 0..size-1.
        self.mapsize = [3, 3]
        self.inventory = []

    def move(self, direction):
        """Move one cell in direction 'n'/'s'/'w'/'e' if within bounds.

        Returns True when the move happened; for the player this also
        refreshes the current-room bookkeeping.
        """
        flag = False
        if direction == "n":
            if self.position[0] > 0:
                self.position[0] -= 1
                flag = True
        if direction == "s":
            if self.position[0] < (self.mapsize[0] - 1):
                self.position[0] += 1
                flag = True
        if direction == "w":
            if self.position[1] > 0:
                self.position[1] -= 1
                flag = True
        if direction == "e":
            if self.position[1] < (self.mapsize[1] - 1):
                self.position[1] += 1
                flag = True
        if flag and self.beingid == "PLAYER":
            # Imported lazily -- presumably to avoid a circular import.
            from StaticController import StaticController
            StaticController.variableMap["JUST_ENTERED"] = 1
            room = StaticController.gameMap.getRoom(self.position)
            StaticController.variableMap["CURRENT_ROOM"] = room
        return flag

    def getPosition(self):
        """Return the current [row, col] position.

        The previous version called print(StaticController.test) first,
        which raised NameError because StaticController is only imported
        inside move(); that broken debug line has been removed.
        """
        return self.position
| StarcoderdataPython |
3288199 | import logging,time
# One log file per day under ./log/, e.g. ./log/rg_20240101.log.
log_filename = r'./log/rg' + time.strftime('_%Y%m%d') + '.log'
logger = logging.getLogger('rg_log')
logger.setLevel(logging.INFO)
# If this module is imported incorrectly (e.g. multiple times), a Handler
# would be added on each import, producing duplicate log lines; removing
# all existing handlers here and re-adding one below avoids that.
while logger.hasHandlers():
    for i in logger.handlers:
        logger.removeHandler(i)
# file log
# NOTE(review): 'format' shadows the builtin of the same name; harmless
# here, but consider renaming.
format='%(asctime)s [%(levelname)s] %(message)s'
date_format= '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=format,datefmt=date_format)
handler = logging.FileHandler(log_filename, encoding='utf-8')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
def debug(msg, exc_info=False, stack_info=False):
    """Log *msg* at DEBUG level on the shared 'rg_log' logger."""
    logger.debug(msg,exc_info=exc_info,stack_info=stack_info)

def info(msg, exc_info=False, stack_info=False):
    """Log *msg* at INFO level on the shared 'rg_log' logger."""
    logger.info(msg,exc_info=exc_info,stack_info=stack_info)

def warning(msg, exc_info=False, stack_info=False):
    """Log *msg* at WARNING level on the shared 'rg_log' logger."""
    logger.warning(msg,exc_info=exc_info,stack_info=stack_info)

def error(msg, exc_info=False, stack_info=False):
    """Log *msg* at ERROR level on the shared 'rg_log' logger."""
    logger.error(msg,exc_info=exc_info,stack_info=stack_info)

def critical(msg, exc_info=False, stack_info=False):
    """Log *msg* at CRITICAL level on the shared 'rg_log' logger."""
    logger.critical(msg,exc_info=exc_info,stack_info=stack_info)
if __name__ == '__main__':
    # Smoke test: one record per level. DEBUG is suppressed because the
    # logger level is set to INFO above.
    logger.debug('debug')
    logger.info('info')
    logger.warning('warning')
    logger.error('error')
    logger.critical('critical')
| StarcoderdataPython |
139323 | <reponame>pjreed/rr_control_input_manager<gh_stars>0
#!/usr/bin/env python
# Author: <NAME>
# Description: This script manages cmd_vel from multiple sources so that they don't over-ride eachother, and so that soft E-stop can works from multiple sources.
import rospy
import time
from std_msgs.msg import Bool, String
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist, TwistStamped
from actionlib_msgs.msg import GoalID
from copy import deepcopy
class CmdVelManager(object):
    """Multiplex cmd_vel requests from several sources onto /cmd_vel/managed.

    Priority when picking the command to publish (highest wins):
    joystick > keyboard > fleet_manager > auto_dock > move_base.
    A human-local command (joystick/keyboard) sets a lock that suppresses the
    autonomous sources until the joystick has been idle for 2 seconds.
    A soft e-stop zeroes the published command and cancels move_base and
    auto-dock goals until it is reset.
    """

    # NOTE(review): these are *class* attributes shared by every instance.
    # The node only ever constructs one CmdVelManager, so this is harmless
    # here, but do not instantiate this class twice.
    use_joystick = True
    soft_estop = False
    local_control_lock = False
    remote_control_lock = False
    command_timeout = 0.5  # seconds a cached request stays valid
    keyboard_control_input_request = Twist()
    move_base_control_input_request = Twist()
    auto_dock_control_input_request = TwistStamped()
    fleet_manager_control_input_request = Twist()
    joy_control_input_request = TwistStamped()
    managed_control_input = TwistStamped()
    seq = 0

    def __init__(self):
        # ROS Subscribers: one cmd_vel topic per source, plus e-stop control.
        self.keyboard_sub = rospy.Subscriber("/cmd_vel/keyboard", Twist, self.keyboard_cb)
        self.joystick_sub = rospy.Subscriber("/cmd_vel/joystick", TwistStamped, self.joystick_cb)
        self.move_base_sub = rospy.Subscriber("/cmd_vel/move_base", Twist, self.move_base_cb)
        self.fleet_manager_sub = rospy.Subscriber("/cmd_vel/fleet_manager", Twist, self.fleet_manager_cb)
        self.auto_dock_sub = rospy.Subscriber("/cmd_vel/auto_dock", TwistStamped, self.auto_dock_cb)
        self.soft_estop_enable_sub = rospy.Subscriber("/soft_estop/enable", Bool, self.soft_estop_enable_cb)
        self.soft_estop_reset_sub = rospy.Subscriber("/soft_estop/reset", Bool, self.soft_estop_reset_cb)
        # ROS Publishers
        self.managed_pub = rospy.Publisher('/cmd_vel/managed', TwistStamped, queue_size=1)
        self.move_base_cancel = rospy.Publisher('/move_base/cancel', GoalID, queue_size=1)
        self.active_controller_pub = rospy.Publisher('/rr_control_input_manager/active_controller', String, queue_size=1)
        self.auto_dock_cancel = rospy.Publisher('/auto_dock/cancel', Bool, queue_size=10)
        # Timestamps of the last non-zero message from each source; used to
        # decide whether a cached request is still fresh.
        self.last_move_base_command_time = rospy.Time.now()
        self.last_auto_dock_command_time = rospy.Time.now()
        self.last_fleet_manager_command_time = rospy.Time.now()
        self.last_joy_command_time = rospy.Time.now()
        self.last_keyboard_command_time = rospy.Time.now()

    def control_input_pub(self, data):
        """Timer callback: publish the highest-priority fresh command.

        `data` is the rospy.TimerEvent and is unused.
        """
        my_managed_control_input = deepcopy(self.managed_control_input)
        my_managed_control_input.header.seq = self.seq
        my_managed_control_input.header.stamp = rospy.Time.now()
        my_managed_control_input.header.frame_id = 'none'
        # Default to a stopped command; NOTE(review): linear.y is deliberately
        # (?) left untouched here, matching the original — confirm.
        my_managed_control_input.twist.linear.x = 0.0
        my_managed_control_input.twist.angular.y = 0.0
        my_managed_control_input.twist.angular.z = 0.0
        current_time = rospy.Time.now()
        move_base_time_elapsed = current_time - self.last_move_base_command_time
        auto_dock_time_elapsed = current_time - self.last_auto_dock_command_time
        keyboard_time_elapsed = current_time - self.last_keyboard_command_time
        fleet_manager_time_elapsed = current_time - self.last_fleet_manager_command_time
        joy_time_elapsed = current_time - self.last_joy_command_time
        # BUG FIX: `to_sec` was previously compared without being called
        # (bound method > int), so the locks were never released.
        if joy_time_elapsed.to_sec() > 2:
            self.lock_release_cb()
        # All cached requests are deep-copied before being published.
        # BUG FIX: the original aliased the cached message into the outgoing
        # one, so the e-stop branch below zeroed the *cached* request as well.
        # Process non-human-local commands
        if not self.local_control_lock:
            # move_base requests (Priority 4)
            if move_base_time_elapsed.to_sec() < self.command_timeout:
                my_managed_control_input.twist = deepcopy(self.move_base_control_input_request)
                my_managed_control_input.header.frame_id = 'move_base'
            # auto_dock requests (Priority 3)
            if auto_dock_time_elapsed.to_sec() < self.command_timeout:
                my_managed_control_input = deepcopy(self.auto_dock_control_input_request)
                my_managed_control_input.header.frame_id = 'auto_dock'
            # fleet_manager requests (Priority 2)
            if fleet_manager_time_elapsed.to_sec() < self.command_timeout:
                my_managed_control_input.twist = deepcopy(self.fleet_manager_control_input_request)
                my_managed_control_input.header.frame_id = 'fleet_manager'
        # keyboard (Priority 2)
        if keyboard_time_elapsed.to_sec() < self.command_timeout:
            my_managed_control_input.twist = deepcopy(self.keyboard_control_input_request)
            my_managed_control_input.header.frame_id = 'keyboard'
        # Process joystick requests (Highest Priority 1)
        if joy_time_elapsed.to_sec() < self.command_timeout:
            my_managed_control_input = deepcopy(self.joy_control_input_request)
            my_managed_control_input.header.frame_id = 'joystick'
        # Check for estop: override whatever was selected with a zero command.
        if self.soft_estop:
            my_managed_control_input.header.frame_id = 'soft e-stopped'
            my_managed_control_input.twist.linear.x = 0
            my_managed_control_input.twist.angular.y = 0
            my_managed_control_input.twist.angular.z = 0
            rospy.logwarn_throttle(60, "[CONTROL_INPUT_MANAGER_NODE] Soft Estop is still enabled which will prevent any motion")
        self.managed_pub.publish(my_managed_control_input)
        self.seq += 1

    def move_base_cb(self, move_base_cmd_vel):
        """Cache a non-zero move_base command and refresh its timestamp."""
        if (move_base_cmd_vel.linear.x, move_base_cmd_vel.angular.y, move_base_cmd_vel.angular.z) != (0, 0, 0):
            self.last_move_base_command_time = rospy.Time.now()
            self.move_base_control_input_request.linear.x = move_base_cmd_vel.linear.x
            self.move_base_control_input_request.linear.y = move_base_cmd_vel.linear.y
            # Fudge factor, remove when switched to closed loop control on rr_openrover_basic
            self.move_base_control_input_request.angular.z = move_base_cmd_vel.angular.z * 1.4

    def auto_dock_cb(self, auto_dock_cmd_vel):
        """Cache a non-zero auto-dock command and refresh its timestamp."""
        if (auto_dock_cmd_vel.twist.linear.x, auto_dock_cmd_vel.twist.angular.y, auto_dock_cmd_vel.twist.angular.z) != (0, 0, 0):
            self.last_auto_dock_command_time = rospy.Time.now()
            self.auto_dock_control_input_request = auto_dock_cmd_vel

    def fleet_manager_cb(self, fleet_manager_cmd_vel):
        """Cache a fleet-manager command; a non-zero one takes the remote lock."""
        self.last_fleet_manager_command_time = rospy.Time.now()
        if (fleet_manager_cmd_vel.linear.x, fleet_manager_cmd_vel.angular.y, fleet_manager_cmd_vel.angular.z) != (0, 0, 0):
            self.remote_control_lock = True
        self.fleet_manager_control_input_request = fleet_manager_cmd_vel

    def keyboard_cb(self, keyboard_cmd_vel):
        """Cache a keyboard command; a non-zero one takes the local lock."""
        if (keyboard_cmd_vel.linear.x, keyboard_cmd_vel.angular.y, keyboard_cmd_vel.angular.z) != (0, 0, 0):
            self.last_keyboard_command_time = rospy.Time.now()
            self.local_control_lock = True
        self.keyboard_control_input_request = keyboard_cmd_vel

    def joystick_cb(self, joy_cmd_vel):
        """Cache a joystick command; a non-zero one takes the local lock."""
        if (joy_cmd_vel.twist.linear.x, joy_cmd_vel.twist.angular.y, joy_cmd_vel.twist.angular.z) != (0, 0, 0):
            # Use the message's own stamp so lock release keys off sender time.
            self.last_joy_command_time = joy_cmd_vel.header.stamp
            self.local_control_lock = True
        self.joy_control_input_request = joy_cmd_vel

    def soft_estop_enable_cb(self, data):
        """Engage the soft e-stop and cancel move_base / auto-dock goals."""
        if data.data == True:
            self.soft_estop = True
            cancel_msg = GoalID()
            self.move_base_cancel.publish(cancel_msg)
            stop_msg = Bool()
            stop_msg.data = True
            self.auto_dock_cancel.publish(stop_msg)
            rospy.logwarn("[CONTROL_INPUT_MANAGER_NODE] Soft E-Stop Enabled")

    def soft_estop_reset_cb(self, data):
        """Release the soft e-stop so motion commands flow again."""
        if data.data == True:
            self.soft_estop = False
            rospy.logwarn("[CONTROL_INPUT_MANAGER_NODE] Soft E-Stop reset")

    def lock_release_cb(self):
        """Release both local (joystick/keyboard) and remote (fleet) locks."""
        self.local_control_lock = False
        self.remote_control_lock = False
if __name__ == '__main__':
    rospy.init_node("control_input_manager_node")
    manager = CmdVelManager()
    # Publish the managed command at 10 Hz for as long as the node is alive.
    cmd_managed_timer = rospy.Timer(rospy.Duration(0.1), manager.control_input_pub)
    rospy.spin()
| StarcoderdataPython |
3294251 | import logging
from dotenv import find_dotenv, dotenv_values
def load_config():
    """ Load the variables from the .env file
    Returns:
        .env variables(dict)
    """
    dot_env_path = find_dotenv(raise_error_if_not_found=True)
    log = logging.getLogger(__name__)
    # Lazy %-formatting; renders identically to the previous f-string.
    log.info("Found config in %s", dot_env_path)
    return dotenv_values(dot_env_path)
| StarcoderdataPython |
class Solution:
    def numSubarraysWithSum(self, A: List[int], S: int) -> int:
        """Count the subarrays of binary array *A* whose elements sum to *S*.

        Keeps a running prefix sum and a Counter of prefix sums seen so far:
        the subarray (i, j] sums to S exactly when prefix[j] - prefix[i] == S,
        so at each step we add how many earlier prefixes equal prefix - S.
        """
        seen = collections.Counter({0: 1})  # the empty prefix
        prefix = 0
        ans = 0
        for x in A:
            prefix += x
            ans += seen[prefix - S]
            seen[prefix] += 1
        return ans
| StarcoderdataPython |
1687006 | <reponame>Guo-T-W/Tacotron-WaveRNN
import tensorflow as tf
# Default hyperparameters
# One tf.contrib.training.HParams bundle (TensorFlow 1.x API) holding every
# tunable for the Tacotron + WaveRNN/WaveNet pipeline: audio front-end,
# Tacotron architecture, WaveNet vocoder, and training schedules.
hparams = tf.contrib.training.HParams(
    # Comma-separated list of cleaners to run on text prior to training and eval. For non-English
    # text, you may want to use "basic_cleaners" or "transliteration_cleaners".
    cleaners='english_cleaners',
    ###########################################################################################################################################
    # Audio
    num_mels=80,  # Number of mel-spectrogram channels and local conditioning dimensionality
    num_freq=1025,  # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network
    rescale=True,  # Whether to rescale audio prior to preprocessing
    rescaling_max=0.999,  # Rescaling value
    trim_silence=True,  # Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
    clip_mels_length=True,  # For cases of OOM (Not really recommended, working on a workaround)
    max_mel_frames=1300,  # Only relevant when clip_mels_length = True
    # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
    # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
    # Does not work if n_fft is not multiple of hop_size!!
    use_lws=False,
    silence_threshold=2,  # silence threshold used for sound trimming for wavenet preprocessing
    # Mel spectrogram
    n_fft=2048,  # Extra window size is filled with 0 paddings to match this parameter
    hop_size=275,  # For 22050Hz, 275 ~= 12.5 ms
    win_size=1100,  # For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft)
    sample_rate=22050,  # 22050 Hz (corresponding to ljspeech dataset)
    frame_shift_ms=None,
    # M-AILABS (and other datasets) trim params
    trim_fft_size=512,
    trim_hop_size=128,
    trim_top_db=23,
    # Mel and Linear spectrograms normalization/scaling and clipping
    signal_normalization=True,
    allow_clipping_in_normalization=True,  # Only relevant if mel_normalization = True
    symmetric_mels=True,  # Whether to scale the data to be symmetric around 0
    max_abs_value=4.,  # max absolute value of data. If symmetric, data will be [-max, max] else [0, max]
    normalize_for_wavenet=True,  # whether to rescale to [0, 1] for wavenet.
    # Contribution by @begeekmyfriend
    # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude levels. Also allows for better G&L phase reconstruction)
    preemphasize=True,  # whether to apply filter
    preemphasis=0.97,  # filter coefficient.
    # Limits
    min_level_db=-100,
    ref_level_db=20,
    fmin=55,  # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
    fmax=7600,
    # Griffin Lim
    power=1.5,
    griffin_lim_iters=60,
    ###########################################################################################################################################
    # Tacotron
    outputs_per_step=1,  # number of frames to generate at each decoding step (speeds up computation and allows for higher batch size)
    stop_at_any=True,  # Determines whether the decoder should stop when predicting <stop> to any frame or to all of them
    embedding_dim=512,  # dimension of embedding space
    enc_conv_num_layers=3,  # number of encoder convolutional layers
    enc_conv_kernel_size=(5, ),  # size of encoder convolution filters for each layer
    enc_conv_channels=512,  # number of encoder convolutions filters for each layer
    encoder_lstm_units=256,  # number of lstm units for each direction (forward and backward)
    smoothing=False,  # Whether to smooth the attention normalization function
    attention_dim=128,  # dimension of attention space
    attention_filters=32,  # number of attention convolution filters
    attention_kernel=(31, ),  # kernel size of attention convolution
    cumulative_weights=True,  # Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)
    prenet_layers=[256, 256],  # number of layers and number of units of prenet
    decoder_layers=2,  # number of decoder lstm layers
    decoder_lstm_units=1024,  # number of decoder lstm units on each layer
    max_iters=2000,  # Max decoder steps during inference (Just for safety from infinite loop cases)
    postnet_num_layers=5,  # number of postnet convolutional layers
    postnet_kernel_size=(5, ),  # size of postnet convolution filters for each layer
    postnet_channels=512,  # number of postnet convolution filters for each layer
    # CBHG mel->linear postnet
    cbhg_kernels=8,  # All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act as "K-grams"
    cbhg_conv_channels=128,  # Channels of the convolution bank
    cbhg_pool_size=2,  # pooling size of the CBHG
    cbhg_projection=256,  # projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)
    cbhg_projection_kernel_size=3,  # kernel_size of the CBHG projections
    cbhg_highwaynet_layers=4,  # Number of HighwayNet layers
    cbhg_highway_units=128,  # Number of units used in HighwayNet fully connected layers
    cbhg_rnn_units=128,  # Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in shape
    mask_encoder=False,  # whether to mask encoder padding while computing attention
    mask_decoder=False,  # Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not be weighted, else recommended pos_weight = 20)
    cross_entropy_pos_weight=1,  # Use class weights to reduce the stop token classes imbalance (by adding more penalty on False Negatives (FN)) (1 = disabled)
    predict_linear=True,  # Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)
    ###########################################################################################################################################
    # Wavenet
    # Input type:
    # 1. raw [-1, 1]
    # 2. mulaw [-1, 1]
    # 3. mulaw-quantize [0, mu]
    # If input_type is raw or mulaw, network assumes scalar input and
    # discretized mixture of logistic distributions output, otherwise one-hot
    # input and softmax output are assumed.
    input_type="raw",
    quantize_channels=2 ** 16,  # 65536 (16-bit) (raw) or 256 (8-bit) (mulaw or mulaw-quantize) // number of classes = 256 <=> mu = 255
    log_scale_min=-14.,  # Mixture of logistic distributions minimal log scale
    log_scale_min_gauss=-7.,  # Gaussian distribution minimal allowed log scale
    # To use Gaussian distribution as output distribution instead of mixture of logistics, set "out_channels = 2" instead of "out_channels = 10 * 3". (UNDER TEST)
    out_channels=2,  # This should be equal to quantize channels when input type is 'mulaw-quantize' else: num_distributions * 3 (prob, mean, log_scale).
    layers=20,  # Number of dilated convolutions (Default: Simplified Wavenet of Tacotron-2 paper)
    stacks=2,  # Number of dilated convolution stacks (Default: Simplified Wavenet of Tacotron-2 paper)
    residual_channels=128,  # Number of residual block input/output channels.
    gate_channels=256,  # split in 2 in gated convolutions
    skip_out_channels=128,  # Number of residual block skip convolution channels.
    kernel_size=3,  # The number of inputs to consider in dilated convolutions.
    cin_channels=80,  # Set this to -1 to disable local conditioning, else it must be equal to num_mels!!
    upsample_conditional_features=True,  # Whether to repeat conditional features or upsample them (The latter is recommended)
    upsample_type='1D',  # Type of the upsampling deconvolution. Can be ('1D' or '2D'). 1D spans all frequency bands for each frame while 2D spans "freq_axis_kernel_size" bands at a time
    upsample_activation='LeakyRelu',  # Activation function used during upsampling. Can be ('LeakyRelu', 'Relu' or None)
    upsample_scales=[5, 5, 11],  # prod(upsample_scales) should be equal to hop_size
    freq_axis_kernel_size=3,  # Only used for 2D upsampling. This is the number of requency bands that are spanned at a time for each frame.
    leaky_alpha=0.4,  # slope of the negative portion of LeakyRelu (LeakyRelu: y=x if x>0 else y=alpha * x)
    gin_channels=-1,  # Set this to -1 to disable global conditioning, Only used for multi speaker dataset. It defines the depth of the embeddings (Recommended: 16)
    use_speaker_embedding=True,  # whether to make a speaker embedding
    n_speakers=5,  # number of speakers (rows of the embedding)
    use_bias=True,  # Whether to use bias in convolutional layers of the Wavenet
    max_time_sec=None,  # Max time of audio for training. If None, we use max_time_steps.
    max_time_steps=11000,  # Max time steps in audio used to train wavenet (decrease to save memory) (Recommend: 8000 on modest GPUs, 13000 on stronger ones)
    ###########################################################################################################################################
    # Tacotron Training
    tacotron_random_seed=5339,  # Determines initial graph and operations (i.e: model) random state for reproducibility
    tacotron_swap_with_cpu=False,  # Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!)
    tacotron_batch_size=32,  # number of training samples on each training steps
    tacotron_reg_weight=1e-6,  # regularization weight (for L2 regularization)
    tacotron_scale_regularization=False,  # Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)
    tacotron_test_size=None,  # % of data to keep as test data, if None, tacotron_test_batches must be not None
    tacotron_test_batches=48,  # number of test batches (For Ljspeech: 10% ~= 41 batches of 32 samples)
    tacotron_data_random_state=1234,  # random state for train test split repeatability
    # Usually your GPU can handle 16x tacotron_batch_size during synthesis for the same memory amount during training (because no gradients to keep and ops to register for backprop)
    tacotron_synthesis_batch_size=32 * 16,  # This ensures GTA synthesis goes up to 40x faster than one sample at a time and uses 100% of your GPU computation power.
    tacotron_decay_learning_rate=True,  # boolean, determines if the learning rate will follow an exponential decay
    tacotron_start_decay=50000,  # Step at which learning decay starts
    tacotron_decay_steps=50000,  # Determines the learning rate decay slope (UNDER TEST)
    tacotron_decay_rate=0.4,  # learning rate decay rate (UNDER TEST)
    tacotron_initial_learning_rate=1e-3,  # starting learning rate
    tacotron_final_learning_rate=1e-5,  # minimal learning rate
    tacotron_adam_beta1=0.9,  # AdamOptimizer beta1 parameter
    tacotron_adam_beta2=0.999,  # AdamOptimizer beta2 parameter
    tacotron_adam_epsilon=1e-6,  # AdamOptimizer Epsilon parameter
    tacotron_zoneout_rate=0.1,  # zoneout rate for all LSTM cells in the network
    tacotron_dropout_rate=0.5,  # dropout rate for all convolutional layers + prenet
    tacotron_clip_gradients=True,  # whether to clip gradients
    natural_eval=False,  # Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same teacher-forcing ratio as in training (just for overfit)
    # Decoder RNN learning can take be done in one of two ways:
    # Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode='constant'
    # Curriculum Learning Scheme: From Teacher-Forcing to sampling from previous outputs is function of global step. (teacher forcing ratio decay) mode='scheduled'
    # The second approach is inspired by:
    # Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.
    # Can be found under: https://arxiv.org/pdf/1506.03099.pdf
    tacotron_teacher_forcing_mode='constant',  # Can be ('constant' or 'scheduled'). 'scheduled' mode applies a cosine teacher forcing ratio decay. (Preference: scheduled)
    tacotron_teacher_forcing_ratio=1.,  # Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs, Only relevant if mode='constant'
    tacotron_teacher_forcing_init_ratio=1.,  # initial teacher forcing ratio. Relevant if mode='scheduled'
    tacotron_teacher_forcing_final_ratio=0.,  # final teacher forcing ratio. Relevant if mode='scheduled'
    tacotron_teacher_forcing_start_decay=10000,  # starting point of teacher forcing ratio decay. Relevant if mode='scheduled'
    tacotron_teacher_forcing_decay_steps=280000,  # Determines the teacher forcing ratio decay slope. Relevant if mode='scheduled'
    tacotron_teacher_forcing_decay_alpha=0.,  # teacher forcing ratio decay rate. Relevant if mode='scheduled'
    ###########################################################################################################################################
    # Wavenet Training
    wavenet_random_seed=5339,  # S=5, E=3, D=9 :)
    wavenet_swap_with_cpu=False,  # Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!)
    wavenet_batch_size=8,  # batch size used to train wavenet.
    wavenet_test_size=0.0441,  # % of data to keep as test data, if None, wavenet_test_batches must be not None
    wavenet_test_batches=None,  # number of test batches.
    wavenet_data_random_state=1234,  # random state for train test split repeatability
    # During synthesis, there is no max_time_steps limitation so the model can sample much longer audio than 8k(or 13k) steps. (Audio can go up to 500k steps, equivalent to ~21sec on 24kHz)
    # Usually your GPU can handle ~2x wavenet_batch_size during synthesis for the same memory amount during training (because no gradients to keep and ops to register for backprop)
    wavenet_synthesis_batch_size=10 * 2,  # This ensure that wavenet synthesis goes up to 4x~8x faster when synthesizing multiple sentences. Watch out for OOM with long audios.
    wavenet_lr_schedule='exponential',  # learning rate schedule. Can be ('exponential', 'noam')
    wavenet_learning_rate=1e-4,  # wavenet initial learning rate
    wavenet_warmup=4000.,  # Only used with 'noam' scheme. Defines the number of ascending learning rate steps.
    wavenet_decay_rate=0.5,  # Only used with 'exponential' scheme. Defines the decay rate.
    wavenet_decay_steps=300000,  # Only used with 'exponential' scheme. Defines the decay steps.
    wavenet_adam_beta1=0.9,  # Adam beta1
    wavenet_adam_beta2=0.999,  # Adam beta2
    wavenet_adam_epsilon=1e-8,  # Adam Epsilon
    wavenet_clip_gradients=False,  # Whether the clip the gradients during wavenet training.
    wavenet_ema_decay=0.9999,  # decay rate of exponential moving average
    wavenet_weight_normalization=False,  # Whether to Apply Saliman & Kingma Weight Normalization (reparametrization) technique. (NEEDS VERIFICATION)
    wavenet_init_scale=1.,  # Only relevent if weight_normalization=True. Defines the initial scale in data dependent initialization of parameters.
    wavenet_dropout=0.05,  # drop rate of wavenet layers
    train_with_GTA=True,  # Whether to use GTA mels to train WaveNet instead of ground truth mels.
    ###########################################################################################################################################
    # Eval sentences (if no eval file was specified, these sentences are used for eval)
    sentences=[
        # From July 8, 2017 New York Times:
        'Scientists at the CERN laboratory say they have discovered a new particle.',
        'There\'s a way to measure the acute emotional intelligence that has never gone out of style.',
        'President Trump met with other leaders at the Group of 20 conference.',
        'The Senate\'s bill to repeal and replace the Affordable Care Act is now imperiled.',
        # From Google's Tacotron example page:
        'Generative adversarial network or variational auto-encoder.',
        'Basilar membrane and otolaryngology are not auto-correlations.',
        'He has read the whole thing.',
        'He reads books.',
        'He thought it was time to present the present.',
        'Thisss isrealy awhsome.',
        'Punctuation sensitivity, is working.',
        'Punctuation sensitivity is working.',
        "<NAME> picked a peck of pickled peppers. How many pickled peppers did <NAME> pick?",
        "She sells sea-shells on the sea-shore. The shells she sells are sea-shells I'm sure.",
        "Tajima Airport serves Toyooka.",
        # From The web (random long utterance)
        'Sequence to sequence models have enjoyed great success in a variety of tasks such as machine translation, speech recognition, and text summarization.\
This project covers a sequence to sequence model trained to predict a speech representation from an input sequence of characters. We show that\
the adopted architecture is able to perform this task with wild success.',
        'Thank you so much for your support!',
    ]
)
def hparams_debug_string():
    """Return a readable multi-line dump of all hyperparameters.

    The bulky `sentences` list is omitted to keep the output short.
    """
    values = hparams.values()
    pairs = (' %s: %s' % (key, values[key]) for key in sorted(values) if key != 'sentences')
    return 'Hyperparameters:\n' + '\n'.join(pairs)
| StarcoderdataPython |
1744367 | import sys, os
base_path = os.path.dirname(os.path.realpath(__file__)).split('reproduce_results')[0]
sys.path.append(base_path)
from helper_functions import wilcoxon_statistical_test
import json
import warnings
warnings.filterwarnings('ignore')
# Compare CELOE-CLP against CELOE, OCEL and ELTL on the mutagenesis knowledge
# base with the Wilcoxon signed-rank test, and save per-attribute statistics.
path = base_path+"Datasets/mutagenesis/Results"
print()
print('#'*50)
# path.split('/')[-2] is the KB directory name, e.g. "mutagenesis".
print('On {} KB'.format(path.split('/')[-2].upper()))
print('#'*50)
# Load the per-system concept-learning result files.
with open(path+"/concept_learning_results_celoe_clp.json") as results_clp:
    clp_data = json.load(results_clp)
with open(path+"/concept_learning_results_celoe.json") as results_celoe:
    celoe_data = json.load(results_celoe)
with open(path+"/concept_learning_results_ocel.json") as results_ocel:
    ocel_data = json.load(results_ocel)
with open(path+"/concept_learning_results_eltl.json") as results_eltl:
    eltl_data = json.load(results_eltl)
# Attributes to test; 'Prediction' and 'Learned Concept' are not numeric scores.
valid_attributes = [attribute for attribute in celoe_data if attribute not in ['Prediction', 'Learned Concept']]
for algo, algo_data in zip(['CELOE', 'OCEL', 'ELTL'], [celoe_data, ocel_data, eltl_data]):
    statistical_test_results_dict = {attr: dict() for attr in valid_attributes}
    print()
    print('*'*40)
    print('Statistics CELOE-CLP vs {}'.format(algo))
    print('*'*40)
    for attribute in valid_attributes:
        print("Test on "+attribute+":")
        data1 = algo_data[attribute]
        data2 = clp_data[attribute]
        stats, p = wilcoxon_statistical_test(data1, data2)
        print()
        statistical_test_results_dict[attribute].update({"p-value": p, "stats": stats})
    # One output file per compared algorithm.
    with open(path+"/wilcoxon_statistical_test_CELOE-CLP_vs_{}.json".format(algo), "w") as stat_test:
        json.dump(statistical_test_results_dict, stat_test, indent=3)
# NOTE(review): indentation was lost in this copy; the final summary print is
# assumed to sit *outside* the algorithm loop — confirm against the original.
print("\nStatistical test results saved in "+path+"/")
class Stack:
    """A simple LIFO stack backed by a Python list (top of stack = end)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True if the stack holds no items."""
        # Idiomatic truthiness test instead of comparing against [].
        return not self.items

    def push(self, item):
        """Put *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (raises IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.items)
# Quick demonstration of the Stack class.
s = Stack()
s.isEmpty()  # result intentionally unused (smoke call)
for item in (5, 56, 'sdfg', 9):
    s.push(item)
print(s.peek())
print(s.pop())
print(s.peek())
print(s.size())
| StarcoderdataPython |
4821060 | import logging
from flask import jsonify, request
import flask_login
import mediacloud.error
from server import app, mc
import server.views.apicache as base_apicache
from server.auth import user_mediacloud_client, user_mediacloud_key
from server.util.request import form_fields_required, api_error_handler, json_error_response, arguments_required
from server.util.stringutil import ids_from_comma_separated_str
from server.util.tags import US_COLLECTIONS
from server.views.topics import concatenate_solr_dates
from server.views.media_picker import concatenate_query_for_solr, custom_collection_as_solr_query
from server.views.topics.foci.retweetpartisanship import add_retweet_partisanship_to_topic
from server.views.topics.topic import topic_summary
logger = logging.getLogger(__name__)
VERSION_1 = 1
def _topic_query_from_request():
    """Parse the shared topic-preview form fields into solr (q, fq) strings.

    Centralizes request parsing for all the create-preview endpoints.
    Returns a (query, filter_query) tuple.
    """
    # BUG FIX: 'searches[]' used to be read with a hard [] lookup, raising
    # KeyError (-> 500) when absent, unlike the guarded sources/collections.
    q = concatenate_query_for_solr(solr_seed_query=request.form['q'],
                                   media_ids=ids_from_comma_separated_str(request.form['sources[]'])
                                   if 'sources[]' in request.form else None,
                                   tags_ids=ids_from_comma_separated_str(request.form['collections[]'])
                                   if 'collections[]' in request.form else None,
                                   custom_ids=request.form.get('searches[]'))
    # NOTE(review): start_date/end_date remain required fields here — callers
    # appear to always send them; confirm before relaxing.
    fq = concatenate_solr_dates(start_date=request.form['start_date'],
                                end_date=request.form['end_date'])
    return q, fq
@app.route('/api/topics/create/preview/split-story/count', methods=['POST'])
@flask_login.login_required
@form_fields_required('q')
@api_error_handler
def api_topics_preview_split_story_count():
    """Preview attention-over-time story counts for a proposed topic query."""
    solr_query, fq = _topic_query_from_request()
    results = base_apicache.story_count(user_mediacloud_key(), solr_query, fq, split=True)
    # Add the overall total alongside the per-interval counts.
    results['total_story_count'] = sum(c['count'] for c in results['counts'])
    return jsonify({'results': results})
@app.route('/api/topics/create/preview/story/count', methods=['POST'])
@flask_login.login_required
@form_fields_required('q')
@api_error_handler
def api_topics_preview_story_count():
    """Preview the total matching-story count for a proposed topic query."""
    solr_query, fq = _topic_query_from_request()
    # give them back new data, so they can update the client
    return jsonify(base_apicache.story_count(user_mediacloud_key(), solr_query, fq))
@app.route('/api/topics/create/preview/stories/sample', methods=['POST'])
@flask_login.login_required
@form_fields_required('q')
@api_error_handler
def api_topics_preview_story_sample():
    """Preview a random sample of stories matching a proposed topic query."""
    solr_query, fq = _topic_query_from_request()
    sample = base_apicache.story_list(user_mediacloud_key(), solr_query, fq,
                                      sort=mc.SORT_RANDOM, rows=request.form['rows'])
    return jsonify(sample)
@app.route('/api/topics/create/preview/words/count', methods=['POST'])
@flask_login.login_required
@form_fields_required('q')
@api_error_handler
def api_topics_preview_word_count():
    """Preview the top-words list for a proposed topic query."""
    solr_query, fq = _topic_query_from_request()
    # give them back new data, so they can update the client
    return jsonify(base_apicache.word_count(user_mediacloud_key(), solr_query, fq))
@app.route('/api/topics/create', methods=['PUT'])
@flask_login.login_required
@form_fields_required('name', 'description', 'solr_seed_query', 'start_date', 'end_date')
@api_error_handler
def topic_create():
    """Create a new topic from the submitted form and return its summary.

    Also attaches the retweet-partisanship subtopic automatically when any
    US-centric collection is included.
    """
    user_mc = user_mediacloud_client()
    name = request.form['name']
    description = request.form['description']
    solr_seed_query = request.form['solr_seed_query']
    start_date = request.form['start_date']
    end_date = request.form['end_date']
    # BUG FIX: 'ch_monitor_id' used to be read unconditionally (KeyError when
    # the field was missing); read it defensively like the other optionals.
    ch_monitor_id = request.form.get('ch_monitor_id')
    optional_args = {
        'is_public': request.form['is_public'] if 'is_public' in request.form else None,
        'is_logogram': request.form['is_logogram'] if 'is_logogram' in request.form else None,
        'ch_monitor_id': ch_monitor_id if ch_monitor_id and ch_monitor_id != 'null' else None,
        'max_iterations': request.form['max_iterations'] if 'max_iterations' in request.form else None,
        'max_stories': request.form['max_stories'] if 'max_stories' in request.form and request.form['max_stories'] != 'null' else flask_login.current_user.profile['limits']['max_topic_stories'],
    }
    # parse out any sources and collections, or custom collections to add
    media_ids_to_add = ids_from_comma_separated_str(request.form.get('sources[]', ''))
    tag_ids_to_add = ids_from_comma_separated_str(request.form.get('collections[]', ''))
    searches_str = request.form.get('searches[]')
    custom_collections_clause = custom_collection_as_solr_query(searches_str) if searches_str else ''
    if len(custom_collections_clause) > 0:
        solr_seed_query = '{} OR {}'.format(solr_seed_query, custom_collections_clause)
    try:
        topic_result = user_mc.topicCreate(name=name, description=description, solr_seed_query=solr_seed_query,
                                           start_date=start_date, end_date=end_date, media_ids=media_ids_to_add,
                                           media_tags_ids=tag_ids_to_add, **optional_args)['topics'][0]
        topics_id = topic_result['topics_id']
        logger.info("Created new topic \"{}\" as {}".format(name, topics_id))
        # if this includes any of the US-centric collections, add the retweet partisanship subtopic by default
        if set(tag_ids_to_add).intersection(US_COLLECTIONS):
            add_retweet_partisanship_to_topic(topic_result['topics_id'],
                                              'Retweet Partisanship',
                                              'Subtopics driven by our analysis of Twitter followers of Trump and Clinton during the 2016 election season. Each media soure is scored based on the ratio of retweets of their stories in those two groups.')
        # client will either make a empty snapshot, or a spidering one
        return topic_summary(topics_id)
    # BUG FIX: the specific MCException handler was listed *after* the broad
    # Exception handler and was therefore unreachable; most-specific first.
    except mediacloud.error.MCException as e:
        logging.error("Topic creation failed {}".format(name))
        logging.exception(e)
        return json_error_response(e.message, e.status_code)
    except Exception as e:
        logging.error("Topic creation failed {}".format(name))
        logging.exception(e)
        return json_error_response(str(e), 500)
@app.route('/api/topics/name-exists', methods=['GET'])
@flask_login.login_required
@arguments_required('searchStr')
@api_error_handler
def topic_name_exists():
    """Report whether a topic with this name already exists (case-insensitive).

    Uses the admin connection so *all* topics are searched (not just those
    the user can see); only the boolean result leaves this endpoint.  When a
    `topicId` is supplied (editing an existing topic), that topic's own name
    is excluded from the clash check.
    """
    search_str = request.args['searchStr']
    topics_id = int(request.args['topicId']) if 'topicId' in request.args else None
    matching_topics = mc.topicList(name=search_str, limit=15)
    matching_topic_names = [
        t['name'].lower().strip()
        for t in matching_topics['topics']
        if not topics_id or t['topics_id'] != topics_id
    ]
    return jsonify({'nameInUse': search_str.lower() in matching_topic_names})
| StarcoderdataPython |
1627270 | from setuptools import setup
import braces
# Trove classifiers describing the supported Python and Django versions.
CLASSIFIERS = [
    "Programming Language :: Python",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "License :: OSI Approved :: BSD License",
    "Environment :: Web Environment",
    "Development Status :: 5 - Production/Stable",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Framework :: Django",
    "Framework :: Django :: 1.11",
    "Framework :: Django :: 2.0",
]

setup(
    name="django-braces",
    version=braces.__version__,
    description="Reusable, generic mixins for Django",
    long_description=("Mixins to add easy functionality to Django "
                      "class-based views, forms, and models."),
    keywords="django, views, forms, mixins",
    author="<NAME> <<EMAIL>>, <NAME> <<EMAIL>>",
    author_email="<EMAIL>",
    url="https://github.com/brack3t/django-braces/",
    license="BSD",
    packages=["braces"],
    zip_safe=False,
    include_package_data=True,
    classifiers=CLASSIFIERS,
)
| StarcoderdataPython |
1793559 | <filename>purdy/colour/plainco.py
from purdy.parser import FoldedCodeLine
# =============================================================================
# Plain Colourizer: the colourizer that doesn't do colour, handles plain text
# augmentation like line numbers for uncolourized display
class PlainColourizer:
    """Colourizer that emits plain (uncoloured) text.

    Handles the non-colour augmentation of a code line, such as the
    line-number prefix, and renders folded lines as a vertical ellipsis.
    """

    @classmethod
    def colourize(cls, code_line):
        """Return the plain-text rendering of *code_line*.

        :param code_line: a :class:`CodeLine` object to process
        """
        if isinstance(code_line, FoldedCodeLine):
            return '⋮'

        pieces = []
        if code_line.line_number != -1:
            pieces.append(cls.line_number(code_line.line_number))
        pieces.extend(part.text for part in code_line.parts)
        return ''.join(pieces)

    @classmethod
    def line_number(cls, num):
        """Return *num* right-aligned in a 3-wide field with a trailing space."""
        return f'{num:3} '
| StarcoderdataPython |
3312566 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: srinath.h
#
# Created: 13/05/2012
# Copyright: (c) srinath.h 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import calendar
import datetime
def get_full_day_datetime_range(curdatetime):
    """Return (start, end) datetimes spanning the full calendar day of *curdatetime*.

    Start is midnight (00:00:00) and end is 23:59:59 of the same date.
    """
    day = curdatetime.date()
    day_start = datetime.datetime.combine(day, datetime.time(0, 0, 0))
    day_end = datetime.datetime.combine(day, datetime.time(23, 59, 59))
    return (day_start, day_end)
def get_full_week_datetime_range(curdatetime):
    """Return (start, end) datetimes spanning the Sunday-to-Saturday week of *curdatetime*.

    Start is midnight of the week's Sunday; end is 23:59:59 of its Saturday.
    Equivalent to the original per-weekday offset table, expressed arithmetically.
    """
    today = curdatetime.date()
    # date.weekday(): Monday == 0 ... Sunday == 6; shift so Sunday starts the week.
    days_since_sunday = (today.weekday() + 1) % 7
    week_start = today - datetime.timedelta(days=days_since_sunday)
    week_end = week_start + datetime.timedelta(days=6)
    return (datetime.datetime.combine(week_start, datetime.time(0, 0, 0)),
            datetime.datetime.combine(week_end, datetime.time(23, 59, 59)))
def get_full_month_datetime_range(curdatetime):
    """Return (start, end) datetimes spanning the full calendar month of *curdatetime*.

    Start is midnight of the 1st; end is 23:59:59 of the month's last day.
    The original found the last day by advancing one day at a time until the
    month rolled over; calendar.monthrange gives it directly in O(1).
    """
    year = curdatetime.year
    month = curdatetime.month
    # calendar.monthrange(year, month) -> (weekday_of_first_day, number_of_days)
    last_day = calendar.monthrange(year, month)[1]
    start_dt = datetime.datetime.combine(datetime.date(year, month, 1),
                                         datetime.time(0, 0, 0))
    end_dt = datetime.datetime.combine(datetime.date(year, month, last_day),
                                       datetime.time(23, 59, 59))
    return (start_dt, end_dt)
| StarcoderdataPython |
1787178 | <gh_stars>1-10
import os
import shutil
from . import asciifileparser
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def parse(filePath):
    """
    Returns a full parsed Maya ASCII file.
    :type filePath: str
    :rtype: mason.asciiscene.AsciiScene
    """
    parser = asciifileparser.AsciiFileParser(filePath)
    return parser.scene
def createNewScene(destination):
    """
    Copies the bundled untitled scene file to the specified location.

    Returns the destination path on success, or an empty string when the
    source and destination are the same file.
    :type destination: str
    :rtype: str
    """
    source_file = os.path.join(os.path.dirname(__file__), 'tests', 'untitled.ma')
    try:
        return shutil.copyfile(source_file, destination)
    except shutil.SameFileError as error:
        log.debug(error)
        return ''
| StarcoderdataPython |
160497 | import MySQLdb
from Model.pessoa_model import PessoaModel
class PessoaDao:
    """Data-access object for the CLIENTE table.

    All queries are parameterized (MySQLdb's ``%s`` paramstyle) so that user
    supplied values are never interpolated into SQL text — the original built
    every statement with ``str.format``, which is an SQL-injection vector.
    """

    def __init__(self):
        # Open the connection and keep one cursor for the DAO's lifetime.
        # NOTE(review): credentials are hard-coded; they should come from
        # configuration/environment rather than source control.
        self.connection = MySQLdb.connect(host='mysql.padawans.dev',
                                          database='padawans16',
                                          user='padawans16',
                                          passwd="<PASSWORD>")
        self.cursor = self.connection.cursor()

    def get_all(self):
        """Return every CLIENTE row as a list of PessoaModel dicts."""
        self.cursor.execute("SELECT * FROM CLIENTE")
        rows = self.cursor.fetchall()
        people = []
        for row in rows:
            # Column order: CODIGO, NOME, SOBRENOME, IDADE, GENERO, EMAIL, TELEFONE
            model = PessoaModel(row[1], row[2], row[3], row[4], row[5], row[6], row[0])
            people.append(model.__dict__)
        return people

    def get_by_id(self, codigo):
        """Return one CLIENTE row (as a PessoaModel dict) looked up by primary key."""
        self.cursor.execute("SELECT * FROM CLIENTE WHERE CODIGO = %s", (codigo,))
        row = self.cursor.fetchone()
        model = PessoaModel(row[1], row[2], row[3], row[4], row[5], row[6], row[0])
        return model.__dict__

    def insert(self, pessoa: PessoaModel):
        """Insert *pessoa* and return its dict with the generated primary key set."""
        self.cursor.execute(
            "INSERT INTO CLIENTE (NOME, SOBRENOME, IDADE, GENERO, EMAIL, TELEFONE) "
            "VALUES (%s, %s, %s, %s, %s, %s)",
            (pessoa.nome, pessoa.sobrenome, pessoa.idade,
             pessoa.genero, pessoa.email, pessoa.telefone))
        self.connection.commit()
        # lastrowid holds the AUTO_INCREMENT key produced by the INSERT.
        pessoa.codigo = self.cursor.lastrowid
        return pessoa.__dict__

    def update(self, pessoa: PessoaModel):
        """Overwrite the row identified by pessoa.codigo with pessoa's fields."""
        self.cursor.execute(
            "UPDATE CLIENTE SET NOME = %s, SOBRENOME = %s, IDADE = %s, "
            "GENERO = %s, EMAIL = %s, TELEFONE = %s WHERE CODIGO = %s",
            (pessoa.nome, pessoa.sobrenome, pessoa.idade,
             pessoa.genero, pessoa.email, pessoa.telefone, pessoa.codigo))
        self.connection.commit()
        return pessoa.__dict__

    def remove(self, codigo):
        """Delete the row with primary key *codigo*; return a confirmation message."""
        self.cursor.execute("DELETE FROM CLIENTE WHERE CODIGO = %s", (codigo,))
        self.connection.commit()
        return 'Removido a pessoa de id: {}'.format(codigo)
| StarcoderdataPython |
19674 | <reponame>maxburke/arrow<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pyarrow as pa
import pyarrow.fs as fs
try:
import pyarrow.dataset as ds
except ImportError:
ds = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not dataset'
pytestmark = pytest.mark.dataset
@pytest.fixture
@pytest.mark.parquet
def mockfs():
    """In-memory mock filesystem with two small parquet files.

    Layout: subdir/1/xxx/file0.parquet and subdir/2/yyy/file1.parquet, each
    holding the same 5-row table with columns i64 (int64) and f64 (float64).
    """
    # Function-local import: pyarrow.parquet is only required when this
    # fixture actually runs.
    import pyarrow.parquet as pq
    mockfs = fs._MockFileSystem()
    data = [
        list(range(5)),
        list(map(float, range(5)))
    ]
    schema = pa.schema([
        pa.field('i64', pa.int64()),
        pa.field('f64', pa.float64())
    ])
    batch = pa.record_batch(data, schema=schema)
    table = pa.Table.from_batches([batch])
    directories = [
        'subdir/1/xxx',
        'subdir/2/yyy',
    ]
    # Write one copy of the table into each directory; the directory
    # components later double as partition values (group/key).
    for i, directory in enumerate(directories):
        path = '{}/file{}.parquet'.format(directory, i)
        mockfs.create_dir(directory)
        with mockfs.open_output_stream(path) as out:
            pq.write_table(table, out)
    return mockfs
@pytest.fixture
def dataset(mockfs):
    """Dataset discovered from the mock filesystem.

    Uses a SchemaPartitionScheme so the path segments under 'subdir' are
    parsed as partition columns group (int32) and key (string).
    """
    format = ds.ParquetFileFormat()
    selector = fs.FileSelector('subdir', recursive=True)
    options = ds.FileSystemDiscoveryOptions('subdir')
    discovery = ds.FileSystemDataSourceDiscovery(mockfs, selector, format,
                                                 options)
    discovery.partition_scheme = ds.SchemaPartitionScheme(
        pa.schema([
            pa.field('group', pa.int32()),
            pa.field('key', pa.string())
        ])
    )
    source = discovery.finish()
    schema = discovery.inspect()
    return ds.Dataset([source], schema)
def test_filesystem_data_source(mockfs):
    """Construct FileSystemDataSource directly, with and without partition
    expressions, and check the source-level partition expression round-trips."""
    file_format = ds.ParquetFileFormat()
    paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']
    # Trivial per-file partitions (always-true) and no source partition.
    partitions = [ds.ScalarExpression(True), ds.ScalarExpression(True)]
    source = ds.FileSystemDataSource(mockfs, paths, partitions,
                                     source_partition=None,
                                     file_format=file_format)
    # Now with a real source-level partition and per-file expressions.
    source_partition = ds.ComparisonExpression(
        ds.CompareOperator.Equal,
        ds.FieldExpression('source'),
        ds.ScalarExpression(1337)
    )
    partitions = [
        ds.ComparisonExpression(
            ds.CompareOperator.Equal,
            ds.FieldExpression('part'),
            ds.ScalarExpression(1)
        ),
        ds.ComparisonExpression(
            ds.CompareOperator.Equal,
            ds.FieldExpression('part'),
            ds.ScalarExpression(2)
        )
    ]
    source = ds.FileSystemDataSource(mockfs, paths, partitions,
                                     source_partition=source_partition,
                                     file_format=file_format)
    assert source.partition_expression.equals(source_partition)
def test_dataset(dataset):
    """Scan the fixture dataset end-to-end: unfiltered scan yields both files'
    batches; a filtered, threaded scan returns only matching rows."""
    assert isinstance(dataset, ds.Dataset)
    assert isinstance(dataset.schema, pa.Schema)
    # TODO(kszucs): test non-boolean expressions for filter do raise
    builder = dataset.new_scan()
    assert isinstance(builder, ds.ScannerBuilder)
    scanner = builder.finish()
    assert isinstance(scanner, ds.Scanner)
    # One scan task per parquet file in the mock filesystem.
    assert len(list(scanner.scan())) == 2
    expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
    expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
    for task in scanner.scan():
        assert isinstance(task, ds.ScanTask)
        for batch in task.execute():
            assert batch.column(0).equals(expected_i64)
            assert batch.column(1).equals(expected_f64)
    table = scanner.to_table()
    assert isinstance(table, pa.Table)
    assert len(table) == 10
    # Filter i64 == 1; one matching row per file, plus partition columns.
    condition = ds.ComparisonExpression(
        ds.CompareOperator.Equal,
        ds.FieldExpression('i64'),
        ds.ScalarExpression(1)
    )
    scanner = dataset.new_scan().use_threads(True).filter(condition).finish()
    result = scanner.to_table()
    assert result.to_pydict() == {
        'i64': [1, 1],
        'f64': [1., 1.],
        'group': [1, 2],
        'key': ['xxx', 'yyy']
    }
def test_scanner_builder(dataset):
    """Exercise ScannerBuilder: explicit construction, invalid projection,
    and a projection that narrows batches to a single column."""
    builder = ds.ScannerBuilder(dataset, memory_pool=pa.default_memory_pool())
    scanner = builder.finish()
    assert isinstance(scanner, ds.Scanner)
    assert len(list(scanner.scan())) == 2
    # Projecting an unknown column must fail validation.
    with pytest.raises(pa.ArrowInvalid):
        dataset.new_scan().project(['unknown'])
    builder = dataset.new_scan(memory_pool=pa.default_memory_pool())
    scanner = builder.project(['i64']).finish()
    assert isinstance(scanner, ds.Scanner)
    assert len(list(scanner.scan())) == 2
    for task in scanner.scan():
        for batch in task.execute():
            assert batch.num_columns == 1
def test_abstract_classes():
    """Instantiating any of the abstract dataset base classes must raise TypeError."""
    abstract_bases = (
        ds.FileFormat,
        ds.Scanner,
        ds.DataSource,
        ds.Expression,
        ds.PartitionScheme,
    )
    for base in abstract_bases:
        with pytest.raises(TypeError):
            base()
def test_partition_scheme():
    """Parse partition paths with SchemaPartitionScheme (positional segments)
    and HivePartitionScheme (key=value segments) into equality expressions."""
    schema = pa.schema([
        pa.field('i64', pa.int64()),
        pa.field('f64', pa.float64())
    ])
    # Both concrete schemes are PartitionScheme subclasses.
    for klass in [ds.SchemaPartitionScheme, ds.HivePartitionScheme]:
        scheme = klass(schema)
        assert isinstance(scheme, ds.PartitionScheme)
    scheme = ds.SchemaPartitionScheme(
        pa.schema([
            pa.field('group', pa.int64()),
            pa.field('key', pa.float64())
        ])
    )
    # Positional segments map to the schema fields in order: group=3, key=3.14.
    expr = scheme.parse('/3/3.14')
    assert isinstance(expr, ds.Expression)
    expected = ds.AndExpression(
        ds.ComparisonExpression(
            ds.CompareOperator.Equal,
            ds.FieldExpression('group'),
            ds.ScalarExpression(3)
        ),
        ds.ComparisonExpression(
            ds.CompareOperator.Equal,
            ds.FieldExpression('key'),
            ds.ScalarExpression(3.14)
        )
    )
    assert expr.equals(expected)
    # A segment that cannot be cast to the field type is rejected.
    with pytest.raises(pa.ArrowInvalid):
        scheme.parse('/prefix/3/aaa')
    scheme = ds.HivePartitionScheme(
        pa.schema([
            pa.field('alpha', pa.int64()),
            pa.field('beta', pa.int64())
        ])
    )
    # Hive-style segments carry their field name explicitly.
    expr = scheme.parse('/alpha=0/beta=3')
    expected = ds.AndExpression(
        ds.ComparisonExpression(
            ds.CompareOperator.Equal,
            ds.FieldExpression('alpha'),
            ds.ScalarExpression(0)
        ),
        ds.ComparisonExpression(
            ds.CompareOperator.Equal,
            ds.FieldExpression('beta'),
            ds.ScalarExpression(3)
        )
    )
    assert expr.equals(expected)
def test_expression():
    """Cover the expression API: construction of every node type, equality,
    schema validation, simplification via assume(), and string formatting."""
    a = ds.ScalarExpression(1)
    b = ds.ScalarExpression(1.1)
    c = ds.ScalarExpression(True)
    equal = ds.ComparisonExpression(ds.CompareOperator.Equal, a, b)
    assert equal.op() == ds.CompareOperator.Equal
    and_ = ds.AndExpression(a, b)
    assert and_.left_operand.equals(a)
    assert and_.right_operand.equals(b)
    assert and_.equals(ds.AndExpression(a, b))
    assert and_.equals(and_)
    # Smoke-test construction of the remaining expression node types
    # (results intentionally discarded; only constructor validity matters).
    ds.AndExpression(a, b, c)
    ds.OrExpression(a, b)
    ds.OrExpression(a, b, c)
    ds.NotExpression(ds.OrExpression(a, b, c))
    ds.IsValidExpression(a)
    ds.CastExpression(a, pa.int32())
    ds.CastExpression(a, pa.int32(), safe=True)
    ds.InExpression(a, pa.array([1, 2, 3]))
    condition = ds.ComparisonExpression(
        ds.CompareOperator.Greater,
        ds.FieldExpression('i64'),
        ds.ScalarExpression(5)
    )
    schema = pa.schema([
        pa.field('i64', pa.int64()),
        pa.field('f64', pa.float64())
    ])
    # Validating a comparison against the schema yields a boolean type.
    assert condition.validate(schema) == pa.bool_()
    i64_is_5 = ds.ComparisonExpression(
        ds.CompareOperator.Equal,
        ds.FieldExpression('i64'),
        ds.ScalarExpression(5)
    )
    i64_is_7 = ds.ComparisonExpression(
        ds.CompareOperator.Equal,
        ds.FieldExpression('i64'),
        ds.ScalarExpression(7)
    )
    # assume() folds "i64 > 5" to a constant once i64's value is known.
    assert condition.assume(i64_is_5).equals(ds.ScalarExpression(False))
    assert condition.assume(i64_is_7).equals(ds.ScalarExpression(True))
    assert str(condition) == "(i64 > 5:int64)"
@pytest.mark.parametrize('paths_or_selector', [
    fs.FileSelector('subdir', recursive=True),
    [
        'subdir',
        'subdir/1',
        'subdir/1/xxx',
        'subdir/1/xxx/file0.parquet',
        'subdir/2',
        'subdir/2/yyy',
        'subdir/2/yyy/file1.parquet',
    ]
])
def test_file_system_discovery(mockfs, paths_or_selector):
    """Discover the mock filesystem (via a FileSelector or an explicit path
    list), attach a partition scheme, and verify the resulting scan produces
    the data columns plus the parsed partition columns."""
    format = ds.ParquetFileFormat()
    options = ds.FileSystemDiscoveryOptions('subdir')
    # Default discovery options.
    assert options.partition_base_dir == 'subdir'
    assert options.ignore_prefixes == ['.', '_']
    assert options.exclude_invalid_files is True
    discovery = ds.FileSystemDataSourceDiscovery(
        mockfs, paths_or_selector, format, options
    )
    assert isinstance(discovery.inspect(), pa.Schema)
    assert isinstance(discovery.inspect_schemas(), list)
    assert isinstance(discovery.finish(), ds.FileSystemDataSource)
    assert isinstance(discovery.partition_scheme, ds.DefaultPartitionScheme)
    assert discovery.root_partition.equals(ds.ScalarExpression(True))
    # Switch to a schema-based scheme: path segments become group/key columns.
    discovery.partition_scheme = ds.SchemaPartitionScheme(
        pa.schema([
            pa.field('group', pa.int32()),
            pa.field('key', pa.string())
        ])
    )
    data_source = discovery.finish()
    assert isinstance(data_source, ds.DataSource)
    inspected_schema = discovery.inspect()
    dataset = ds.Dataset([data_source], inspected_schema)
    scanner = dataset.new_scan().finish()
    assert len(list(scanner.scan())) == 2
    expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
    expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
    # Each task corresponds to one file/partition: (group=1, key='xxx') and
    # (group=2, key='yyy'); partition columns are constant within a batch.
    for task, group, key in zip(scanner.scan(), [1, 2], ['xxx', 'yyy']):
        expected_group_column = pa.array([group] * 5, type=pa.int32())
        expected_key_column = pa.array([key] * 5, type=pa.string())
        for batch in task.execute():
            assert batch.num_columns == 4
            assert batch[0].equals(expected_i64)
            assert batch[1].equals(expected_f64)
            assert batch[2].equals(expected_group_column)
            assert batch[3].equals(expected_key_column)
    table = scanner.to_table()
    assert isinstance(table, pa.Table)
    assert len(table) == 10
    assert table.num_columns == 4
| StarcoderdataPython |
23187 | # from pipet.core.sql.query_interface import *
from pypipet.core.operations.inventory import *
import pytest
from pprint import pprint
_supplie_id = 1
def test_update_invs(session, obj_classes, shop_conn):
    """Smoke test: bulk-update inventory for one SKU, then read it back.

    No assertions — the result is only printed; relies on the session /
    obj_classes / shop_conn fixtures provided by conftest.
    """
    invs = [ {'sku':'s22456', 'supplier_id':_supplie_id, 'qty':20}]
    update_inventory_bulk(obj_classes, session,
                    invs, ignore_new=False)
    res = get_inventory_by_sku(obj_classes, session,
                    invs[0]['sku'], by_supplier=False)
    pprint(res)
# def test_match_upc(session, obj_classes):
# invs = [ {'upc':'48743213', 'supplier_id':1, 'qty':10},
# {'upc':'9348886', 'supplier_id':1, 'qty':10}]
# res = match_variation_sku_by_upc(obj_classes.get('variation'), session, invs)
# assert res is not None
def test_update_inv(session, obj_classes, shop_conn):
    """Smoke test: update a single SKU's inventory record, then fetch it.

    The fetched result is not asserted; this only verifies the calls run.
    """
    inv = {'sku':'s2789', 'supplier_id':_supplie_id, 'qty':82}
    update_inventory_db_by_sku(obj_classes, session, inv['sku'], inv)
    res = get_inventory_by_sku(obj_classes, session,
                    inv['sku'], by_supplier=False)
def test_update_front_shop_bulk(obj_classes, session, shop_conn):
    """Smoke test: push in-stock state for all products to the front shop."""
    update_instock_front_shop(obj_classes, session, shop_conn, set_inventory_management=True)
def test_update_front_shop(obj_classes, session, shop_conn):
    """Smoke test: push in-stock state to the front shop for two known SKUs."""
    update_instock_front_shop_by_sku(obj_classes, session, shop_conn, 's2789')
    update_instock_front_shop_by_sku(obj_classes, session, shop_conn, 's22456')
3333768 | <filename>scripts/install_on_ubuntu.py
#!/usr/bin/env python3
import json
import os
import shutil
import sys
from os import path
sys.path.append("src")
# noinspection PyPep8
from fvttmv.config import Keys
# noinspection PyPep8
from cli_wrapper.__constants import app_name, path_to_config_file_linux
# Final install location of the built executable on the target system.
path_to_executable_file = "/usr/bin/{0}".format(app_name)
def _confirm(prompt):
    """Ask a yes/no question on stdin; return True iff the answer is 'y' (case-insensitive)."""
    return input(prompt).lower() == "y"


def install():
    """Interactively install the fvttmv executable on Ubuntu.

    Steps:
      1. Verify the built executable exists under dist/.
      2. Ask for the foundry 'Data' directory and write the JSON config file.
      3. Copy the executable to /usr/bin and make it world-executable.

    A user-initiated cancel exits with status 0; real failures clean up any
    partially created files and exit with status 1 (the original used bare
    ``exit()``, which reported success even on failure).
    """
    print("Installing {0}".format(app_name))
    if not os.path.exists("dist/{0}".format(app_name)):
        raise Exception("No {0} found under dist/. Did you successfully build the project?".format(app_name))
    path_to_foundry_data = input("Enter path to the 'Data' folder of your foundry data (for example "
                                 "/home/user/foundrydata/Data): ")
    if not path_to_foundry_data.endswith("Data"):
        # BUG FIX: the original message wrongly claimed the path "does end" with Data.
        if not _confirm("The entered path does not end with Data. Make sure you entered the right one. "
                        "Do you want to continue (y,n): "):
            print("Cancelling installation...")
            sys.exit(0)
    if not path.exists(path_to_foundry_data):
        if not _confirm("The entered path does not exist. "
                        "Do you want to continue (y,n): "):
            print("Cancelling installation...")
            sys.exit(0)
    config_dict = \
        {
            Keys.absolute_path_to_foundry_data_key: path.abspath(path_to_foundry_data)
        }
    try:
        with open(path_to_config_file_linux, "w+", encoding="utf-8") as config_fout:
            json.dump(config_dict, config_fout)
        print("Created config file {0}".format(path_to_config_file_linux))
    except BaseException as error:
        print(error)
        print("Unable to write config file to {0}. Cancelling installation...".format(path_to_config_file_linux))
        sys.exit(1)
    try:
        shutil.copy("dist/{0}".format(app_name),
                    path_to_executable_file)
    except BaseException as error:
        print(error)
        print("Unable to copy {0} to /usr/bin/. "
              "Try running installer with sudo. "
              "Cancelling installation...".format(app_name))
        # Roll back the config file written above.
        os.remove(path_to_config_file_linux)
        sys.exit(1)
    try:
        # 0o555: readable and executable by everyone, writable by no one.
        os.chmod(path_to_executable_file, 0o555)
    except BaseException as error:
        print(error)
        print("Unable to make /usr/bin/fvttmv executable. "
              "Try running installer with sudo. "
              "Cancelling installation...")
        # Roll back both artifacts created so far.
        os.remove(path_to_config_file_linux)
        os.remove(path_to_executable_file)
        sys.exit(1)
    print("Successfully installed {0}".format(app_name))
# Script entry point: run the interactive installer when executed directly.
if __name__ == "__main__":
    install()
| StarcoderdataPython |
1730580 | # Remove Element
#
# Given an array and a value, remove all instances of that value in-place and return the new length.
#
# Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
#
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#
# Example:
#
# Given nums = [3,2,2,3], val = 3,
#
# Your function should return length = 2, with the first two elements of nums being 2.
class Solution:
    def removeElement(self, nums, val):
        """
        Remove all instances of ``val`` from ``nums`` in-place and return the
        new length; the first k returned elements of ``nums`` are the kept
        values (their relative order preserved).

        Single forward pass with a write pointer: O(n) time, O(1) extra space.
        Replaces the original two-pointer swap version, which carried redundant
        special cases for empty/one-element inputs and an unused ``res`` local.

        :type nums: List[int]
        :type val: int
        :rtype: int
        """
        if nums is None:
            return 0
        write = 0
        for x in nums:
            if x != val:
                nums[write] = x
                write += 1
        return write
# Quick manual check on the example from the problem statement above.
demo_nums = [3, 2, 2, 3]
Solution().removeElement(demo_nums, 3)
| StarcoderdataPython |
class BaseFunction:
    """Read-only record of a profiled function.

    Stores the function's name, how many times it was called, and the time
    attributed to the function itself in nanoseconds.
    """

    def __init__(self, name, n_calls, internal_ns):
        self._name = name
        self._n_calls = n_calls
        self._internal_ns = internal_ns

    @property
    def name(self):
        """Function name as recorded by the profiler."""
        return self._name

    @property
    def n_calls(self):
        """Number of times the function was invoked."""
        return self._n_calls

    @property
    def internal_ns(self):
        """Nanoseconds attributed to the function's own code."""
        return self._internal_ns
class Lines:
    """Read-only record of one profiled source line.

    Holds the line's text, its call count, and the time spent on the line
    itself (internal) versus in calls it makes (external).
    """

    def __init__(self, line_str, n_calls, internal, external):
        self._line_str = line_str
        self._n_calls = n_calls
        self._internal = internal
        self._external = external

    @property
    def text(self):
        """Source text of the line."""
        return self._line_str

    @property
    def n_calls(self):
        """Number of times the line was executed."""
        return self._n_calls

    @property
    def internal(self):
        """Time spent on the line itself."""
        return self._internal

    @property
    def external(self):
        """Time spent in calls made from the line."""
        return self._external

    @property
    def total(self):
        """Combined internal + external time for the line."""
        return self.internal + self.external
class Function(BaseFunction):
    """A profiled function together with its per-line timing records.

    Extends BaseFunction with the list of Lines and a total-time property.
    """

    def __init__(self, name, lines, n_calls, internal_ns):
        # Reuse BaseFunction's storage for name/n_calls/internal_ns instead of
        # duplicating the assignments and property definitions (the original
        # re-implemented all three in this subclass).
        super().__init__(name, n_calls, internal_ns)
        self._lines = lines

    @property
    def lines(self):
        """List of Lines records belonging to this function."""
        return self._lines

    @property
    def total(self):
        """Total time in ns: each line's total plus the function's own internal time."""
        return sum(line.total for line in self._lines) + self._internal_ns
class Profile:
    """Container for parsed profiler output: a flat list of Function records."""

    @staticmethod
    def from_data(data):
        """Build a Profile from the deserialized profiler dict.

        Expects data['functions'] to map keys to per-function dicts, each with
        'name', 'n_calls', 'internal_ns' and a 'lines' list.
        """
        profile = Profile()
        profile._functions = []
        for fdata in data['functions'].values():
            line_records = [
                Lines(entry['line_str'], entry['n_calls'],
                      entry['internal_ns'], entry['external_ns'])
                for entry in fdata['lines']
            ]
            profile._functions.append(
                Function(lines=line_records, name=fdata['name'],
                         n_calls=fdata['n_calls'],
                         internal_ns=fdata['internal_ns']))
        return profile

    @property
    def functions(self):
        """List of Function records in this profile."""
        return self._functions
| StarcoderdataPython |
132843 | <filename>PhotoNAS/dataset.py
import numpy as np
import os
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
# Disable PIL's decompression-bomb size check so very large images can load.
Image.MAX_IMAGE_PIXELS = None
from matplotlib import pyplot as plt
class TransferDataset(Dataset):
    """Dataset of content images resized to 512x512 RGB tensors.

    Lists every file under *content_dir* (shuffled once at construction) and
    yields each image as a torch tensor after resizing.
    """

    def __init__(self, content_dir):
        super(TransferDataset, self).__init__()
        self.content_dir = content_dir
        self.content_name_list = self.get_name_list(self.content_dir)
        self.transforms = self.transform()

    def get_name_list(self, name):
        """Return the shuffled list of full paths of all entries under *name*."""
        paths = [os.path.join(name, entry) for entry in os.listdir(name)]
        np.random.shuffle(paths)
        return paths

    def transform(self):
        """Build the preprocessing pipeline: resize to 512x512, then to tensor."""
        return transforms.Compose([
            transforms.Resize((512, 512)),
            transforms.ToTensor()
        ])

    def __len__(self):
        return len(self.content_name_list)

    def __getitem__(self, item):
        image = Image.open(self.content_name_list[item]).convert('RGB')
        return self.transforms(image)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.