import PlayerClass
import ActionMethods
import Log
import Items
import copy
import random


class Sym:
    # logName - string
    def __init__(self, logName):
        self.listOfTeams = []        # Team - a list of players with some additional stuff.
        self.dayDuration = 12        # amount of hours in a day
        self.hourlyCalorieCost = 50  # calorie cost per hour - no matter the action
        self.theLog = Log.LogObject(logName)  # log where stuff is going to be written. For now only for testing purposes.

    # runs the simulation for one day
    def runForADay(self):
        for t in self.listOfTeams:
            t.resetPlayersTime()
        stringToWrite = "---Status:\n"
        for team in self.listOfTeams:
            for p in team.playerList:
                stringToWrite += p.getStringStatus(1) + "\tinventory\n" + p.getStringItemList(2)
        self.theLog.writeToLog(stringToWrite)
        allFinished = False
        while not allFinished:
            # this shuffle is so that one team doesn't always go first
            random.shuffle(self.listOfTeams)
            # To implement - teams make actions as a group. Sometimes only part of the team does the action.
            result = self.findEarlyiestPlayer()
            if result[0] is None:
                allFinished = self.areAllFinished()
                break
            playersPerformingAction = result[1].listOfPlayersWithGivenTime(result[0].simTime)
            resultOfDecision = self.makeGroupDecisionNoCathegories(playersPerformingAction)
            action = resultOfDecision[0]
            target = resultOfDecision[1]
            ideaOriginators = resultOfDecision[2]
            resultOfRebelDetection = self.detectRebelsAndRemoveThemFromParticipants(playersPerformingAction, ideaOriginators)
            listOfRebels = resultOfRebelDetection[0]
            playersPerformingAction = resultOfRebelDetection[1]
            # now we have to implement bumps. For now takeCareOfBump does nothing,
            # since relationships need to be implemented first.
            bumpDidntFailAction = self.takeCareOfBump(playersPerformingAction, target)
            if bumpDidntFailAction:
                actionResult = action.applyAction(playersPerformingAction, target)
            else:
                actionResult = action.failAction(playersPerformingAction, target)
            # This listOfPerformers is only there to make the log look nice. Players that are
            # both target and performer are removed from the performer list by the action
            # methods anyway, to be extra safe.
            if target is not None:
                listOfPerformers = [x for x in playersPerformingAction if x not in target]
            else:
                listOfPerformers = playersPerformingAction
            self.writeActionInformationToLog(action, actionResult, listOfPerformers, target, listOfRebels, ideaOriginators)
            for p in playersPerformingAction:
                p.removeCalories(self.hourlyCalorieCost * action.time)
                p.simTime += action.time
                p.passTimeOnAfflictions(action.time)
                afflictionChanges = p.checkForAfflictionsToPutAndRemove()
                self.writeAfflictionInformationToLog(p, afflictionChanges)
                # writes the player status to the log
                stringToWrite = p.getStringStatus(1) + "\tinventory\n" + p.getStringItemList(2) \
                    + "\ttime passed so far:{}\n".format(p.simTime)
                self.theLog.writeToLog(stringToWrite)
                isDead = p.isDead()
                if isDead[0]:
                    result[1].removePlayer(p.id)
                    stringToWrite = p.Name + " " + isDead[1] + '\n'
                    self.theLog.writeToLog(stringToWrite)
            allFinished = self.areAllFinished()

    # finds the player with the least time passed.
    # returns None if everyone is at or above dayDuration.
    def findEarlyiestPlayer(self):
        playerToReturn = None
        teamToReturn = None
        playerTime = self.dayDuration
        for team in self.listOfTeams:
            for player in team.playerList:
                if player.simTime < playerTime:
                    playerTime = player.simTime
                    playerToReturn = player
                    teamToReturn = team
        # player with the least time passed - and his team
        return playerToReturn, teamToReturn

    # makes the decision of one player.
    # player - PlayerClass.Player
    # group - list of players where the decision making takes place. player should be inside it.
    def makeDecisionNoCathegories(self, player, group):
        # move the deciding player to the front of the group
        group.remove(player)
        group.insert(0, player)
        actionToReturn = None  # action to perform
        actionPlayers = None   # performers of the action
        actionTarget = None    # target of the action
        rebelReason = player.isRebel()
        if rebelReason == PlayerClass.Player.rebelReasonDict["hunger"]:
            result = ActionMethods.EatFoodCheck([player])
            if result:
                actionToReturn = PlayerClass.getActionByName("eat food")
                actionPlayers = [player]
            elif ActionMethods.AskForFoodCheck(group, [player]):
                actionToReturn = PlayerClass.getActionByName("ask for food")
                actionPlayers = group
                actionTarget = [player]
            else:
                actionToReturn = PlayerClass.getActionByName("gather food")
                actionPlayers = group
        else:
            # random action
            actionToReturn = PlayerClass.getPurelyRandomAction()
            actionPlayers = group
            if actionToReturn.name == "ask for food":
                actionTarget = [player]
        return actionToReturn, actionPlayers, actionTarget

    # makes the decision of a group of players.
    # listOfPlayers - list with Player as elements
    def makeGroupDecisionNoCathegories(self, listOfPlayers):
        actionsAndPersuasionList = []
        action = None
        actionTarget = None
        originators = None
        # iterate over a copy, because makeDecisionNoCathegories reorders listOfPlayers
        for p in list(listOfPlayers):
            # result: [0] - action, [1] - players performing the action, [2] - players to perform the action on
            result = self.makeDecisionNoCathegories(p, listOfPlayers)
            action = result[0]
            actionPlayers = result[1]
            actionTarget = result[2]
            persuasion = p.Charisma + 0.4 * p.Intelligence + 0.1 * random.randint(0, 20)
            appendIsNeeded = True
            for AaP in actionsAndPersuasionList:
                if AaP[0].name == action.name:
                    appendIsNeeded = False
                    AaP[1] += persuasion
                    AaP[3].append(p)
                    break
            if appendIsNeeded:
                actionsAndPersuasionList.append([action, persuasion, actionTarget, [p]])
        # find the action with the biggest total persuasion
        action = None
        actionTarget = None
        persuasion = -1
        for AaP in actionsAndPersuasionList:
            if AaP[1] > persuasion:
                persuasion = AaP[1]
                action = AaP[0]
                actionTarget = AaP[2]
                originators = AaP[3]
        return action, actionTarget, originators

    # detects if all players in all teams have finished their actions for today
    def areAllFinished(self):
        for team in self.listOfTeams:
            if not team.checkAllPlayersFinished(self.dayDuration):
                return False
        return True

    # Writes information about a performed action into the log
    def writeActionInformationToLog(self, action, actionResult, playerList, targetList=None, listOfRebels=None, originators=None):
        if actionResult:
            textToPrint = "success"
        else:
            textToPrint = "failure"
        playerNames = ""
        for p in playerList:
            playerNames += p.Name + ', '
        playerNames = playerNames[:-2]
        targetNames = ""
        if targetList and targetList[0] is not None:
            targetNames = " targets: "
            for t in targetList:
                targetNames += t.Name + ', '
            targetNames = targetNames[:-2]
        rebelString = ""
        if listOfRebels:
            rebelString = "\n== People who rebelled: "
            for r in listOfRebels:
                # my god, I goofed. Dictionaries in Python are not meant to be used
                # backwards, and so I ended up with this monstrosity. Oh well.
                tmpString = list(PlayerClass.Player.rebelReasonDict.keys())[
                    list(PlayerClass.Player.rebelReasonDict.values()).index(r["cause"])]
                rebelString += "{0} for {1}, ".format(r["rebel"].Name, tmpString)
            rebelString = rebelString[:-2]
        originatorsText = ""
        if originators:
            originatorsText = " originators:"
            for o in originators:
                originatorsText += o.Name + ', '
            originatorsText = originatorsText[:-2]
        stringToWriteToLog = "\n==== Action Performed: {0}, result:{1}\n== Action performed by:{2}".format(
            action.name, textToPrint, playerNames)
        stringToWriteToLog += targetNames + originatorsText + rebelString + '\n'
        self.theLog.writeToLog(stringToWriteToLog)

    # Writes information about gained and lost afflictions
    def writeAfflictionInformationToLog(self, player, afflictionChanges):
        afflictionStringToWrite = ""
        if afflictionChanges[0]:  # added afflictions
            if len(afflictionChanges[0]) == 1:
                afflictionStringToWrite += "\t{} gained affliction: ".format(player.Name)
            else:
                afflictionStringToWrite += "\t{} gained afflictions: ".format(player.Name)
            for a in afflictionChanges[0]:
                afflictionStringToWrite += "{}, ".format(a.name)
            afflictionStringToWrite = afflictionStringToWrite[:-2]
            afflictionStringToWrite += '\n'
        if afflictionChanges[1]:  # removed afflictions
            if len(afflictionChanges[1]) == 1:
                afflictionStringToWrite += "\t{} lost affliction: ".format(player.Name)
            else:
                afflictionStringToWrite += "\t{} lost afflictions: ".format(player.Name)
            for a in afflictionChanges[1]:
                afflictionStringToWrite += "{}, ".format(a.name)
            afflictionStringToWrite = afflictionStringToWrite[:-2]
            afflictionStringToWrite += '\n'
        if afflictionStringToWrite != "":
            self.theLog.writeToLog(afflictionStringToWrite)

    # gives all players a unique id - because things go bad if they are not unique.
    def giveAllPlayersUniqueIDs(self):
        listOfUsedIDs = []
        newId = 0
        for team in self.listOfTeams:
            for player in team.playerList:
                while newId in listOfUsedIDs:
                    newId = random.randint(1, 100000)
                player.id = newId
                listOfUsedIDs.append(newId)

    # of course, targets have no say in whether they want to be part of an action or not.
    # returns a list of rebels and the list of remaining participants
    def detectRebelsAndRemoveThemFromParticipants(self, listOfPlayers, ideaOriginators):
        listOfRebels = []
        listOfParticipants = []
        for player in listOfPlayers:
            rebelReason = player.isRebel()
            if rebelReason != PlayerClass.Player.rebelReasonDict["no rebel"] and player not in ideaOriginators:
                listOfRebels.append({"rebel": player, "cause": copy.deepcopy(rebelReason)})
            else:
                listOfParticipants.append(player)
        return listOfRebels, listOfParticipants

    # Takes care of bumps.
    # Returns True if there was no bump, or if the bump didn't interrupt the current action.
    # Stub for now - relationships need to be implemented before this can do anything.
    def takeCareOfBump(self, listOfPlayers, target):
        listOfBumpablePlayers = []
        return True


class Team:
    # listOfPlayers - list with players as elements
    def __init__(self, listOfPlayers):
        self.playerList = listOfPlayers
        self.allPlayersFinished = False

    # returns a player from the list. None, if the player is not in the team.
    # name - either string (player name) or int (player id)
    def getPlayer(self, name):
        if type(name) is int:
            for p in self.playerList:
                if p.id == name:
                    return p
        elif type(name) is str:
            for p in self.playerList:
                if p.Name == name:
                    return p
        else:
            raise Exception("Wrong parameter type")
        return None

    # checks if all players have finished the day.
    # dayTimeLimit - int
    def checkAllPlayersFinished(self, dayTimeLimit):
        for p in self.playerList:
            if p.simTime < dayTimeLimit:
                self.allPlayersFinished = False
                return False
        self.allPlayersFinished = True
        return True

    # sets the time of all players in the team to zero
    def resetPlayersTime(self):
        for p in self.playerList:
            p.simTime = 0

    # checks if all players have the same time.
    # players with different times can't perform an action together,
    # because they are at different points in time.
    def areAllPlayersTheSameTime(self):
        time = self.playerList[0].simTime
        for p in self.playerList:
            if p.simTime != time:
                return False
        return True

    # Returns the list of players with the given time
    def listOfPlayersWithGivenTime(self, time):
        listToReturn = []
        for p in self.playerList:
            if p.simTime == time:
                listToReturn.append(p)
        return listToReturn

    # removes a player from the team. Returns True if the player was removed.
    # name - str or int
    def removePlayer(self, name):
        for p in self.playerList:
            if p.Name == name or p.id == name:
                self.playerList.remove(p)
                return True
        return False


# --- test ---
pl = PlayerClass.Player(0)
pl.BaseWisdom = 5
pl.Name = "Jan Kowalski"
pl.calculateStartStatistics()

pl2 = PlayerClass.Player(0)
pl2.BaseWisdom = 0
pl2.Name = "Adam Nowak"
pl2.calculateStartStatistics()

team = Team([pl, pl2])
theLog = Log.LogObject('log1')
s = Sym('log1')
s.listOfTeams.append(team)
s.giveAllPlayersUniqueIDs()
for x in range(1, 100):
    theLog.writeToLog("\n~~ DAY {}~~\n\n".format(x))
    s.runForADay()
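# --- note ---
# A minimal sketch (not part of the simulation) of a cleaner alternative to the
# reverse dictionary lookup in writeActionInformationToLog. Assuming the reason
# codes in rebelReasonDict are unique, the inverse mapping can be built once and
# reused, instead of scanning keys() and values() for every rebel:
#
#     reasonNameByCode = {code: name
#                         for name, code in PlayerClass.Player.rebelReasonDict.items()}
#     tmpString = reasonNameByCode[r["cause"]]
#
# reasonNameByCode is a hypothetical name; it does not exist in this code base.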
"""Generated message classes for dataflow version v1b3.

Develops and executes data processing patterns like ETL, batch computation,
and continuous computation.
"""
# NOTE: This file is autogenerated and should not be edited by hand.

from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types


package = 'dataflow'


class ApproximateProgress(_messages.Message):
  """Obsolete in favor of ApproximateReportedProgress and
  ApproximateSplitRequest.

  Fields:
    percentComplete: Obsolete.
    position: Obsolete.
    remainingTime: Obsolete.
  """

  percentComplete = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  position = _messages.MessageField('Position', 2)
  remainingTime = _messages.StringField(3)


class ApproximateReportedProgress(_messages.Message):
  """A progress measurement of a WorkItem by a worker.

  Fields:
    consumedParallelism: Total amount of parallelism in the portion of input
      of this task that has already been consumed and is no longer active.
      In the first two examples above (see remaining_parallelism), the value
      should be 29 or 2 respectively. The sum of remaining_parallelism and
      consumed_parallelism should equal the total amount of parallelism in
      this work item. If specified, must be finite.
    fractionConsumed: Completion as fraction of the input consumed, from 0.0
      (beginning, nothing consumed), to 1.0 (end of the input, entire input
      consumed).
    position: A Position within the work to represent a progress.
    remainingParallelism: Total amount of parallelism in the input of this
      task that remains, (i.e. can be delegated to this task and any new
      tasks via dynamic splitting). Always at least 1 for non-finished work
      items and 0 for finished. "Amount of parallelism" refers to how many
      non-empty parts of the input can be read in parallel. This does not
      necessarily equal number of records. An input that can be read in
      parallel down to the individual records is called "perfectly
      splittable". An example of non-perfectly parallelizable input is a
      block-compressed file format where a block of records has to be read
      as a whole, but different blocks can be read in parallel. Examples: *
      If we are processing record #30 (starting at 1) out of 50 in a
      perfectly splittable 50-record input, this value should be 21 (20
      remaining + 1 current). * If we are reading through block 3 in a
      block-compressed file consisting of 5 blocks, this value should be 3
      (since blocks 4 and 5 can be processed in parallel by new tasks via
      dynamic splitting and the current task remains processing block 3). *
      If we are reading through the last block in a block-compressed file,
      or reading or processing the last record in a perfectly splittable
      input, this value should be 1, because apart from the current task, no
      additional remainder can be split off.
  """

  consumedParallelism = _messages.MessageField('ReportedParallelism', 1)
  fractionConsumed = _messages.FloatField(2)
  position = _messages.MessageField('Position', 3)
  remainingParallelism = _messages.MessageField('ReportedParallelism', 4)


class ApproximateSplitRequest(_messages.Message):
  """A suggestion by the service to the worker to dynamically split the
  WorkItem.

  Fields:
    fractionConsumed: A fraction at which to split the work item, from 0.0
      (beginning of the input) to 1.0 (end of the input).
    position: A Position at which to split the work item.
  """

  fractionConsumed = _messages.FloatField(1)
  position = _messages.MessageField('Position', 2)


class AutoscalingEvent(_messages.Message):
  """A structured message reporting an autoscaling decision made by the
  Dataflow service.

  Enums:
    EventTypeValueValuesEnum: The type of autoscaling event to report.

  Fields:
    currentNumWorkers: The current number of workers the job has.
    description: A message describing why the system decided to adjust the
      current number of workers, why it failed, or why the system decided to
      not make any changes to the number of workers.
    eventType: The type of autoscaling event to report.
    targetNumWorkers: The target number of workers the worker pool wants to
      resize to use.
    time: The time this event was emitted to indicate a new target or
      current num_workers value.
  """

  class EventTypeValueValuesEnum(_messages.Enum):
    """The type of autoscaling event to report.

    Values:
      TYPE_UNKNOWN: Default type for the enum. Value should never be
        returned.
      TARGET_NUM_WORKERS_CHANGED: The TARGET_NUM_WORKERS_CHANGED type should
        be used when the target worker pool size has changed at the start of
        an actuation. An event should always be specified as
        TARGET_NUM_WORKERS_CHANGED if it reflects a change in the
        target_num_workers.
      CURRENT_NUM_WORKERS_CHANGED: The CURRENT_NUM_WORKERS_CHANGED type
        should be used when actual worker pool size has been changed, but
        the target_num_workers has not changed.
      ACTUATION_FAILURE: The ACTUATION_FAILURE type should be used when we
        want to report an error to the user indicating why the current
        number of workers in the pool could not be changed. Displayed in the
        current status and history widgets.
      NO_CHANGE: Used when we want to report to the user a reason why we are
        not currently adjusting the number of workers. Should specify both
        target_num_workers, current_num_workers and a decision_message.
    """
    TYPE_UNKNOWN = 0
    TARGET_NUM_WORKERS_CHANGED = 1
    CURRENT_NUM_WORKERS_CHANGED = 2
    ACTUATION_FAILURE = 3
    NO_CHANGE = 4

  currentNumWorkers = _messages.IntegerField(1)
  description = _messages.MessageField('StructuredMessage', 2)
  eventType = _messages.EnumField('EventTypeValueValuesEnum', 3)
  targetNumWorkers = _messages.IntegerField(4)
  time = _messages.StringField(5)


class AutoscalingSettings(_messages.Message):
  """Settings for WorkerPool autoscaling.

  Enums:
    AlgorithmValueValuesEnum: The algorithm to use for autoscaling.

  Fields:
    algorithm: The algorithm to use for autoscaling.
    maxNumWorkers: The maximum number of workers to cap scaling at.
  """

  class AlgorithmValueValuesEnum(_messages.Enum):
    """The algorithm to use for autoscaling.

    Values:
      AUTOSCALING_ALGORITHM_UNKNOWN: The algorithm is unknown, or
        unspecified.
      AUTOSCALING_ALGORITHM_NONE: Disable autoscaling.
      AUTOSCALING_ALGORITHM_BASIC: Increase worker count over time to reduce
        job execution time.
    """
    AUTOSCALING_ALGORITHM_UNKNOWN = 0
    AUTOSCALING_ALGORITHM_NONE = 1
    AUTOSCALING_ALGORITHM_BASIC = 2

  algorithm = _messages.EnumField('AlgorithmValueValuesEnum', 1)
  maxNumWorkers = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class CPUTime(_messages.Message):
  """Modeled after information exposed by /proc/stat.

  Fields:
    rate: Average CPU utilization rate (% non-idle cpu / second) since
      previous sample.
    timestamp: Timestamp of the measurement.
    totalMs: Total active CPU time across all cores (i.e., non-idle) in
      milliseconds since start-up.
  """

  rate = _messages.FloatField(1)
  timestamp = _messages.StringField(2)
  totalMs = _messages.IntegerField(3, variant=_messages.Variant.UINT64)


class ComponentSource(_messages.Message):
  """Description of an interstitial value between transforms in an execution
  stage.

  Fields:
    name: Dataflow service generated name for this source.
    originalTransformOrCollection: User name for the original user transform
      or collection with which this source is most closely associated.
    userName: Human-readable name for this transform; may be user or system
      generated.
  """

  name = _messages.StringField(1)
  originalTransformOrCollection = _messages.StringField(2)
  userName = _messages.StringField(3)


class ComponentTransform(_messages.Message):
  """Description of a transform executed as part of an execution stage.

  Fields:
    name: Dataflow service generated name for this source.
    originalTransform: User name for the original user transform with which
      this transform is most closely associated.
    userName: Human-readable name for this transform; may be user or system
      generated.
  """

  name = _messages.StringField(1)
  originalTransform = _messages.StringField(2)
  userName = _messages.StringField(3)


class ComputationTopology(_messages.Message):
  """All configuration data for a particular Computation.

  Fields:
    computationId: The ID of the computation.
    inputs: The inputs to the computation.
    keyRanges: The key ranges processed by the computation.
    outputs: The outputs from the computation.
    stateFamilies: The state family values.
    systemStageName: The system stage name.
  """

  computationId = _messages.StringField(1)
  inputs = _messages.MessageField('StreamLocation', 2, repeated=True)
  keyRanges = _messages.MessageField('KeyRangeLocation', 3, repeated=True)
  outputs = _messages.MessageField('StreamLocation', 4, repeated=True)
  stateFamilies = _messages.MessageField('StateFamilyConfig', 5, repeated=True)
  systemStageName = _messages.StringField(6)


class ConcatPosition(_messages.Message):
  """A position that encapsulates an inner position and an index for the
  inner position. A ConcatPosition can be used by a reader of a source that
  encapsulates a set of other sources.

  Fields:
    index: Index of the inner source.
    position: Position within the inner source.
  """

  index = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  position = _messages.MessageField('Position', 2)


class CounterMetadata(_messages.Message):
  """CounterMetadata includes all static non-name non-value counter
  attributes.

  Enums:
    KindValueValuesEnum: Counter aggregation kind.
    StandardUnitsValueValuesEnum: System defined Units, see above enum.

  Fields:
    description: Human-readable description of the counter semantics.
    kind: Counter aggregation kind.
    otherUnits: A string referring to the unit type.
    standardUnits: System defined Units, see above enum.
  """

  class KindValueValuesEnum(_messages.Enum):
    """Counter aggregation kind.

    Values:
      INVALID: Counter aggregation kind was not set.
      SUM: Aggregated value is the sum of all contributed values.
      MAX: Aggregated value is the max of all contributed values.
      MIN: Aggregated value is the min of all contributed values.
      MEAN: Aggregated value is the mean of all contributed values.
      OR: Aggregated value represents the logical 'or' of all contributed
        values.
      AND: Aggregated value represents the logical 'and' of all contributed
        values.
      SET: Aggregated value is a set of unique contributed values.
      DISTRIBUTION: Aggregated value captures statistics about a
        distribution.
      LATEST_VALUE: Aggregated value tracks the latest value of a variable.
    """
    INVALID = 0
    SUM = 1
    MAX = 2
    MIN = 3
    MEAN = 4
    OR = 5
    AND = 6
    SET = 7
    DISTRIBUTION = 8
    LATEST_VALUE = 9

  class StandardUnitsValueValuesEnum(_messages.Enum):
    """System defined Units, see above enum.

    Values:
      BYTES: Counter returns a value in bytes.
      BYTES_PER_SEC: Counter returns a value in bytes per second.
      MILLISECONDS: Counter returns a value in milliseconds.
      MICROSECONDS: Counter returns a value in microseconds.
      NANOSECONDS: Counter returns a value in nanoseconds.
      TIMESTAMP_MSEC: Counter returns a timestamp in milliseconds.
      TIMESTAMP_USEC: Counter returns a timestamp in microseconds.
      TIMESTAMP_NSEC: Counter returns a timestamp in nanoseconds.
    """
    BYTES = 0
    BYTES_PER_SEC = 1
    MILLISECONDS = 2
    MICROSECONDS = 3
    NANOSECONDS = 4
    TIMESTAMP_MSEC = 5
    TIMESTAMP_USEC = 6
    TIMESTAMP_NSEC = 7

  description = _messages.StringField(1)
  kind = _messages.EnumField('KindValueValuesEnum', 2)
  otherUnits = _messages.StringField(3)
  standardUnits = _messages.EnumField('StandardUnitsValueValuesEnum', 4)


class CounterStructuredName(_messages.Message):
  """Identifies a counter within a per-job namespace. Counters whose
  structured names are the same get merged into a single value for the job.

  Enums:
    OriginValueValuesEnum: One of the standard Origins defined above.
    PortionValueValuesEnum: Portion of this counter, either key or value.

  Fields:
    componentStepName: Name of the optimized step being executed by the
      workers.
    executionStepName: Name of the stage. An execution step contains
      multiple component steps.
    inputIndex: Index of an input collection that's being read from/written
      to as a side input. The index identifies a step's side inputs starting
      by 1 (e.g. the first side input has input_index 1, the third has
      input_index 3). Side inputs are identified by a pair of
      (original_step_name, input_index). This field helps uniquely identify
      them.
    name: Counter name. Not necessarily globally-unique, but unique within
      the context of the other fields. Required.
    origin: One of the standard Origins defined above.
    originNamespace: A string containing a more specific namespace of the
      counter's origin.
    originalRequestingStepName: The step name requesting an operation, such
      as GBK. I.e. the ParDo causing a read/write from shuffle to occur, or
      a read from side inputs.
    originalStepName: System generated name of the original step in the
      user's graph, before optimization.
    portion: Portion of this counter, either key or value.
    workerId: ID of a particular worker.
  """

  class OriginValueValuesEnum(_messages.Enum):
    """One of the standard Origins defined above.

    Values:
      SYSTEM: Counter was created by the Dataflow system.
      USER: Counter was created by the user.
    """
    SYSTEM = 0
    USER = 1

  class PortionValueValuesEnum(_messages.Enum):
    """Portion of this counter, either key or value.

    Values:
      ALL: Counter portion has not been set.
      KEY: Counter reports a key.
      VALUE: Counter reports a value.
    """
    ALL = 0
    KEY = 1
    VALUE = 2

  componentStepName = _messages.StringField(1)
  executionStepName = _messages.StringField(2)
  inputIndex = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  name = _messages.StringField(4)
  origin = _messages.EnumField('OriginValueValuesEnum', 5)
  originNamespace = _messages.StringField(6)
  originalRequestingStepName = _messages.StringField(7)
  originalStepName = _messages.StringField(8)
  portion = _messages.EnumField('PortionValueValuesEnum', 9)
  workerId = _messages.StringField(10)


class CounterStructuredNameAndMetadata(_messages.Message):
  """A single message which encapsulates structured name and metadata for a
  given counter.

  Fields:
    metadata: Metadata associated with a counter
    name: Structured name of the counter.
  """

  metadata = _messages.MessageField('CounterMetadata', 1)
  name = _messages.MessageField('CounterStructuredName', 2)


class CounterUpdate(_messages.Message):
  """An update to a Counter sent from a worker.

  Fields:
    boolean: Boolean value for And, Or.
    cumulative: True if this counter is reported as the total cumulative
      aggregate value accumulated since the worker started working on this
      WorkItem. By default this is false, indicating that this counter is
      reported as a delta.
    distribution: Distribution data
    floatingPoint: Floating point value for Sum, Max, Min.
    floatingPointList: List of floating point numbers, for Set.
    floatingPointMean: Floating point mean aggregation value for Mean.
    integer: Integer value for Sum, Max, Min.
    integerGauge: Gauge data
    integerList: List of integers, for Set.
    integerMean: Integer mean aggregation value for Mean.
    internal: Value for internally-defined counters used by the Dataflow
      service.
    nameAndKind: Counter name and aggregation type.
    shortId: The service-generated short identifier for this counter. The
      short_id -> (name, metadata) mapping is constant for the lifetime of a
      job.
    stringList: List of strings, for Set.
    structuredNameAndMetadata: Counter structured name and metadata.
  """

  boolean = _messages.BooleanField(1)
  cumulative = _messages.BooleanField(2)
  distribution = _messages.MessageField('DistributionUpdate', 3)
  floatingPoint = _messages.FloatField(4)
  floatingPointList = _messages.MessageField('FloatingPointList', 5)
  floatingPointMean = _messages.MessageField('FloatingPointMean', 6)
  integer = _messages.MessageField('SplitInt64', 7)
  integerGauge = _messages.MessageField('IntegerGauge', 8)
  integerList = _messages.MessageField('IntegerList', 9)
  integerMean = _messages.MessageField('IntegerMean', 10)
  internal = _messages.MessageField('extra_types.JsonValue', 11)
  nameAndKind = _messages.MessageField('NameAndKind', 12)
  shortId = _messages.IntegerField(13)
  stringList = _messages.MessageField('StringList', 14)
  structuredNameAndMetadata = _messages.MessageField('CounterStructuredNameAndMetadata', 15)


class CreateJobFromTemplateRequest(_messages.Message):
  """A request to create a Cloud Dataflow job from a template.

  Messages:
    ParametersValue: The runtime parameters to pass to the job.

  Fields:
    environment: The runtime environment for the job.
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with
      `gs://`.
    jobName: Required. The job name to use for the created job.
    location: The location to which to direct the request.
    parameters: The runtime parameters to pass to the job.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParametersValue(_messages.Message):
    """The runtime parameters to pass to the job.

    Messages:
      AdditionalProperty: An additional property for a ParametersValue
        object.

    Fields:
      additionalProperties: Additional properties of type ParametersValue
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a ParametersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  environment = _messages.MessageField('RuntimeEnvironment', 1)
  gcsPath = _messages.StringField(2)
  jobName = _messages.StringField(3)
  location = _messages.StringField(4)
  parameters = _messages.MessageField('ParametersValue', 5)


class CustomSourceLocation(_messages.Message):
  """Identifies the location of a custom source.

  Fields:
    stateful: Whether this source is stateful.
  """

  stateful = _messages.BooleanField(1)


class DataDiskAssignment(_messages.Message):
  """Data disk assignment for a given VM instance.

  Fields:
    dataDisks: Mounted data disks. The order is important: a data disk's
      0-based index in this list defines which persistent directory the disk
      is mounted to, for example the list of
      { "myproject-1014-104817-4c2-harness-0-disk-0" },
      { "myproject-1014-104817-4c2-harness-0-disk-1" }.
    vmInstance: VM instance name the data disks mounted to, for example
      "myproject-1014-104817-4c2-harness-0".
  """

  dataDisks = _messages.StringField(1, repeated=True)
  vmInstance = _messages.StringField(2)


class DataflowProjectsJobsAggregatedRequest(_messages.Message):
  """A DataflowProjectsJobsAggregatedRequest object.

  Enums:
    FilterValueValuesEnum: The kind of filter to use.
    ViewValueValuesEnum: Level of information requested in response. Default
      is `JOB_VIEW_SUMMARY`.

  Fields:
    filter: The kind of filter to use.
    location: The location that contains this job.
    pageSize: If there are many jobs, limit response to at most this many.
      The actual number of jobs returned will be the lesser of max_responses
      and an unspecified server-defined limit.
    pageToken: Set this to the 'next_page_token' field of a previous
      response to request additional results in a long list.
    projectId: The project which owns the jobs.
    view: Level of information requested in response. Default is
      `JOB_VIEW_SUMMARY`.
  """

  class FilterValueValuesEnum(_messages.Enum):
    """The kind of filter to use.

    Values:
      UNKNOWN: <no description>
      ALL: <no description>
      TERMINATED: <no description>
      ACTIVE: <no description>
    """
    UNKNOWN = 0
    ALL = 1
    TERMINATED = 2
    ACTIVE = 3

  class ViewValueValuesEnum(_messages.Enum):
    """Level of information requested in response. Default is
    `JOB_VIEW_SUMMARY`.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  filter = _messages.EnumField('FilterValueValuesEnum', 1)
  location = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  projectId = _messages.StringField(5, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 6)


class DataflowProjectsJobsCreateRequest(_messages.Message):
  """A DataflowProjectsJobsCreateRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    job: A Job resource to be passed as the request body.
    location: The location that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    replaceJobId: Deprecated. This field is now in the Job message.
    view: The level of information requested in response.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  job = _messages.MessageField('Job', 1)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  replaceJobId = _messages.StringField(4)
  view = _messages.EnumField('ViewValueValuesEnum', 5)


class DataflowProjectsJobsDebugGetConfigRequest(_messages.Message):
  """A DataflowProjectsJobsDebugGetConfigRequest object.

  Fields:
    getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
      the request body.
    jobId: The job id.
    projectId: The project id.
  """

  getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
  jobId = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)


class DataflowProjectsJobsDebugSendCaptureRequest(_messages.Message):
  """A DataflowProjectsJobsDebugSendCaptureRequest object.

  Fields:
    jobId: The job id.
    projectId: The project id.
    sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
      as the request body.
  """

  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 3)


class DataflowProjectsJobsGetMetricsRequest(_messages.Message):
  """A DataflowProjectsJobsGetMetricsRequest object.

  Fields:
    jobId: The job to get messages for.
    location: The location which contains the job specified by job_id.
    projectId: A project id.
    startTime: Return only metric data that has changed since this time.
      Default is to return all information about all metrics for the job.
  """

  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  startTime = _messages.StringField(4)


class DataflowProjectsJobsGetRequest(_messages.Message):
  """A DataflowProjectsJobsGetRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    jobId: The job ID.
    location: The location that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    view: The level of information requested in response.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)


class DataflowProjectsJobsListRequest(_messages.Message):
  """A DataflowProjectsJobsListRequest object.

  Enums:
    FilterValueValuesEnum: The kind of filter to use.
    ViewValueValuesEnum: Level of information requested in response. Default
      is `JOB_VIEW_SUMMARY`.

  Fields:
    filter: The kind of filter to use.
    location: The location that contains this job.
    pageSize: If there are many jobs, limit response to at most this many.
      The actual number of jobs returned will be the lesser of max_responses
      and an unspecified server-defined limit.
    pageToken: Set this to the 'next_page_token' field of a previous
      response to request additional results in a long list.
    projectId: The project which owns the jobs.
    view: Level of information requested in response. Default is
      `JOB_VIEW_SUMMARY`.
  """

  class FilterValueValuesEnum(_messages.Enum):
    """The kind of filter to use.

    Values:
      UNKNOWN: <no description>
      ALL: <no description>
      TERMINATED: <no description>
      ACTIVE: <no description>
    """
    UNKNOWN = 0
    ALL = 1
    TERMINATED = 2
    ACTIVE = 3

  class ViewValueValuesEnum(_messages.Enum):
    """Level of information requested in response. Default is
    `JOB_VIEW_SUMMARY`.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  filter = _messages.EnumField('FilterValueValuesEnum', 1)
  location = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  projectId = _messages.StringField(5, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 6)


class DataflowProjectsJobsMessagesListRequest(_messages.Message):
  """A DataflowProjectsJobsMessagesListRequest object.

  Enums:
    MinimumImportanceValueValuesEnum: Filter to only get messages with
      importance >= level

  Fields:
    endTime: Return only messages with timestamps < end_time. The default is
      now (i.e. return up to the latest messages available).
    jobId: The job to get messages about.
    location: The location which contains the job specified by job_id.
    minimumImportance: Filter to only get messages with importance >= level
    pageSize: If specified, determines the maximum number of messages to
      return. If unspecified, the service may choose an appropriate default,
      or may return an arbitrarily large number of results.
    pageToken: If supplied, this should be the value of next_page_token
      returned by an earlier call. This will cause the next page of results
      to be returned.
    projectId: A project id.
    startTime: If specified, return only messages with timestamps >=
      start_time. The default is the job creation time (i.e. beginning of
      messages).
  """

  class MinimumImportanceValueValuesEnum(_messages.Enum):
    """Filter to only get messages with importance >= level

    Values:
      JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
      JOB_MESSAGE_DEBUG: <no description>
      JOB_MESSAGE_DETAILED: <no description>
      JOB_MESSAGE_BASIC: <no description>
      JOB_MESSAGE_WARNING: <no description>
      JOB_MESSAGE_ERROR: <no description>
    """
    JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
    JOB_MESSAGE_DEBUG = 1
    JOB_MESSAGE_DETAILED = 2
    JOB_MESSAGE_BASIC = 3
    JOB_MESSAGE_WARNING = 4
    JOB_MESSAGE_ERROR = 5

  endTime = _messages.StringField(1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3)
  minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
  pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(6)
  projectId = _messages.StringField(7, required=True)
  startTime = _messages.StringField(8)


class DataflowProjectsJobsUpdateRequest(_messages.Message):
  """A DataflowProjectsJobsUpdateRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    jobId: The job ID.
    location: The location that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
  """

  job = _messages.MessageField('Job', 1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)


class DataflowProjectsJobsWorkItemsLeaseRequest(_messages.Message):
  """A DataflowProjectsJobsWorkItemsLeaseRequest object.

  Fields:
    jobId: Identifies the workflow job this worker belongs to.
    leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as
      the request body.
    projectId: Identifies the project this worker belongs to.
  """

  jobId = _messages.StringField(1, required=True)
  leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
  projectId = _messages.StringField(3, required=True)


class DataflowProjectsJobsWorkItemsReportStatusRequest(_messages.Message):
  """A DataflowProjectsJobsWorkItemsReportStatusRequest object.

  Fields:
    jobId: The job which the WorkItem is part of.
    projectId: The project which owns the WorkItem's job.
    reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to
      be passed as the request body.
  """

  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 3)


class DataflowProjectsLocationsJobsCreateRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsCreateRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    job: A Job resource to be passed as the request body.
    location: The location that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    replaceJobId: Deprecated. This field is now in the Job message.
    view: The level of information requested in response.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  job = _messages.MessageField('Job', 1)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  replaceJobId = _messages.StringField(4)
  view = _messages.EnumField('ViewValueValuesEnum', 5)


class DataflowProjectsLocationsJobsDebugGetConfigRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsDebugGetConfigRequest object.

  Fields:
    getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
      the request body.
    jobId: The job id.
    location: The location which contains the job specified by job_id.
    projectId: The project id.
  """

  getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)


class DataflowProjectsLocationsJobsDebugSendCaptureRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsDebugSendCaptureRequest object.

  Fields:
    jobId: The job id.
    location: The location which contains the job specified by job_id.
    projectId: The project id.
    sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
      as the request body.
  """

  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 4)


class DataflowProjectsLocationsJobsGetMetricsRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsGetMetricsRequest object.

  Fields:
    jobId: The job to get messages for.
    location: The location which contains the job specified by job_id.
    projectId: A project id.
    startTime: Return only metric data that has changed since this time.
      Default is to return all information about all metrics for the job.
  """

  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  startTime = _messages.StringField(4)


class DataflowProjectsLocationsJobsGetRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsGetRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    jobId: The job ID.
    location: The location that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    view: The level of information requested in response.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)


class DataflowProjectsLocationsJobsListRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsListRequest object.

  Enums:
    FilterValueValuesEnum: The kind of filter to use.
    ViewValueValuesEnum: Level of information requested in response. Default
      is `JOB_VIEW_SUMMARY`.

  Fields:
    filter: The kind of filter to use.
    location: The location that contains this job.
    pageSize: If there are many jobs, limit response to at most this many.
      The actual number of jobs returned will be the lesser of max_responses
      and an unspecified server-defined limit.
    pageToken: Set this to the 'next_page_token' field of a previous
      response to request additional results in a long list.
    projectId: The project which owns the jobs.
    view: Level of information requested in response. Default is
      `JOB_VIEW_SUMMARY`.
  """

  class FilterValueValuesEnum(_messages.Enum):
    """The kind of filter to use.

    Values:
      UNKNOWN: <no description>
      ALL: <no description>
      TERMINATED: <no description>
      ACTIVE: <no description>
    """
    UNKNOWN = 0
    ALL = 1
    TERMINATED = 2
    ACTIVE = 3

  class ViewValueValuesEnum(_messages.Enum):
    """Level of information requested in response. Default is
    `JOB_VIEW_SUMMARY`.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  filter = _messages.EnumField('FilterValueValuesEnum', 1)
  location = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  projectId = _messages.StringField(5, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 6)


class DataflowProjectsLocationsJobsMessagesListRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsMessagesListRequest object.

  Enums:
    MinimumImportanceValueValuesEnum: Filter to only get messages with
      importance >= level

  Fields:
    endTime: Return only messages with timestamps < end_time. The default is
      now (i.e. return up to the latest messages available).
    jobId: The job to get messages about.
    location: The location which contains the job specified by job_id.
    minimumImportance: Filter to only get messages with importance >= level
    pageSize: If specified, determines the maximum number of messages to
      return. If unspecified, the service may choose an appropriate default,
      or may return an arbitrarily large number of results.
    pageToken: If supplied, this should be the value of next_page_token
      returned by an earlier call. This will cause the next page of results
      to be returned.
    projectId: A project id.
    startTime: If specified, return only messages with timestamps >=
      start_time. The default is the job creation time (i.e. beginning of
      messages).
  """

  class MinimumImportanceValueValuesEnum(_messages.Enum):
    """Filter to only get messages with importance >= level

    Values:
      JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
      JOB_MESSAGE_DEBUG: <no description>
      JOB_MESSAGE_DETAILED: <no description>
      JOB_MESSAGE_BASIC: <no description>
      JOB_MESSAGE_WARNING: <no description>
      JOB_MESSAGE_ERROR: <no description>
    """
    JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
    JOB_MESSAGE_DEBUG = 1
    JOB_MESSAGE_DETAILED = 2
    JOB_MESSAGE_BASIC = 3
    JOB_MESSAGE_WARNING = 4
    JOB_MESSAGE_ERROR = 5

  endTime = _messages.StringField(1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3, required=True)
  minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
  pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(6)
  projectId = _messages.StringField(7, required=True)
  startTime = _messages.StringField(8)


class DataflowProjectsLocationsJobsUpdateRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsUpdateRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    jobId: The job ID.
    location: The location that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
  """

  job = _messages.MessageField('Job', 1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)


class DataflowProjectsLocationsJobsWorkItemsLeaseRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsWorkItemsLeaseRequest object.

  Fields:
    jobId: Identifies the workflow job this worker belongs to.
    leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as
      the request body.
    location: The location which contains the WorkItem's job.
    projectId: Identifies the project this worker belongs to.
  """

  jobId = _messages.StringField(1, required=True)
  leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)


class DataflowProjectsLocationsJobsWorkItemsReportStatusRequest(_messages.Message):
  """A DataflowProjectsLocationsJobsWorkItemsReportStatusRequest object.

  Fields:
    jobId: The job which the WorkItem is part of.
    location: The location which contains the WorkItem's job.
    projectId: The project which owns the WorkItem's job.
    reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to
      be passed as the request body.
  """

  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 4)


class DataflowProjectsLocationsTemplatesCreateRequest(_messages.Message):
  """A DataflowProjectsLocationsTemplatesCreateRequest object.

  Fields:
    createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
      be passed as the request body.
    location: The location to which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
  """

  createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)


class DataflowProjectsLocationsTemplatesGetRequest(_messages.Message):
  """A DataflowProjectsLocationsTemplatesGetRequest object.

  Enums:
    ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.

  Fields:
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with
      `gs://`.
    location: The location to which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    view: The view to retrieve. Defaults to METADATA_ONLY.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """The view to retrieve. Defaults to METADATA_ONLY.

    Values:
      METADATA_ONLY: <no description>
    """
    METADATA_ONLY = 0

  gcsPath = _messages.StringField(1)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)


class DataflowProjectsLocationsTemplatesLaunchRequest(_messages.Message):
  """A DataflowProjectsLocationsTemplatesLaunchRequest object.

  Fields:
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with
      'gs://'.
    launchTemplateParameters: A LaunchTemplateParameters resource to be
      passed as the request body.
    location: The location to which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    validateOnly: If true, the request is validated but not actually
      executed. Defaults to false.
  """

  gcsPath = _messages.StringField(1)
  launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 2)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)
  validateOnly = _messages.BooleanField(5)


class DataflowProjectsLocationsWorkerMessagesRequest(_messages.Message):
  """A DataflowProjectsLocationsWorkerMessagesRequest object.

  Fields:
    location: The location which contains the job
    projectId: The project to send the WorkerMessages to.
    sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
      passed as the request body.
  """

  location = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 3)


class DataflowProjectsTemplatesCreateRequest(_messages.Message):
  """A DataflowProjectsTemplatesCreateRequest object.

  Fields:
    createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
      be passed as the request body.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
  """

  createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
  projectId = _messages.StringField(2, required=True)


class DataflowProjectsTemplatesGetRequest(_messages.Message):
  """A DataflowProjectsTemplatesGetRequest object.

  Enums:
    ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.

  Fields:
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with
      `gs://`.
    location: The location to which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    view: The view to retrieve. Defaults to METADATA_ONLY.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """The view to retrieve. Defaults to METADATA_ONLY.

    Values:
      METADATA_ONLY: <no description>
    """
    METADATA_ONLY = 0

  gcsPath = _messages.StringField(1)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)


class DataflowProjectsTemplatesLaunchRequest(_messages.Message):
  """A DataflowProjectsTemplatesLaunchRequest object.

  Fields:
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with
      'gs://'.
    launchTemplateParameters: A LaunchTemplateParameters resource to be
      passed as the request body.
    location: The location to which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    validateOnly: If true, the request is validated but not actually
      executed. Defaults to false.
  """

  gcsPath = _messages.StringField(1)
  launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 2)
  location = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)
  validateOnly = _messages.BooleanField(5)


class DataflowProjectsWorkerMessagesRequest(_messages.Message):
  """A DataflowProjectsWorkerMessagesRequest object.

  Fields:
    projectId: The project to send the WorkerMessages to.
    sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
      passed as the request body.
  """

  projectId = _messages.StringField(1, required=True)
  sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 2)


class DerivedSource(_messages.Message):
  """Specification of one of the bundles produced as a result of splitting a
  Source (e.g. when executing a SourceSplitRequest, or when splitting an
  active task using WorkItemStatus.dynamic_source_split), relative to the
  source being split.

  Enums:
    DerivationModeValueValuesEnum: What source to base the produced source
      on (if any).

  Fields:
    derivationMode: What source to base the produced source on (if any).
    source: Specification of the source.
  """

  class DerivationModeValueValuesEnum(_messages.Enum):
    """What source to base the produced source on (if any).

    Values:
      SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
        unspecified.
      SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
        Source with no base.
      SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
        Source being split.
      SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on
        the base of the Source being split.
    """
    SOURCE_DERIVATION_MODE_UNKNOWN = 0
    SOURCE_DERIVATION_MODE_INDEPENDENT = 1
    SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
    SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3

  derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
  source = _messages.MessageField('Source', 2)


class Disk(_messages.Message):
  """Describes the data disk used by a workflow job.

  Fields:
    diskType: Disk storage type, as defined by Google Compute Engine. This
      must be a disk type appropriate to the project and zone in which the
      workers will run. If unknown or unspecified, the service will attempt
      to choose a reasonable default. For example, the standard persistent
      disk type is a resource name typically ending in "pd-standard". If SSD
      persistent disks are available, the resource name typically ends with
      "pd-ssd". The actual valid values are defined by the Google Compute
      Engine API, not by the Cloud Dataflow API; consult the Google Compute
      Engine documentation for more information about determining the set of
      available disk types for a particular project and zone. Google Compute
      Engine Disk types are local to a particular project in a particular
      zone, and so the resource name will typically look something like
      this:
      compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-
      standard
    mountPoint: Directory in a VM where disk is mounted.
    sizeGb: Size of disk in GB. If zero or unspecified, the service will
      attempt to choose a reasonable default.
  """

  diskType = _messages.StringField(1)
  mountPoint = _messages.StringField(2)
  sizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class DisplayData(_messages.Message):
  """Data provided with a pipeline or transform to provide descriptive info.

  Fields:
    boolValue: Contains value if the data is of a boolean type.
    durationValue: Contains value if the data is of duration type.
    floatValue: Contains value if the data is of float type.
    int64Value: Contains value if the data is of int64 type.
    javaClassValue: Contains value if the data is of java class type.
    key: The key identifying the display data. This is intended to be used
      as a label for the display data when viewed in a dax monitoring
      system.
    label: An optional label to display in a dax UI for the element.
    namespace: The namespace for the key. This is usually a class name or
      programming language namespace (i.e. python module) which defines the
      display data. This allows a dax monitoring system to specially handle
      the data and perform custom rendering.
    shortStrValue: A possible additional shorter value to display. For
      example a java_class_name_value of com.mypackage.MyDoFn will be stored
      with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the
      java_class_name value. short_str_value can be displayed and
      java_class_name_value will be displayed as a tooltip.
    strValue: Contains value if the data is of string type.
    timestampValue: Contains value if the data is of timestamp type.
    url: An optional full URL.
  """

  boolValue = _messages.BooleanField(1)
  durationValue = _messages.StringField(2)
  floatValue = _messages.FloatField(3, variant=_messages.Variant.FLOAT)
  int64Value = _messages.IntegerField(4)
  javaClassValue = _messages.StringField(5)
  key = _messages.StringField(6)
  label = _messages.StringField(7)
  namespace = _messages.StringField(8)
  shortStrValue = _messages.StringField(9)
  strValue = _messages.StringField(10)
  timestampValue = _messages.StringField(11)
  url = _messages.StringField(12)


class DistributionUpdate(_messages.Message):
  """A metric value representing a distribution.

  Fields:
    count: The count of the number of elements present in the distribution.
    histogram: (Optional) Histogram of value counts for the distribution.
    max: The maximum value present in the distribution.
    min: The minimum value present in the distribution.
    sum: Use an int64 since we'd prefer the added precision. If overflow is
      a common problem we can detect it and use an additional int64 or a
      double.
    sumOfSquares: Use a double since the sum of squares is likely to
      overflow int64.
  """

  count = _messages.MessageField('SplitInt64', 1)
  histogram = _messages.MessageField('Histogram', 2)
  max = _messages.MessageField('SplitInt64', 3)
  min = _messages.MessageField('SplitInt64', 4)
  sum = _messages.MessageField('SplitInt64', 5)
  sumOfSquares = _messages.FloatField(6)


class DynamicSourceSplit(_messages.Message):
  """When a task splits using WorkItemStatus.dynamic_source_split, this
  message describes the two parts of the split relative to the description
  of the current task's input.

  Fields:
    primary: Primary part (continued to be processed by worker). Specified
      relative to the previously-current source. Becomes current.
    residual: Residual part (returned to the pool of work). Specified
      relative to the previously-current source.
  """

  primary = _messages.MessageField('DerivedSource', 1)
  residual = _messages.MessageField('DerivedSource', 2)


class Environment(_messages.Message):
  """Describes the environment in which a Dataflow Job runs.

  Messages:
    InternalExperimentsValue: Experimental settings.
    SdkPipelineOptionsValue: The Cloud Dataflow SDK pipeline options
      specified by the user. These options are passed through the service
      and are used to recreate the SDK pipeline options on the worker in a
      language agnostic and platform independent way.
    UserAgentValue: A description of the process that generated the request.
    VersionValue: A structure describing which components and their versions
      of the service are required in order to run the job.

  Fields:
    clusterManagerApiService: The type of cluster manager API to use. If
      unknown or unspecified, the service will attempt to choose a
      reasonable default. This should be in the form of the API service
      name, e.g. "compute.googleapis.com".
    dataset: The dataset for the current project where various workflow
      related tables are stored. The supported resource type is: Google
      BigQuery: bigquery.googleapis.com/{dataset}
    experiments: The list of experiments to enable.
    internalExperiments: Experimental settings.
    sdkPipelineOptions: The Cloud Dataflow SDK pipeline options specified by
      the user. These options are passed through the service and are used to
      recreate the SDK pipeline options on the worker in a language agnostic
      and platform independent way.
    serviceAccountEmail: Identity to run virtual machines as. Defaults to
      the default account.
    tempStoragePrefix: The prefix of the resources the system should use for
      temporary storage. The system will append the suffix "/temp-{JOBNAME}"
      to this resource prefix, where {JOBNAME} is the value of the job_name
      field. The resulting bucket and object prefix is used as the prefix of
      the resources used to store temporary data needed during the job
      execution. NOTE: This will override the value in taskrunner_settings.
      The supported resource type is: Google Cloud Storage:
      storage.googleapis.com/{bucket}/{object}
      bucket.storage.googleapis.com/{object}
    userAgent: A description of the process that generated the request.
    version: A structure describing which components and their versions of
      the service are required in order to run the job.
    workerPools: The worker pools. At least one "harness" worker pool must
      be specified in order for the job to have workers.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class InternalExperimentsValue(_messages.Message):
    """Experimental settings.

    Messages:
      AdditionalProperty: An additional property for a
        InternalExperimentsValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a InternalExperimentsValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class SdkPipelineOptionsValue(_messages.Message):
    """The Cloud Dataflow SDK pipeline options specified by the user. These
    options are passed through the service and are used to recreate the SDK
    pipeline options on the worker in a language agnostic and platform
    independent way.

    Messages:
      AdditionalProperty: An additional property for a
        SdkPipelineOptionsValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a SdkPipelineOptionsValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserAgentValue(_messages.Message):
    """A description of the process that generated the request.

    Messages:
      AdditionalProperty: An additional property for a UserAgentValue
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a UserAgentValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class VersionValue(_messages.Message):
    """A structure describing which components and their versions of the
    service are required in order to run the job.

    Messages:
      AdditionalProperty: An additional property for a VersionValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a VersionValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterManagerApiService = _messages.StringField(1)
  dataset = _messages.StringField(2)
  experiments = _messages.StringField(3, repeated=True)
  internalExperiments = _messages.MessageField('InternalExperimentsValue', 4)
  sdkPipelineOptions = _messages.MessageField('SdkPipelineOptionsValue', 5)
  serviceAccountEmail = _messages.StringField(6)
  tempStoragePrefix = _messages.StringField(7)
  userAgent = _messages.MessageField('UserAgentValue', 8)
  version = _messages.MessageField('VersionValue', 9)
  workerPools = _messages.MessageField('WorkerPool', 10, repeated=True)


class ExecutionStageState(_messages.Message):
  """A message describing the state of a particular execution stage.

  Enums:
    ExecutionStageStateValueValuesEnum: Executions stage states allow the
      same set of values as JobState.

  Fields:
    currentStateTime: The time at which the stage transitioned to this
      state.
    executionStageName: The name of the execution stage.
executionStageState: Executions stage states allow the same set of values as JobState. """ class ExecutionStageStateValueValuesEnum(_messages.Enum): """Executions stage states allow the same set of values as JobState. Values: JOB_STATE_UNKNOWN: The job's run state isn't specified. JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not yet started to run. JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is currently running. JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully completed. This is a terminal job state. This state may be set by the Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal state. JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`. JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has been explicitly cancelled. This is a terminal job state. This state may only be set via a Cloud Dataflow `UpdateJob` call, and only if the job has not yet reached another terminal state. JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was successfully updated, meaning that this job was stopped and another job was started, inheriting state from this one. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`. JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in the process of draining. A draining job has stopped pulling from its input sources and is processing any data that remains in-flight. This state may be set via a Cloud Dataflow `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that are draining may only transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been drained. A drained job terminated by stopping pulling from its input sources and processing any data that remained in-flight when draining was requested. This state is a terminal state, may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_DRAINING`. JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been created but is not yet running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`. JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has been explicitly cancelled and is in the process of stopping. Jobs that are cancelling may only transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'. """ JOB_STATE_UNKNOWN = 0 JOB_STATE_STOPPED = 1 JOB_STATE_RUNNING = 2 JOB_STATE_DONE = 3 JOB_STATE_FAILED = 4 JOB_STATE_CANCELLED = 5 JOB_STATE_UPDATED = 6 JOB_STATE_DRAINING = 7 JOB_STATE_DRAINED = 8 JOB_STATE_PENDING = 9 JOB_STATE_CANCELLING = 10 currentStateTime = _messages.StringField(1) executionStageName = _messages.StringField(2) executionStageState = _messages.EnumField('ExecutionStageStateValueValuesEnum', 3) class ExecutionStageSummary(_messages.Message): """Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning. Enums: KindValueValuesEnum: Type of transform this stage is executing.
Fields: componentSource: Collections produced and consumed by component transforms of this stage. componentTransform: Transforms that comprise this execution stage. id: Dataflow service generated id for this stage. inputSource: Input sources for this stage. kind: Type of transform this stage is executing. name: Dataflow service generated name for this stage. outputSource: Output sources for this stage. """ class KindValueValuesEnum(_messages.Enum): """Type of transform this stage is executing. Values: UNKNOWN_KIND: Unrecognized transform type. PAR_DO_KIND: ParDo transform. GROUP_BY_KEY_KIND: Group By Key transform. FLATTEN_KIND: Flatten transform. READ_KIND: Read transform. WRITE_KIND: Write transform. CONSTANT_KIND: Constructs from a constant value, such as with Create.of. SINGLETON_KIND: Creates a Singleton view of a collection. SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a GroupByKey. """ UNKNOWN_KIND = 0 PAR_DO_KIND = 1 GROUP_BY_KEY_KIND = 2 FLATTEN_KIND = 3 READ_KIND = 4 WRITE_KIND = 5 CONSTANT_KIND = 6 SINGLETON_KIND = 7 SHUFFLE_KIND = 8 componentSource = _messages.MessageField('ComponentSource', 1, repeated=True) componentTransform = _messages.MessageField('ComponentTransform', 2, repeated=True) id = _messages.StringField(3) inputSource = _messages.MessageField('StageSource', 4, repeated=True) kind = _messages.EnumField('KindValueValuesEnum', 5) name = _messages.StringField(6) outputSource = _messages.MessageField('StageSource', 7, repeated=True) class FailedLocation(_messages.Message): """Indicates which location failed to respond to a request for data. Fields: name: The name of the failed location. """ name = _messages.StringField(1) class FlattenInstruction(_messages.Message): """An instruction that copies its inputs (zero or more) to its (single) output. Fields: inputs: Describes the inputs to the flatten instruction. """ inputs = _messages.MessageField('InstructionInput', 1, repeated=True) class FloatingPointList(_messages.Message): """A metric value representing a list of floating point numbers. Fields: elements: Elements of the list. """ elements = _messages.FloatField(1, repeated=True) class FloatingPointMean(_messages.Message): """A representation of a floating point mean metric contribution. Fields: count: The number of values being aggregated. sum: The sum of all values being aggregated. """ count = _messages.MessageField('SplitInt64', 1) sum = _messages.FloatField(2) class GetDebugConfigRequest(_messages.Message): """Request to get updated debug configuration for component. Fields: componentId: The internal component id for which debug configuration is requested. location: The location which contains the job specified by job_id. workerId: The worker id, i.e., VM hostname. """ componentId = _messages.StringField(1) location = _messages.StringField(2) workerId = _messages.StringField(3) class GetDebugConfigResponse(_messages.Message): """Response to a get debug configuration request. Fields: config: The encoded debug configuration for the requested component. """ config = _messages.StringField(1) class GetTemplateResponse(_messages.Message): """The response to a GetTemplate request. Fields: metadata: The template metadata describing the template name, available parameters, etc. status: The status of the get template request. Any problems with the request will be indicated in the error_details.
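  Example (illustrative sketch; in practice this message is returned by a
  generated client method, and the TemplateMetadata field name is assumed
  from its definition elsewhere in this module):

    resp = GetTemplateResponse(metadata=TemplateMetadata(name='wordcount'))
    if resp.status is None:  # no error details were reported
        print(resp.metadata.name)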
""" metadata = _messages.MessageField('TemplateMetadata', 1) status = _messages.MessageField('Status', 2) class Histogram(_messages.Message): """Histogram of value counts for a distribution. Buckets have an inclusive lower bound and exclusive upper bound and use "1,2,5 bucketing": The first bucket range is from [0,1) and all subsequent bucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, bucket boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, ... Negative values are not supported. Fields: bucketCounts: Counts of values in each bucket. For efficiency, prefix and trailing buckets with count = 0 are elided. Buckets can store the full range of values of an unsigned long, with ULLONG_MAX falling into the 59th bucket with range [1e19, 2e19). firstBucketOffset: Starting index of first stored bucket. The non- inclusive upper-bound of the ith bucket is given by: pow(10,(i-first_bucket_offset)/3) * (1,2,5)[(i-first_bucket_offset)%3] """ bucketCounts = _messages.IntegerField(1, repeated=True) firstBucketOffset = _messages.IntegerField(2, variant=_messages.Variant.INT32) class InstructionInput(_messages.Message): """An input of an instruction, as a reference to an output of a producer instruction. Fields: outputNum: The output index (origin zero) within the producer. producerInstructionIndex: The index (origin zero) of the parallel instruction that produces the output to be consumed by this input. This index is relative to the list of instructions in this input's instruction's containing MapTask. """ outputNum = _messages.IntegerField(1, variant=_messages.Variant.INT32) producerInstructionIndex = _messages.IntegerField(2, variant=_messages.Variant.INT32) class InstructionOutput(_messages.Message): """An output of an instruction. Messages: CodecValue: The codec to use to encode data being written via this output. Fields: codec: The codec to use to encode data being written via this output. name: The user-provided name of this output. onlyCountKeyBytes: For system-generated byte and mean byte metrics, certain instructions should only report the key size. onlyCountValueBytes: For system-generated byte and mean byte metrics, certain instructions should only report the value size. originalName: System-defined name for this output in the original workflow graph. Outputs that do not contribute to an original instruction do not set this. systemName: System-defined name of this output. Unique across the workflow. """ @encoding.MapUnrecognizedFields('additionalProperties') class CodecValue(_messages.Message): """The codec to use to encode data being written via this output. Messages: AdditionalProperty: An additional property for a CodecValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a CodecValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) codec = _messages.MessageField('CodecValue', 1) name = _messages.StringField(2) onlyCountKeyBytes = _messages.BooleanField(3) onlyCountValueBytes = _messages.BooleanField(4) originalName = _messages.StringField(5) systemName = _messages.StringField(6) class IntegerGauge(_messages.Message): """A metric value representing temporal values of a variable. Fields: timestamp: The time at which this value was measured. 
Measured as msecs from epoch. value: The value of the variable represented by this gauge. """ timestamp = _messages.StringField(1) value = _messages.MessageField('SplitInt64', 2) class IntegerList(_messages.Message): """A metric value representing a list of integers. Fields: elements: Elements of the list. """ elements = _messages.MessageField('SplitInt64', 1, repeated=True) class IntegerMean(_messages.Message): """A representation of an integer mean metric contribution. Fields: count: The number of values being aggregated. sum: The sum of all values being aggregated. """ count = _messages.MessageField('SplitInt64', 1) sum = _messages.MessageField('SplitInt64', 2) class Job(_messages.Message): """Defines a job to be run by the Cloud Dataflow service. Enums: CurrentStateValueValuesEnum: The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. RequestedStateValueValuesEnum: The job's requested state. `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may also be used to directly set a job's requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the job if it has not already reached a terminal state. TypeValueValuesEnum: The type of Cloud Dataflow job. Messages: LabelsValue: User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. TransformNameMappingValue: The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Fields: clientRequestId: The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it. createTime: The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service. currentState: The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. currentStateTime: The timestamp associated with the current state. environment: The environment for the job. executionInfo: Deprecated. id: The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job. labels: User-defined labels for this job. The labels map can contain no more than 64 entries. 
Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. location: The location that contains this job. name: The user-specified Cloud Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt returns the existing Job. The name must match the regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` pipelineDescription: Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. projectId: The ID of the Cloud Platform project that the job belongs to. replaceJobId: If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job. replacedByJobId: If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job. requestedState: The job's requested state. `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may also be used to directly set a job's requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the job if it has not already reached a terminal state. stageStates: This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. steps: The top-level steps that constitute the entire job. tempFiles: A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} transformNameMapping: The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. type: The type of Cloud Dataflow job. """ class CurrentStateValueValuesEnum(_messages.Enum): """The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. Values: JOB_STATE_UNKNOWN: The job's run state isn't specified. JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not yet started to run. JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is currently running. JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully completed. This is a terminal job state. This state may be set by the Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal state. JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed. This is a terminal job state. 
This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`. JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has been explicitly cancelled. This is a terminal job state. This state may only be set via a Cloud Dataflow `UpdateJob` call, and only if the job has not yet reached another terminal state. JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was successfully updated, meaning that this job was stopped and another job was started, inheriting state from this one. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`. JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in the process of draining. A draining job has stopped pulling from its input sources and is processing any data that remains in-flight. This state may be set via a Cloud Dataflow `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that are draining may only transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been drained. A drained job terminated by stopping pulling from its input sources and processing any data that remained in-flight when draining was requested. This state is a terminal state, may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_DRAINING`. JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been created but is not yet running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`. JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has been explicitly cancelled and is in the process of stopping. Jobs that are cancelling may only transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'. """ JOB_STATE_UNKNOWN = 0 JOB_STATE_STOPPED = 1 JOB_STATE_RUNNING = 2 JOB_STATE_DONE = 3 JOB_STATE_FAILED = 4 JOB_STATE_CANCELLED = 5 JOB_STATE_UPDATED = 6 JOB_STATE_DRAINING = 7 JOB_STATE_DRAINED = 8 JOB_STATE_PENDING = 9 JOB_STATE_CANCELLING = 10 class RequestedStateValueValuesEnum(_messages.Enum): """The job's requested state. `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may also be used to directly set a job's requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the job if it has not already reached a terminal state. Values: JOB_STATE_UNKNOWN: The job's run state isn't specified. JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not yet started to run. JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is currently running. JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully completed. This is a terminal job state. This state may be set by the Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal state. JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`. JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has been explicitly cancelled. This is a terminal job state. 
This state may only be set via a Cloud Dataflow `UpdateJob` call, and only if the job has not yet reached another terminal state. JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was successfully updated, meaning that this job was stopped and another job was started, inheriting state from this one. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`. JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in the process of draining. A draining job has stopped pulling from its input sources and is processing any data that remains in-flight. This state may be set via a Cloud Dataflow `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that are draining may only transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been drained. A drained job terminated by stopping pulling from its input sources and processing any data that remained in-flight when draining was requested. This state is a terminal state, may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_DRAINING`. JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been created but is not yet running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`. JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has been explicitly cancelled and is in the process of stopping. Jobs that are cancelling may only transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'. """ JOB_STATE_UNKNOWN = 0 JOB_STATE_STOPPED = 1 JOB_STATE_RUNNING = 2 JOB_STATE_DONE = 3 JOB_STATE_FAILED = 4 JOB_STATE_CANCELLED = 5 JOB_STATE_UPDATED = 6 JOB_STATE_DRAINING = 7 JOB_STATE_DRAINED = 8 JOB_STATE_PENDING = 9 JOB_STATE_CANCELLING = 10 class TypeValueValuesEnum(_messages.Enum): """The type of Cloud Dataflow job. Values: JOB_TYPE_UNKNOWN: The type of the job is unspecified, or unknown. JOB_TYPE_BATCH: A batch job with a well-defined end point: data is read, data is processed, data is written, and the job is done. JOB_TYPE_STREAMING: A continuously streaming job with no end: data is read, processed, and written continuously. """ JOB_TYPE_UNKNOWN = 0 JOB_TYPE_BATCH = 1 JOB_TYPE_STREAMING = 2 @encoding.MapUnrecognizedFields('additionalProperties') class LabelsValue(_messages.Message): """User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. Messages: AdditionalProperty: An additional property for a LabelsValue object. Fields: additionalProperties: Additional properties of type LabelsValue """ class AdditionalProperty(_messages.Message): """An additional property for a LabelsValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class TransformNameMappingValue(_messages.Message): """The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. 
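    For example (illustrative only; the prefixes are invented), a
    replacement job that renames the transform prefix 'ReadLogs' to
    'ReadEvents' could carry:

      mapping = Job.TransformNameMappingValue(additionalProperties=[
          Job.TransformNameMappingValue.AdditionalProperty(
              key='ReadLogs', value='ReadEvents')])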
Messages: AdditionalProperty: An additional property for a TransformNameMappingValue object. Fields: additionalProperties: Additional properties of type TransformNameMappingValue """ class AdditionalProperty(_messages.Message): """An additional property for a TransformNameMappingValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) clientRequestId = _messages.StringField(1) createTime = _messages.StringField(2) currentState = _messages.EnumField('CurrentStateValueValuesEnum', 3) currentStateTime = _messages.StringField(4) environment = _messages.MessageField('Environment', 5) executionInfo = _messages.MessageField('JobExecutionInfo', 6) id = _messages.StringField(7) labels = _messages.MessageField('LabelsValue', 8) location = _messages.StringField(9) name = _messages.StringField(10) pipelineDescription = _messages.MessageField('PipelineDescription', 11) projectId = _messages.StringField(12) replaceJobId = _messages.StringField(13) replacedByJobId = _messages.StringField(14) requestedState = _messages.EnumField('RequestedStateValueValuesEnum', 15) stageStates = _messages.MessageField('ExecutionStageState', 16, repeated=True) steps = _messages.MessageField('Step', 17, repeated=True) tempFiles = _messages.StringField(18, repeated=True) transformNameMapping = _messages.MessageField('TransformNameMappingValue', 19) type = _messages.EnumField('TypeValueValuesEnum', 20) class JobExecutionInfo(_messages.Message): """Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. Messages: StagesValue: A mapping from each stage to the information about that stage. Fields: stages: A mapping from each stage to the information about that stage. """ @encoding.MapUnrecognizedFields('additionalProperties') class StagesValue(_messages.Message): """A mapping from each stage to the information about that stage. Messages: AdditionalProperty: An additional property for a StagesValue object. Fields: additionalProperties: Additional properties of type StagesValue """ class AdditionalProperty(_messages.Message): """An additional property for a StagesValue object. Fields: key: Name of the additional property. value: A JobExecutionStageInfo attribute. """ key = _messages.StringField(1) value = _messages.MessageField('JobExecutionStageInfo', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) stages = _messages.MessageField('StagesValue', 1) class JobExecutionStageInfo(_messages.Message): """Contains information about how a particular google.dataflow.v1beta3.Step will be executed. Fields: stepName: The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage. """ stepName = _messages.StringField(1, repeated=True) class JobMessage(_messages.Message): """A particular message pertaining to a Dataflow job. Enums: MessageImportanceValueValuesEnum: Importance level of the message. Fields: id: Deprecated. messageImportance: Importance level of the message. messageText: The text of the message. time: The timestamp of the message. """ class MessageImportanceValueValuesEnum(_messages.Enum): """Importance level of the message. Values: JOB_MESSAGE_IMPORTANCE_UNKNOWN: The message importance isn't specified, or is unknown. 
JOB_MESSAGE_DEBUG: The message is at the 'debug' level: typically only useful for software engineers working on the code the job is running. Typically, Dataflow pipeline runners do not display log messages at this level by default. JOB_MESSAGE_DETAILED: The message is at the 'detailed' level: somewhat verbose, but potentially useful to users. Typically, Dataflow pipeline runners do not display log messages at this level by default. These messages are displayed by default in the Dataflow monitoring UI. JOB_MESSAGE_BASIC: The message is at the 'basic' level: useful for keeping track of the execution of a Dataflow pipeline. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI. JOB_MESSAGE_WARNING: The message is at the 'warning' level: indicating a condition pertaining to a job which may require human intervention. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI. JOB_MESSAGE_ERROR: The message is at the 'error' level: indicating a condition preventing a job from succeeding. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI. """ JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0 JOB_MESSAGE_DEBUG = 1 JOB_MESSAGE_DETAILED = 2 JOB_MESSAGE_BASIC = 3 JOB_MESSAGE_WARNING = 4 JOB_MESSAGE_ERROR = 5 id = _messages.StringField(1) messageImportance = _messages.EnumField('MessageImportanceValueValuesEnum', 2) messageText = _messages.StringField(3) time = _messages.StringField(4) class JobMetrics(_messages.Message): """JobMetrics contains a collection of metrics describing the detailed progress of a Dataflow job. Metrics correspond to user-defined and system-defined metrics in the job. This resource captures only the most recent values of each metric; time-series data can be queried for them (under the same metric names) from Cloud Monitoring. Fields: metricTime: Timestamp as of which metric values are current. metrics: All metrics for this job. """ metricTime = _messages.StringField(1) metrics = _messages.MessageField('MetricUpdate', 2, repeated=True) class KeyRangeDataDiskAssignment(_messages.Message): """Data disk assignment information for a specific key-range of a sharded computation. Currently we only support UTF-8 character splits to simplify encoding into JSON. Fields: dataDisk: The name of the data disk where data for this range is stored. This name is local to the Google Cloud Platform project and uniquely identifies the disk within that project, for example "myproject-1014-104817-4c2-harness-0-disk-1". end: The end (exclusive) of the key range. start: The start (inclusive) of the key range. """ dataDisk = _messages.StringField(1) end = _messages.StringField(2) start = _messages.StringField(3) class KeyRangeLocation(_messages.Message): """Location information for a specific key-range of a sharded computation. Currently we only support UTF-8 character splits to simplify encoding into JSON. Fields: dataDisk: The name of the data disk where data for this range is stored. This name is local to the Google Cloud Platform project and uniquely identifies the disk within that project, for example "myproject-1014-104817-4c2-harness-0-disk-1". deliveryEndpoint: The physical location of this range assignment to be used for streaming computation cross-worker message delivery.
deprecatedPersistentDirectory: DEPRECATED. The location of the persistent state for this range, as a persistent directory in the worker local filesystem. end: The end (exclusive) of the key range. start: The start (inclusive) of the key range. """ dataDisk = _messages.StringField(1) deliveryEndpoint = _messages.StringField(2) deprecatedPersistentDirectory = _messages.StringField(3) end = _messages.StringField(4) start = _messages.StringField(5) class LaunchTemplateParameters(_messages.Message): """Parameters to provide to the template being launched. Messages: ParametersValue: The runtime parameters to pass to the job. Fields: environment: The runtime environment for the job. jobName: Required. The job name to use for the created job. parameters: The runtime parameters to pass to the job. """ @encoding.MapUnrecognizedFields('additionalProperties') class ParametersValue(_messages.Message): """The runtime parameters to pass to the job. Messages: AdditionalProperty: An additional property for a ParametersValue object. Fields: additionalProperties: Additional properties of type ParametersValue """ class AdditionalProperty(_messages.Message): """An additional property for a ParametersValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) environment = _messages.MessageField('RuntimeEnvironment', 1) jobName = _messages.StringField(2) parameters = _messages.MessageField('ParametersValue', 3) class LaunchTemplateResponse(_messages.Message): """Response to the request to launch a template. Fields: job: The job that was launched, if the request was not a dry run and the job was successfully launched. """ job = _messages.MessageField('Job', 1) class LeaseWorkItemRequest(_messages.Message): """Request to lease WorkItems. Fields: currentWorkerTime: The current timestamp at the worker. location: The location which contains the WorkItem's job. requestedLeaseDuration: The initial lease period. workItemTypes: Filter for WorkItem type. workerCapabilities: Worker capabilities. WorkItems might be limited to workers with specific capabilities. workerId: Identifies the worker leasing work -- typically the ID of the virtual machine running the worker. """ currentWorkerTime = _messages.StringField(1) location = _messages.StringField(2) requestedLeaseDuration = _messages.StringField(3) workItemTypes = _messages.StringField(4, repeated=True) workerCapabilities = _messages.StringField(5, repeated=True) workerId = _messages.StringField(6) class LeaseWorkItemResponse(_messages.Message): """Response to a request to lease WorkItems. Fields: workItems: A list of the leased WorkItems. """ workItems = _messages.MessageField('WorkItem', 1, repeated=True) class ListJobMessagesResponse(_messages.Message): """Response to a request to list job messages. Fields: autoscalingEvents: Autoscaling events in ascending timestamp order. jobMessages: Messages in ascending timestamp order. nextPageToken: The token to obtain the next page of results if there are more. """ autoscalingEvents = _messages.MessageField('AutoscalingEvent', 1, repeated=True) jobMessages = _messages.MessageField('JobMessage', 2, repeated=True) nextPageToken = _messages.StringField(3) class ListJobsResponse(_messages.Message): """Response to a request to list Cloud Dataflow jobs. This may be a partial response, depending on the page size in the ListJobsRequest. 
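  Illustrative paging sketch (list_jobs and handle stand in for a generated
  client method and a per-job callback; neither is defined in this module):

    page_token = None
    while True:
        resp = list_jobs(page_token)      # hypothetical client call
        for job in resp.jobs or []:
            handle(job)
        page_token = resp.nextPageToken
        if not page_token:
            break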
Fields: failedLocation: Zero or more messages describing locations that failed to respond. jobs: A subset of the requested job information. nextPageToken: Set if there may be more results than fit in this response. """ failedLocation = _messages.MessageField('FailedLocation', 1, repeated=True) jobs = _messages.MessageField('Job', 2, repeated=True) nextPageToken = _messages.StringField(3) class MapTask(_messages.Message): """MapTask consists of an ordered set of instructions, each of which describes one particular low-level operation for the worker to perform in order to accomplish the MapTask's WorkItem. Each instruction must appear in the list before any instructions that depend on its output. Fields: instructions: The instructions in the MapTask. stageName: System-defined name of the stage containing this MapTask. Unique across the workflow. systemName: System-defined name of this MapTask. Unique across the workflow. """ instructions = _messages.MessageField('ParallelInstruction', 1, repeated=True) stageName = _messages.StringField(2) systemName = _messages.StringField(3) class MetricShortId(_messages.Message): """The metric short id is returned to the user alongside an offset into ReportWorkItemStatusRequest. Fields: metricIndex: The index of the corresponding metric in the ReportWorkItemStatusRequest. Required. shortId: The service-generated short identifier for the metric. """ metricIndex = _messages.IntegerField(1, variant=_messages.Variant.INT32) shortId = _messages.IntegerField(2) class MetricStructuredName(_messages.Message): """Identifies a metric by describing the source which generated the metric. Messages: ContextValue: Zero or more labeled fields which identify the part of the job this metric is associated with, such as the name of a step or collection. For example, built-in counters associated with steps will have context['step'] = <step-name>. Counters associated with PCollections in the SDK will have context['pcollection'] = <pcollection-name>. Fields: context: Zero or more labeled fields which identify the part of the job this metric is associated with, such as the name of a step or collection. For example, built-in counters associated with steps will have context['step'] = <step-name>. Counters associated with PCollections in the SDK will have context['pcollection'] = <pcollection-name>. name: Worker-defined metric name. origin: Origin (namespace) of metric name. May be blank for user-defined metrics; will be "dataflow" for metrics defined by the Dataflow service or SDK. """ @encoding.MapUnrecognizedFields('additionalProperties') class ContextValue(_messages.Message): """Zero or more labeled fields which identify the part of the job this metric is associated with, such as the name of a step or collection. For example, built-in counters associated with steps will have context['step'] = <step-name>. Counters associated with PCollections in the SDK will have context['pcollection'] = <pcollection-name>. Messages: AdditionalProperty: An additional property for a ContextValue object. Fields: additionalProperties: Additional properties of type ContextValue """ class AdditionalProperty(_messages.Message): """An additional property for a ContextValue object. Fields: key: Name of the additional property. value: A string attribute.
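      Example (illustrative only; the step name is invented): a
      service-defined counter scoped to a step through the context map:

        name = MetricStructuredName(
            origin='dataflow',
            name='ElementCount',
            context=MetricStructuredName.ContextValue(additionalProperties=[
                MetricStructuredName.ContextValue.AdditionalProperty(
                    key='step', value='s2')]))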
""" key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) context = _messages.MessageField('ContextValue', 1) name = _messages.StringField(2) origin = _messages.StringField(3) class MetricUpdate(_messages.Message): """Describes the state of a metric. Fields: cumulative: True if this metric is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this metric is reported as a delta that is not associated with any WorkItem. distribution: A struct value describing properties of a distribution of numeric values. internal: Worker-computed aggregate value for internal use by the Dataflow service. kind: Metric aggregation kind. The possible metric aggregation kinds are "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution". The specified aggregation kind is case-insensitive. If omitted, this is not an aggregated value but instead a single metric sample value. meanCount: Worker-computed aggregate value for the "Mean" aggregation kind. This holds the count of the aggregated values and is used in combination with mean_sum above to obtain the actual mean aggregate value. The only possible value type is Long. meanSum: Worker-computed aggregate value for the "Mean" aggregation kind. This holds the sum of the aggregated values and is used in combination with mean_count below to obtain the actual mean aggregate value. The only possible value types are Long and Double. name: Name of the metric. scalar: Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min", "And", and "Or". The possible value types are Long, Double, and Boolean. set: Worker-computed aggregate value for the "Set" aggregation kind. The only possible value type is a list of Values whose type can be Long, Double, or String, according to the metric's type. All Values in the list must be of the same type. updateTime: Timestamp associated with the metric value. Optional when workers are reporting work progress; it will be filled in responses from the metrics API. """ cumulative = _messages.BooleanField(1) distribution = _messages.MessageField('extra_types.JsonValue', 2) internal = _messages.MessageField('extra_types.JsonValue', 3) kind = _messages.StringField(4) meanCount = _messages.MessageField('extra_types.JsonValue', 5) meanSum = _messages.MessageField('extra_types.JsonValue', 6) name = _messages.MessageField('MetricStructuredName', 7) scalar = _messages.MessageField('extra_types.JsonValue', 8) set = _messages.MessageField('extra_types.JsonValue', 9) updateTime = _messages.StringField(10) class MountedDataDisk(_messages.Message): """Describes mounted data disk. Fields: dataDisk: The name of the data disk. This name is local to the Google Cloud Platform project and uniquely identifies the disk within that project, for example "myproject-1014-104817-4c2-harness-0-disk-1". """ dataDisk = _messages.StringField(1) class MultiOutputInfo(_messages.Message): """Information about an output of a multi-output DoFn. Fields: tag: The id of the tag the user code will emit to this output by; this should correspond to the tag of some SideInputInfo. """ tag = _messages.StringField(1) class NameAndKind(_messages.Message): """Basic metadata about a counter. Enums: KindValueValuesEnum: Counter aggregation kind. Fields: kind: Counter aggregation kind. name: Name of the counter. 
""" class KindValueValuesEnum(_messages.Enum): """Counter aggregation kind. Values: INVALID: Counter aggregation kind was not set. SUM: Aggregated value is the sum of all contributed values. MAX: Aggregated value is the max of all contributed values. MIN: Aggregated value is the min of all contributed values. MEAN: Aggregated value is the mean of all contributed values. OR: Aggregated value represents the logical 'or' of all contributed values. AND: Aggregated value represents the logical 'and' of all contributed values. SET: Aggregated value is a set of unique contributed values. DISTRIBUTION: Aggregated value captures statistics about a distribution. LATEST_VALUE: Aggregated value tracks the latest value of a variable. """ INVALID = 0 SUM = 1 MAX = 2 MIN = 3 MEAN = 4 OR = 5 AND = 6 SET = 7 DISTRIBUTION = 8 LATEST_VALUE = 9 kind = _messages.EnumField('KindValueValuesEnum', 1) name = _messages.StringField(2) class Package(_messages.Message): """The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run. Fields: location: The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/ name: The name of the package. """ location = _messages.StringField(1) name = _messages.StringField(2) class ParDoInstruction(_messages.Message): """An instruction that does a ParDo operation. Takes one main input and zero or more side inputs, and produces zero or more outputs. Runs user code. Messages: UserFnValue: The user function to invoke. Fields: input: The input. multiOutputInfos: Information about each of the outputs, if user_fn is a MultiDoFn. numOutputs: The number of outputs. sideInputs: Zero or more side inputs. userFn: The user function to invoke. """ @encoding.MapUnrecognizedFields('additionalProperties') class UserFnValue(_messages.Message): """The user function to invoke. Messages: AdditionalProperty: An additional property for a UserFnValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a UserFnValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) input = _messages.MessageField('InstructionInput', 1) multiOutputInfos = _messages.MessageField('MultiOutputInfo', 2, repeated=True) numOutputs = _messages.IntegerField(3, variant=_messages.Variant.INT32) sideInputs = _messages.MessageField('SideInputInfo', 4, repeated=True) userFn = _messages.MessageField('UserFnValue', 5) class ParallelInstruction(_messages.Message): """Describes a particular operation comprising a MapTask. Fields: flatten: Additional information for Flatten instructions. name: User-provided name of this operation. originalName: System-defined name for the operation in the original workflow graph. outputs: Describes the outputs of the instruction. parDo: Additional information for ParDo instructions. 
partialGroupByKey: Additional information for PartialGroupByKey instructions. read: Additional information for Read instructions. systemName: System-defined name of this operation. Unique across the workflow. write: Additional information for Write instructions. """ flatten = _messages.MessageField('FlattenInstruction', 1) name = _messages.StringField(2) originalName = _messages.StringField(3) outputs = _messages.MessageField('InstructionOutput', 4, repeated=True) parDo = _messages.MessageField('ParDoInstruction', 5) partialGroupByKey = _messages.MessageField('PartialGroupByKeyInstruction', 6) read = _messages.MessageField('ReadInstruction', 7) systemName = _messages.StringField(8) write = _messages.MessageField('WriteInstruction', 9) class Parameter(_messages.Message): """Structured data associated with this message. Fields: key: Key or name for this parameter. value: Value for this parameter. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) class ParameterMetadata(_messages.Message): """Metadata for a specific parameter. Fields: helpText: Required. The help text to display for the parameter. isOptional: Optional. Whether the parameter is optional. Defaults to false. label: Required. The label to display for the parameter. name: Required. The name of the parameter. regexes: Optional. Regexes that the parameter must match. """ helpText = _messages.StringField(1) isOptional = _messages.BooleanField(2) label = _messages.StringField(3) name = _messages.StringField(4) regexes = _messages.StringField(5, repeated=True) class PartialGroupByKeyInstruction(_messages.Message): """An instruction that does a partial group-by-key. One input and one output. Messages: InputElementCodecValue: The codec to use for interpreting an element in the input PTable. ValueCombiningFnValue: The value combining function to invoke. Fields: input: Describes the input to the partial group-by-key instruction. inputElementCodec: The codec to use for interpreting an element in the input PTable. originalCombineValuesInputStoreName: If this instruction includes a combining function this is the name of the intermediate store between the GBK and the CombineValues. originalCombineValuesStepName: If this instruction includes a combining function, this is the name of the CombineValues instruction lifted into this instruction. sideInputs: Zero or more side inputs. valueCombiningFn: The value combining function to invoke. """ @encoding.MapUnrecognizedFields('additionalProperties') class InputElementCodecValue(_messages.Message): """The codec to use for interpreting an element in the input PTable. Messages: AdditionalProperty: An additional property for a InputElementCodecValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a InputElementCodecValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class ValueCombiningFnValue(_messages.Message): """The value combining function to invoke. Messages: AdditionalProperty: An additional property for a ValueCombiningFnValue object. Fields: additionalProperties: Properties of the object. 
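    Example (illustrative only; the indices are invented) of wiring the
    enclosing PartialGroupByKeyInstruction to output 0 of the first
    instruction in the containing MapTask:

      pgbk = PartialGroupByKeyInstruction(
          input=InstructionInput(producerInstructionIndex=0, outputNum=0))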
""" class AdditionalProperty(_messages.Message): """An additional property for a ValueCombiningFnValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) input = _messages.MessageField('InstructionInput', 1) inputElementCodec = _messages.MessageField('InputElementCodecValue', 2) originalCombineValuesInputStoreName = _messages.StringField(3) originalCombineValuesStepName = _messages.StringField(4) sideInputs = _messages.MessageField('SideInputInfo', 5, repeated=True) valueCombiningFn = _messages.MessageField('ValueCombiningFnValue', 6) class PipelineDescription(_messages.Message): """A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpretting Dataflow provided metrics. Fields: displayData: Pipeline level display data. executionPipelineStage: Description of each stage of execution of the pipeline. originalPipelineTransform: Description of each transform in the pipeline and collections between them. """ displayData = _messages.MessageField('DisplayData', 1, repeated=True) executionPipelineStage = _messages.MessageField('ExecutionStageSummary', 2, repeated=True) originalPipelineTransform = _messages.MessageField('TransformSummary', 3, repeated=True) class Position(_messages.Message): """Position defines a position within a collection of data. The value can be either the end position, a key (used with ordered collections), a byte offset, or a record index. Fields: byteOffset: Position is a byte offset. concatPosition: CloudPosition is a concat position. end: Position is past all other positions. Also useful for the end position of an unbounded range. key: Position is a string key, ordered lexicographically. recordIndex: Position is a record index. shufflePosition: CloudPosition is a base64 encoded BatchShufflePosition (with FIXED sharding). """ byteOffset = _messages.IntegerField(1) concatPosition = _messages.MessageField('ConcatPosition', 2) end = _messages.BooleanField(3) key = _messages.StringField(4) recordIndex = _messages.IntegerField(5) shufflePosition = _messages.StringField(6) class PubsubLocation(_messages.Message): """Identifies a pubsub location to use for transferring data into or out of a streaming Dataflow job. Fields: dropLateData: Indicates whether the pipeline allows late-arriving data. idLabel: If set, contains a pubsub label from which to extract record ids. If left empty, record deduplication will be strictly best effort. subscription: A pubsub subscription, in the form of "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>" timestampLabel: If set, contains a pubsub label from which to extract record timestamps. If left empty, record timestamps will be generated upon arrival. topic: A pubsub topic, in the form of "pubsub.googleapis.com/topics /<project-id>/<topic-name>" trackingSubscription: If set, specifies the pubsub subscription that will be used for tracking custom time timestamps for watermark estimation. withAttributes: If true, then the client has requested to get pubsub attributes. 
""" dropLateData = _messages.BooleanField(1) idLabel = _messages.StringField(2) subscription = _messages.StringField(3) timestampLabel = _messages.StringField(4) topic = _messages.StringField(5) trackingSubscription = _messages.StringField(6) withAttributes = _messages.BooleanField(7) class ReadInstruction(_messages.Message): """An instruction that reads records. Takes no inputs, produces one output. Fields: source: The source to read from. """ source = _messages.MessageField('Source', 1) class ReportWorkItemStatusRequest(_messages.Message): """Request to report the status of WorkItems. Fields: currentWorkerTime: The current timestamp at the worker. location: The location which contains the WorkItem's job. workItemStatuses: The order is unimportant, except that the order of the WorkItemServiceState messages in the ReportWorkItemStatusResponse corresponds to the order of WorkItemStatus messages here. workerId: The ID of the worker reporting the WorkItem status. If this does not match the ID of the worker which the Dataflow service believes currently has the lease on the WorkItem, the report will be dropped (with an error response). """ currentWorkerTime = _messages.StringField(1) location = _messages.StringField(2) workItemStatuses = _messages.MessageField('WorkItemStatus', 3, repeated=True) workerId = _messages.StringField(4) class ReportWorkItemStatusResponse(_messages.Message): """Response from a request to report the status of WorkItems. Fields: workItemServiceStates: A set of messages indicating the service-side state for each WorkItem whose status was reported, in the same order as the WorkItemStatus messages in the ReportWorkItemStatusRequest which resulting in this response. """ workItemServiceStates = _messages.MessageField('WorkItemServiceState', 1, repeated=True) class ReportedParallelism(_messages.Message): """Represents the level of parallelism in a WorkItem's input, reported by the worker. Fields: isInfinite: Specifies whether the parallelism is infinite. If true, "value" is ignored. Infinite parallelism means the service will assume that the work item can always be split into more non-empty work items by dynamic splitting. This is a work-around for lack of support for infinity by the current JSON-based Java RPC stack. value: Specifies the level of parallelism in case it is finite. """ isInfinite = _messages.BooleanField(1) value = _messages.FloatField(2) class ResourceUtilizationReport(_messages.Message): """Worker metrics exported from workers. This contains resource utilization metrics accumulated from a variety of sources. For more information, see go /df-resource-signals. Fields: cpuTime: CPU utilization samples. """ cpuTime = _messages.MessageField('CPUTime', 1, repeated=True) class ResourceUtilizationReportResponse(_messages.Message): """Service-side response to WorkerMessage reporting resource utilization. """ class RuntimeEnvironment(_messages.Message): """The environment values to set at runtime. Fields: additionalExperiments: Additional experiment flags for the job. bypassTempDirValidation: Whether to bypass the safety checks for the job's temporary directory. Use with caution. machineType: The machine type to use for the job. Defaults to the value from the template if not specified. maxWorkers: The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000. network: Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". 


class SendDebugCaptureRequest(_messages.Message):
  """Request to send encoded debug information.

  Fields:
    componentId: The internal component id for which debug information is
      sent.
    data: The encoded debug information.
    location: The location which contains the job specified by job_id.
    workerId: The worker id, i.e., VM hostname.
  """

  componentId = _messages.StringField(1)
  data = _messages.StringField(2)
  location = _messages.StringField(3)
  workerId = _messages.StringField(4)


class SendDebugCaptureResponse(_messages.Message):
  """Response to a send capture request."""


class SendWorkerMessagesRequest(_messages.Message):
  """A request for sending worker messages to the service.

  Fields:
    location: The location which contains the job.
    workerMessages: The WorkerMessages to send.
  """

  location = _messages.StringField(1)
  workerMessages = _messages.MessageField('WorkerMessage', 2, repeated=True)


class SendWorkerMessagesResponse(_messages.Message):
  """The response to the worker messages.

  Fields:
    workerMessageResponses: The server's response to the worker messages.
  """

  workerMessageResponses = _messages.MessageField('WorkerMessageResponse', 1, repeated=True)
""" key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) inputs = _messages.MessageField('SideInputInfo', 1, repeated=True) name = _messages.StringField(2) outputInfos = _messages.MessageField('SeqMapTaskOutputInfo', 3, repeated=True) stageName = _messages.StringField(4) systemName = _messages.StringField(5) userFn = _messages.MessageField('UserFnValue', 6) class SeqMapTaskOutputInfo(_messages.Message): """Information about an output of a SeqMapTask. Fields: sink: The sink to write the output value to. tag: The id of the TupleTag the user code will tag the output value by. """ sink = _messages.MessageField('Sink', 1) tag = _messages.StringField(2) class ShellTask(_messages.Message): """A task which consists of a shell command for the worker to execute. Fields: command: The shell command to run. exitCode: Exit code for the task. """ command = _messages.StringField(1) exitCode = _messages.IntegerField(2, variant=_messages.Variant.INT32) class SideInputInfo(_messages.Message): """Information about a side input of a DoFn or an input of a SeqDoFn. Messages: KindValue: How to interpret the source element(s) as a side input value. Fields: kind: How to interpret the source element(s) as a side input value. sources: The source(s) to read element(s) from to get the value of this side input. If more than one source, then the elements are taken from the sources, in the specified order if order matters. At least one source is required. tag: The id of the tag the user code will access this side input by; this should correspond to the tag of some MultiOutputInfo. """ @encoding.MapUnrecognizedFields('additionalProperties') class KindValue(_messages.Message): """How to interpret the source element(s) as a side input value. Messages: AdditionalProperty: An additional property for a KindValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a KindValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) kind = _messages.MessageField('KindValue', 1) sources = _messages.MessageField('Source', 2, repeated=True) tag = _messages.StringField(3) class Sink(_messages.Message): """A sink that records can be encoded and written to. Messages: CodecValue: The codec to use to encode data written to the sink. SpecValue: The sink to write to, plus its parameters. Fields: codec: The codec to use to encode data written to the sink. spec: The sink to write to, plus its parameters. """ @encoding.MapUnrecognizedFields('additionalProperties') class CodecValue(_messages.Message): """The codec to use to encode data written to the sink. Messages: AdditionalProperty: An additional property for a CodecValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a CodecValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. 
""" key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class SpecValue(_messages.Message): """The sink to write to, plus its parameters. Messages: AdditionalProperty: An additional property for a SpecValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a SpecValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) codec = _messages.MessageField('CodecValue', 1) spec = _messages.MessageField('SpecValue', 2) class Source(_messages.Message): """A source that records can be read and decoded from. Messages: BaseSpecsValueListEntry: A BaseSpecsValueListEntry object. CodecValue: The codec to use to decode data read from the source. SpecValue: The source to read from, plus its parameters. Fields: baseSpecs: While splitting, sources may specify the produced bundles as differences against another source, in order to save backend-side memory and allow bigger jobs. For details, see SourceSplitRequest. To support this use case, the full set of parameters of the source is logically obtained by taking the latest explicitly specified value of each parameter in the order: base_specs (later items win), spec (overrides anything in base_specs). codec: The codec to use to decode data read from the source. doesNotNeedSplitting: Setting this value to true hints to the framework that the source doesn't need splitting, and using SourceSplitRequest on it would yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter may set this to true when splitting a single file into a set of byte ranges of appropriate size, and set this to false when splitting a filepattern into individual files. However, for efficiency, a file splitter may decide to produce file subranges directly from the filepattern to avoid a splitting round-trip. See SourceSplitRequest for an overview of the splitting process. This field is meaningful only in the Source objects populated by the user (e.g. when filling in a DerivedSource). Source objects supplied by the framework to the user don't have this field populated. metadata: Optionally, metadata for this source can be supplied right away, avoiding a SourceGetMetadataOperation roundtrip (see SourceOperationRequest). This field is meaningful only in the Source objects populated by the user (e.g. when filling in a DerivedSource). Source objects supplied by the framework to the user don't have this field populated. spec: The source to read from, plus its parameters. """ @encoding.MapUnrecognizedFields('additionalProperties') class BaseSpecsValueListEntry(_messages.Message): """A BaseSpecsValueListEntry object. Messages: AdditionalProperty: An additional property for a BaseSpecsValueListEntry object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a BaseSpecsValueListEntry object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. 
""" key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class CodecValue(_messages.Message): """The codec to use to decode data read from the source. Messages: AdditionalProperty: An additional property for a CodecValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a CodecValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class SpecValue(_messages.Message): """The source to read from, plus its parameters. Messages: AdditionalProperty: An additional property for a SpecValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a SpecValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) baseSpecs = _messages.MessageField('BaseSpecsValueListEntry', 1, repeated=True) codec = _messages.MessageField('CodecValue', 2) doesNotNeedSplitting = _messages.BooleanField(3) metadata = _messages.MessageField('SourceMetadata', 4) spec = _messages.MessageField('SpecValue', 5) class SourceFork(_messages.Message): """DEPRECATED in favor of DynamicSourceSplit. Fields: primary: DEPRECATED primarySource: DEPRECATED residual: DEPRECATED residualSource: DEPRECATED """ primary = _messages.MessageField('SourceSplitShard', 1) primarySource = _messages.MessageField('DerivedSource', 2) residual = _messages.MessageField('SourceSplitShard', 3) residualSource = _messages.MessageField('DerivedSource', 4) class SourceGetMetadataRequest(_messages.Message): """A request to compute the SourceMetadata of a Source. Fields: source: Specification of the source whose metadata should be computed. """ source = _messages.MessageField('Source', 1) class SourceGetMetadataResponse(_messages.Message): """The result of a SourceGetMetadataOperation. Fields: metadata: The computed metadata. """ metadata = _messages.MessageField('SourceMetadata', 1) class SourceMetadata(_messages.Message): """Metadata about a Source useful for automatically optimizing and tuning the pipeline, etc. Fields: estimatedSizeBytes: An estimate of the total size (in bytes) of the data that would be read from this source. This estimate is in terms of external storage size, before any decompression or other processing done by the reader. infinite: Specifies that the size of this source is known to be infinite (this is a streaming source). producesSortedKeys: Whether this source is known to produce key/value pairs with the (encoded) keys in lexicographically sorted order. """ estimatedSizeBytes = _messages.IntegerField(1) infinite = _messages.BooleanField(2) producesSortedKeys = _messages.BooleanField(3) class SourceOperationRequest(_messages.Message): """A work item that represents the different operations that can be performed on a user-defined Source specification. 


class SourceOperationRequest(_messages.Message):
  """A work item that represents the different operations that can be
  performed on a user-defined Source specification.

  Fields:
    getMetadata: Information about a request to get metadata about a source.
    name: User-provided name of the Read instruction for this source.
    originalName: System-defined name for the Read instruction for this
      source in the original workflow graph.
    split: Information about a request to split a source.
    stageName: System-defined name of the stage containing the source
      operation. Unique across the workflow.
    systemName: System-defined name of the Read instruction for this source.
      Unique across the workflow.
  """

  getMetadata = _messages.MessageField('SourceGetMetadataRequest', 1)
  name = _messages.StringField(2)
  originalName = _messages.StringField(3)
  split = _messages.MessageField('SourceSplitRequest', 4)
  stageName = _messages.StringField(5)
  systemName = _messages.StringField(6)


class SourceOperationResponse(_messages.Message):
  """The result of a SourceOperationRequest, specified in
  ReportWorkItemStatusRequest.source_operation when the work item is
  completed.

  Fields:
    getMetadata: A response to a request to get metadata about a source.
    split: A response to a request to split a source.
  """

  getMetadata = _messages.MessageField('SourceGetMetadataResponse', 1)
  split = _messages.MessageField('SourceSplitResponse', 2)


class SourceSplitOptions(_messages.Message):
  """Hints for splitting a Source into bundles (parts for parallel
  processing) using SourceSplitRequest.

  Fields:
    desiredBundleSizeBytes: The source should be split into a set of bundles
      where the estimated size of each is approximately this many bytes.
    desiredShardSizeBytes: DEPRECATED in favor of desired_bundle_size_bytes.
  """

  desiredBundleSizeBytes = _messages.IntegerField(1)
  desiredShardSizeBytes = _messages.IntegerField(2)


class SourceSplitRequest(_messages.Message):
  """Represents the operation to split a high-level Source specification
  into bundles (parts for parallel processing). At a high level, splitting
  of a source into bundles happens as follows: SourceSplitRequest is applied
  to the source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further
  splitting happens and the source is used "as is". Otherwise, splitting is
  applied recursively to each produced DerivedSource. As an optimization,
  for any Source, if its does_not_need_splitting is true, the framework
  assumes that splitting this source would return
  SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a
  SourceSplitRequest. This applies both to the initial source being split
  and to bundles produced from it.

  Fields:
    options: Hints for tuning the splitting process.
    source: Specification of the source to be split.
  """

  options = _messages.MessageField('SourceSplitOptions', 1)
  source = _messages.MessageField('Source', 2)


class SourceSplitResponse(_messages.Message):
  """The response to a SourceSplitRequest.

  Enums:
    OutcomeValueValuesEnum: Indicates whether splitting happened and
      produced a list of bundles. If this is USE_CURRENT_SOURCE_AS_IS, the
      current source should be processed "as is" without splitting.
      "bundles" is ignored in this case. If this is SPLITTING_HAPPENED, then
      "bundles" contains a list of bundles into which the source was split.

  Fields:
    bundles: If outcome is SPLITTING_HAPPENED, then this is a list of
      bundles into which the source was split. Otherwise this field is
      ignored. This list can be empty, which means the source represents an
      empty input.
    outcome: Indicates whether splitting happened and produced a list of
      bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source
      should be processed "as is" without splitting. "bundles" is ignored in
      this case. If this is SPLITTING_HAPPENED, then "bundles" contains a
      list of bundles into which the source was split.
    shards: DEPRECATED in favor of bundles.
  """

  class OutcomeValueValuesEnum(_messages.Enum):
    """Indicates whether splitting happened and produced a list of bundles.
    If this is USE_CURRENT_SOURCE_AS_IS, the current source should be
    processed "as is" without splitting. "bundles" is ignored in this case.
    If this is SPLITTING_HAPPENED, then "bundles" contains a list of bundles
    into which the source was split.

    Values:
      SOURCE_SPLIT_OUTCOME_UNKNOWN: The source split outcome is unknown, or
        unspecified.
      SOURCE_SPLIT_OUTCOME_USE_CURRENT: The current source should be
        processed "as is" without splitting.
      SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED: Splitting produced a list of
        bundles.
    """
    SOURCE_SPLIT_OUTCOME_UNKNOWN = 0
    SOURCE_SPLIT_OUTCOME_USE_CURRENT = 1
    SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED = 2

  bundles = _messages.MessageField('DerivedSource', 1, repeated=True)
  outcome = _messages.EnumField('OutcomeValueValuesEnum', 2)
  shards = _messages.MessageField('SourceSplitShard', 3, repeated=True)
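

# A minimal consumer-side sketch (not part of the generated API surface) of
# the splitting protocol described above: USE_CURRENT means process the
# original source as-is; SPLITTING_HAPPENED means recurse into the derived
# bundles. Assumes DerivedSource (defined earlier in this module) carries a
# 'source' field for each bundle.
def _example_handle_split_response(source, response):
  outcome_enum = SourceSplitResponse.OutcomeValueValuesEnum
  if response.outcome == outcome_enum.SOURCE_SPLIT_OUTCOME_USE_CURRENT:
    return [source]  # no further splitting; use the source "as is"
  return [bundle.source for bundle in response.bundles]  # may be empty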


class SourceSplitShard(_messages.Message):
  """DEPRECATED in favor of DerivedSource.

  Enums:
    DerivationModeValueValuesEnum: DEPRECATED

  Fields:
    derivationMode: DEPRECATED
    source: DEPRECATED
  """

  class DerivationModeValueValuesEnum(_messages.Enum):
    """DEPRECATED

    Values:
      SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
        unspecified.
      SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
        Source with no base.
      SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
        Source being split.
      SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on
        the base of the Source being split.
    """
    SOURCE_DERIVATION_MODE_UNKNOWN = 0
    SOURCE_DERIVATION_MODE_INDEPENDENT = 1
    SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
    SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3

  derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
  source = _messages.MessageField('Source', 2)


class SplitInt64(_messages.Message):
  """A representation of an int64, n, that is immune to precision loss when
  encoded in JSON.

  Fields:
    highBits: The high order bits, including the sign: n >> 32.
    lowBits: The low order bits: n & 0xffffffff.
  """

  highBits = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  lowBits = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
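

# Worked example (not part of the generated API surface) of the encoding
# documented on SplitInt64: highBits carries the sign via arithmetic shift,
# lowBits the unsigned low 32 bits, so the pair survives JSON without
# precision loss.
def _example_encode_int64(n):
  return SplitInt64(highBits=n >> 32, lowBits=n & 0xffffffff)


def _example_decode_int64(split):
  # Inverse of the encoding above; correct for negative values too, since
  # highBits retains the sign.
  return (split.highBits << 32) | split.lowBits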


class StageSource(_messages.Message):
  """Description of an input or output of an execution stage.

  Fields:
    name: Dataflow service generated name for this source.
    originalTransformOrCollection: User name for the original user transform
      or collection with which this source is most closely associated.
    sizeBytes: Size of the source, if measurable.
    userName: Human-readable name for this source; may be user or system
      generated.
  """

  name = _messages.StringField(1)
  originalTransformOrCollection = _messages.StringField(2)
  sizeBytes = _messages.IntegerField(3)
  userName = _messages.StringField(4)


class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial
      response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth
      2.0 token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but
      should not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media",
      "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    """Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    """V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  bearer_token = _messages.StringField(4)
  callback = _messages.StringField(5)
  fields = _messages.StringField(6)
  key = _messages.StringField(7)
  oauth_token = _messages.StringField(8)
  pp = _messages.BooleanField(9, default=True)
  prettyPrint = _messages.BooleanField(10, default=True)
  quotaUser = _messages.StringField(11)
  trace = _messages.StringField(12)
  uploadType = _messages.StringField(13)
  upload_protocol = _messages.StringField(14)


class StateFamilyConfig(_messages.Message):
  """State family configuration.

  Fields:
    isRead: If true, this family corresponds to a read operation.
    stateFamily: The state family value.
  """

  isRead = _messages.BooleanField(1)
  stateFamily = _messages.StringField(2)


class Status(_messages.Message):
  """The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It
  is used by [gRPC](https://github.com/grpc). The error model is designed
  to be:  - Simple to use and understand for most users  - Flexible enough
  to meet unexpected needs  # Overview  The `Status` message contains three
  pieces of data: error code, error message, and error details. The error
  code should be an enum value of google.rpc.Code, but it may accept
  additional error codes if needed. The error message should be a
  developer-facing English message that helps developers *understand* and
  *resolve* the error. If a localized user-facing error message is needed,
  put the localized message in the error details or localize it in the
  client. The optional error details may contain arbitrary information
  about the error. There is a predefined set of error detail types in the
  package `google.rpc` that can be used for common error conditions.  #
  Language mapping  The `Status` message is the logical representation of
  the error model, but it is not necessarily the actual wire format. When
  the `Status` message is exposed in different client libraries and
  different wire protocols, it can be mapped differently. For example, it
  will likely be mapped to some exceptions in Java, but more likely mapped
  to some error codes in C.  # Other uses  The error model and the `Status`
  message can be used in a variety of environments, either with or without
  APIs, to provide a consistent developer experience across different
  environments. Example uses of this error model include:  - Partial
  errors. If a service needs to return partial errors to the client, it
  may embed the `Status` in the normal response to indicate the partial
  errors.  - Workflow errors. A typical workflow has multiple steps. Each
  step may have a `Status` message for error reporting.  - Batch
  operations. If a client uses batch request and batch response, the
  `Status` message should be used directly inside batch response, one for
  each error sub-response.  - Asynchronous operations. If an API call
  embeds asynchronous operation results in its response, the status of
  those operations should be represented directly using the `Status`
  message.  - Logging. If some API errors are stored in logs, the message
  `Status` could be used directly after any stripping needed for
  security/privacy reasons.

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English.
      Any user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    """A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
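

# Illustrative sketch (not part of the generated API surface): a Status
# carrying a google.rpc.Code value (3 == INVALID_ARGUMENT) and a
# developer-facing message; any user-facing text would go in details or be
# localized client-side, per the docstring above. The message text is a
# hypothetical placeholder.
def _example_status():
  return Status(code=3, message='Window size must be positive.')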
""" @encoding.MapUnrecognizedFields('additionalProperties') class PropertiesValue(_messages.Message): """Named properties associated with the step. Each kind of predefined step has its own required set of properties. Must be provided on Create. Only retrieved with JOB_VIEW_ALL. Messages: AdditionalProperty: An additional property for a PropertiesValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a PropertiesValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) kind = _messages.StringField(1) name = _messages.StringField(2) properties = _messages.MessageField('PropertiesValue', 3) class StreamLocation(_messages.Message): """Describes a stream of data, either as input to be processed or as output of a streaming Dataflow job. Fields: customSourceLocation: The stream is a custom source. pubsubLocation: The stream is a pubsub stream. sideInputLocation: The stream is a streaming side input. streamingStageLocation: The stream is part of another computation within the current streaming Dataflow job. """ customSourceLocation = _messages.MessageField('CustomSourceLocation', 1) pubsubLocation = _messages.MessageField('PubsubLocation', 2) sideInputLocation = _messages.MessageField('StreamingSideInputLocation', 3) streamingStageLocation = _messages.MessageField('StreamingStageLocation', 4) class StreamingComputationConfig(_messages.Message): """Configuration information for a single streaming computation. Fields: computationId: Unique identifier for this computation. instructions: Instructions that comprise the computation. stageName: Stage name of this computation. systemName: System defined name for this computation. """ computationId = _messages.StringField(1) instructions = _messages.MessageField('ParallelInstruction', 2, repeated=True) stageName = _messages.StringField(3) systemName = _messages.StringField(4) class StreamingComputationRanges(_messages.Message): """Describes full or partial data disk assignment information of the computation ranges. Fields: computationId: The ID of the computation. rangeAssignments: Data disk assignments for ranges from this computation. """ computationId = _messages.StringField(1) rangeAssignments = _messages.MessageField('KeyRangeDataDiskAssignment', 2, repeated=True) class StreamingComputationTask(_messages.Message): """A task which describes what action should be performed for the specified streaming computation ranges. Enums: TaskTypeValueValuesEnum: A type of streaming computation task. Fields: computationRanges: Contains ranges of a streaming computation this task should apply to. dataDisks: Describes the set of data disks this task should apply to. taskType: A type of streaming computation task. """ class TaskTypeValueValuesEnum(_messages.Enum): """A type of streaming computation task. Values: STREAMING_COMPUTATION_TASK_UNKNOWN: The streaming computation task is unknown, or unspecified. STREAMING_COMPUTATION_TASK_STOP: Stop processing specified streaming computation range(s). STREAMING_COMPUTATION_TASK_START: Start processing specified streaming computation range(s). 
""" STREAMING_COMPUTATION_TASK_UNKNOWN = 0 STREAMING_COMPUTATION_TASK_STOP = 1 STREAMING_COMPUTATION_TASK_START = 2 computationRanges = _messages.MessageField('StreamingComputationRanges', 1, repeated=True) dataDisks = _messages.MessageField('MountedDataDisk', 2, repeated=True) taskType = _messages.EnumField('TaskTypeValueValuesEnum', 3) class StreamingConfigTask(_messages.Message): """A task that carries configuration information for streaming computations. Messages: UserStepToStateFamilyNameMapValue: Map from user step names to state families. Fields: streamingComputationConfigs: Set of computation configuration information. userStepToStateFamilyNameMap: Map from user step names to state families. windmillServiceEndpoint: If present, the worker must use this endpoint to communicate with Windmill Service dispatchers, otherwise the worker must continue to use whatever endpoint it had been using. windmillServicePort: If present, the worker must use this port to communicate with Windmill Service dispatchers. Only applicable when windmill_service_endpoint is specified. """ @encoding.MapUnrecognizedFields('additionalProperties') class UserStepToStateFamilyNameMapValue(_messages.Message): """Map from user step names to state families. Messages: AdditionalProperty: An additional property for a UserStepToStateFamilyNameMapValue object. Fields: additionalProperties: Additional properties of type UserStepToStateFamilyNameMapValue """ class AdditionalProperty(_messages.Message): """An additional property for a UserStepToStateFamilyNameMapValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) streamingComputationConfigs = _messages.MessageField('StreamingComputationConfig', 1, repeated=True) userStepToStateFamilyNameMap = _messages.MessageField('UserStepToStateFamilyNameMapValue', 2) windmillServiceEndpoint = _messages.StringField(3) windmillServicePort = _messages.IntegerField(4) class StreamingSetupTask(_messages.Message): """A task which initializes part of a streaming Dataflow job. Fields: drain: The user has requested drain. receiveWorkPort: The TCP port on which the worker should listen for messages from other streaming computation workers. streamingComputationTopology: The global topology of the streaming Dataflow job. workerHarnessPort: The TCP port used by the worker to communicate with the Dataflow worker harness. """ drain = _messages.BooleanField(1) receiveWorkPort = _messages.IntegerField(2, variant=_messages.Variant.INT32) streamingComputationTopology = _messages.MessageField('TopologyConfig', 3) workerHarnessPort = _messages.IntegerField(4, variant=_messages.Variant.INT32) class StreamingSideInputLocation(_messages.Message): """Identifies the location of a streaming side input. Fields: stateFamily: Identifies the state family where this side input is stored. tag: Identifies the particular side input within the streaming Dataflow job. """ stateFamily = _messages.StringField(1) tag = _messages.StringField(2) class StreamingStageLocation(_messages.Message): """Identifies the location of a streaming computation stage, for stage-to- stage communication. Fields: streamId: Identifies the particular stream within the streaming Dataflow job. """ streamId = _messages.StringField(1) class StringList(_messages.Message): """A metric value representing a list of strings. Fields: elements: Elements of the list. 
""" elements = _messages.StringField(1, repeated=True) class StructuredMessage(_messages.Message): """A rich message format, including a human readable string, a key for identifying the message, and structured data associated with the message for programmatic consumption. Fields: messageKey: Idenfier for this message type. Used by external systems to internationalize or personalize message. messageText: Human-readable version of message. parameters: The structured data associated with this message. """ messageKey = _messages.StringField(1) messageText = _messages.StringField(2) parameters = _messages.MessageField('Parameter', 3, repeated=True) class TaskRunnerSettings(_messages.Message): """Taskrunner configuration settings. Fields: alsologtostderr: Whether to also send taskrunner log info to stderr. baseTaskDir: The location on the worker for task-specific subdirectories. baseUrl: The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/" commandlinesFileName: The file to store preprocessing commands in. continueOnException: Whether to continue taskrunner if an exception is hit. dataflowApiVersion: The API version of endpoint, e.g. "v1b3" harnessCommand: The command to launch the worker harness. languageHint: The suggested backend language. logDir: The directory on the VM to store logs. logToSerialconsole: Whether to send taskrunner log info to Google Compute Engine VM serial console. logUploadLocation: Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} oauthScopes: The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API. parallelWorkerSettings: The settings to pass to the parallel worker harness. streamingWorkerMainClass: The streaming worker main class name. taskGroup: The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel". taskUser: The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root". tempStoragePrefix: The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} vmId: The ID string of the VM. workflowFileName: The file to store the workflow in. 
""" alsologtostderr = _messages.BooleanField(1) baseTaskDir = _messages.StringField(2) baseUrl = _messages.StringField(3) commandlinesFileName = _messages.StringField(4) continueOnException = _messages.BooleanField(5) dataflowApiVersion = _messages.StringField(6) harnessCommand = _messages.StringField(7) languageHint = _messages.StringField(8) logDir = _messages.StringField(9) logToSerialconsole = _messages.BooleanField(10) logUploadLocation = _messages.StringField(11) oauthScopes = _messages.StringField(12, repeated=True) parallelWorkerSettings = _messages.MessageField('WorkerSettings', 13) streamingWorkerMainClass = _messages.StringField(14) taskGroup = _messages.StringField(15) taskUser = _messages.StringField(16) tempStoragePrefix = _messages.StringField(17) vmId = _messages.StringField(18) workflowFileName = _messages.StringField(19) class TemplateMetadata(_messages.Message): """Metadata describing a template. Fields: description: Optional. A description of the template. name: Required. The name of the template. parameters: The parameters for the template. """ description = _messages.StringField(1) name = _messages.StringField(2) parameters = _messages.MessageField('ParameterMetadata', 3, repeated=True) class TopologyConfig(_messages.Message): """Global topology of the streaming Dataflow job, including all computations and their sharded locations. Messages: UserStageToComputationNameMapValue: Maps user stage names to stable computation names. Fields: computations: The computations associated with a streaming Dataflow job. dataDiskAssignments: The disks assigned to a streaming Dataflow job. forwardingKeyBits: The size (in bits) of keys that will be assigned to source messages. persistentStateVersion: Version number for persistent state. userStageToComputationNameMap: Maps user stage names to stable computation names. """ @encoding.MapUnrecognizedFields('additionalProperties') class UserStageToComputationNameMapValue(_messages.Message): """Maps user stage names to stable computation names. Messages: AdditionalProperty: An additional property for a UserStageToComputationNameMapValue object. Fields: additionalProperties: Additional properties of type UserStageToComputationNameMapValue """ class AdditionalProperty(_messages.Message): """An additional property for a UserStageToComputationNameMapValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) computations = _messages.MessageField('ComputationTopology', 1, repeated=True) dataDiskAssignments = _messages.MessageField('DataDiskAssignment', 2, repeated=True) forwardingKeyBits = _messages.IntegerField(3, variant=_messages.Variant.INT32) persistentStateVersion = _messages.IntegerField(4, variant=_messages.Variant.INT32) userStageToComputationNameMap = _messages.MessageField('UserStageToComputationNameMapValue', 5) class TransformSummary(_messages.Message): """Description of the type, names/ids, and input/outputs for a transform. Enums: KindValueValuesEnum: Type of transform. Fields: displayData: Transform-specific display data. id: SDK generated id of this transform instance. inputCollectionName: User names for all collection inputs to this transform. kind: Type of transform. name: User provided name for this transform instance. outputCollectionName: User names for all collection outputs to this transform. 
""" class KindValueValuesEnum(_messages.Enum): """Type of transform. Values: UNKNOWN_KIND: Unrecognized transform type. PAR_DO_KIND: ParDo transform. GROUP_BY_KEY_KIND: Group By Key transform. FLATTEN_KIND: Flatten transform. READ_KIND: Read transform. WRITE_KIND: Write transform. CONSTANT_KIND: Constructs from a constant value, such as with Create.of. SINGLETON_KIND: Creates a Singleton view of a collection. SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a GroupByKey. """ UNKNOWN_KIND = 0 PAR_DO_KIND = 1 GROUP_BY_KEY_KIND = 2 FLATTEN_KIND = 3 READ_KIND = 4 WRITE_KIND = 5 CONSTANT_KIND = 6 SINGLETON_KIND = 7 SHUFFLE_KIND = 8 displayData = _messages.MessageField('DisplayData', 1, repeated=True) id = _messages.StringField(2) inputCollectionName = _messages.StringField(3, repeated=True) kind = _messages.EnumField('KindValueValuesEnum', 4) name = _messages.StringField(5) outputCollectionName = _messages.StringField(6, repeated=True) class WorkItem(_messages.Message): """WorkItem represents basic information about a WorkItem to be executed in the cloud. Fields: configuration: Work item-specific configuration as an opaque blob. id: Identifies this WorkItem. initialReportIndex: The initial index to use when reporting the status of the WorkItem. jobId: Identifies the workflow job this WorkItem belongs to. leaseExpireTime: Time when the lease on this Work will expire. mapTask: Additional information for MapTask WorkItems. packages: Any required packages that need to be fetched in order to execute this WorkItem. projectId: Identifies the cloud project this WorkItem belongs to. reportStatusInterval: Recommended reporting interval. seqMapTask: Additional information for SeqMapTask WorkItems. shellTask: Additional information for ShellTask WorkItems. sourceOperationTask: Additional information for source operation WorkItems. streamingComputationTask: Additional information for StreamingComputationTask WorkItems. streamingConfigTask: Additional information for StreamingConfigTask WorkItems. streamingSetupTask: Additional information for StreamingSetupTask WorkItems. """ configuration = _messages.StringField(1) id = _messages.IntegerField(2) initialReportIndex = _messages.IntegerField(3) jobId = _messages.StringField(4) leaseExpireTime = _messages.StringField(5) mapTask = _messages.MessageField('MapTask', 6) packages = _messages.MessageField('Package', 7, repeated=True) projectId = _messages.StringField(8) reportStatusInterval = _messages.StringField(9) seqMapTask = _messages.MessageField('SeqMapTask', 10) shellTask = _messages.MessageField('ShellTask', 11) sourceOperationTask = _messages.MessageField('SourceOperationRequest', 12) streamingComputationTask = _messages.MessageField('StreamingComputationTask', 13) streamingConfigTask = _messages.MessageField('StreamingConfigTask', 14) streamingSetupTask = _messages.MessageField('StreamingSetupTask', 15) class WorkItemServiceState(_messages.Message): """The Dataflow service's idea of the current state of a WorkItem being processed by a worker. Messages: HarnessDataValue: Other data returned by the service, specific to the particular worker harness. Fields: harnessData: Other data returned by the service, specific to the particular worker harness. leaseExpireTime: Time at which the current lease will expire. metricShortId: The short ids that workers should use in subsequent metric updates. Workers should strive to use short ids whenever possible, but it is ok to request the short_id again if a worker lost track of it (e.g. 


class WorkItemServiceState(_messages.Message):
  """The Dataflow service's idea of the current state of a WorkItem being
  processed by a worker.

  Messages:
    HarnessDataValue: Other data returned by the service, specific to the
      particular worker harness.

  Fields:
    harnessData: Other data returned by the service, specific to the
      particular worker harness.
    leaseExpireTime: Time at which the current lease will expire.
    metricShortId: The short ids that workers should use in subsequent
      metric updates. Workers should strive to use short ids whenever
      possible, but it is ok to request the short_id again if a worker lost
      track of it (e.g. if the worker is recovering from a crash). NOTE: it
      is possible that the response may have short ids for a subset of the
      metrics.
    nextReportIndex: The index value to use for the next report sent by the
      worker. Note: If the report call fails for whatever reason, the
      worker should reuse this index for subsequent report attempts.
    reportStatusInterval: New recommended reporting interval.
    splitRequest: The progress point in the WorkItem where the Dataflow
      service suggests that the worker truncate the task.
    suggestedStopPoint: DEPRECATED in favor of split_request.
    suggestedStopPosition: Obsolete, always empty.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HarnessDataValue(_messages.Message):
    """Other data returned by the service, specific to the particular
    worker harness.

    Messages:
      AdditionalProperty: An additional property for a HarnessDataValue
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a HarnessDataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  harnessData = _messages.MessageField('HarnessDataValue', 1)
  leaseExpireTime = _messages.StringField(2)
  metricShortId = _messages.MessageField('MetricShortId', 3, repeated=True)
  nextReportIndex = _messages.IntegerField(4)
  reportStatusInterval = _messages.StringField(5)
  splitRequest = _messages.MessageField('ApproximateSplitRequest', 6)
  suggestedStopPoint = _messages.MessageField('ApproximateProgress', 7)
  suggestedStopPosition = _messages.MessageField('Position', 8)


class WorkItemStatus(_messages.Message):
  """Conveys a worker's progress through the work described by a WorkItem.

  Fields:
    completed: True if the WorkItem was completed (successfully or
      unsuccessfully).
    counterUpdates: Worker output counters for this WorkItem.
    dynamicSourceSplit: See documentation of stop_position.
    errors: Specifies errors which occurred during processing. If errors
      are provided, and completed = true, then the WorkItem is considered
      to have failed.
    metricUpdates: DEPRECATED in favor of counter_updates.
    progress: DEPRECATED in favor of reported_progress.
    reportIndex: The report index. When a WorkItem is leased, the lease
      will contain an initial report index. When a WorkItem's status is
      reported to the system, the report should be sent with that report
      index, and the response will contain the index the worker should use
      for the next report. Reports received with unexpected index values
      will be rejected by the service. In order to preserve idempotency,
      the worker should not alter the contents of a report, even if the
      worker must submit the same report multiple times before getting
      back a response. The worker should not submit a subsequent report
      until the response for the previous report had been received from
      the service.
    reportedProgress: The worker's progress through this WorkItem.
    requestedLeaseDuration: Amount of time the worker requests for its
      lease.
    sourceFork: DEPRECATED in favor of dynamic_source_split.
    sourceOperationResponse: If the work item represented a
      SourceOperationRequest, and the work is completed, contains the
      result of the operation.
    stopPosition: A worker may split an active map task in two parts,
      "primary" and "residual", continuing to process the primary part and
      returning the residual part into the pool of available work. This
      event is called a "dynamic split" and is critical to the dynamic work
      rebalancing feature. The two obtained sub-tasks are called "parts" of
      the split. The parts, if concatenated, must represent the same input
      as would be read by the current task if the split did not happen.
      The exact way in which the original task is decomposed into the two
      parts is specified either as a position demarcating them
      (stop_position), or explicitly as two DerivedSources, if this task
      consumes a user-defined source type (dynamic_source_split). The
      "current" task is adjusted as a result of the split: after a task
      with range [A, B) sends a stop_position update at C, its range is
      considered to be [A, C), e.g.:  * Progress should be interpreted
      relative to the new range, e.g. "75% completed" means "75% of [A, C)
      completed"  * The worker should interpret proposed_stop_position
      relative to the new range, e.g. "split at 68%" should be interpreted
      as "split at 68% of [A, C)".  * If the worker chooses to split again
      using stop_position, only stop_positions in [A, C) will be accepted.
      * Etc.  dynamic_source_split has similar semantics: e.g., if a task
      with source S splits using dynamic_source_split into {P, R} (where P
      and R must be together equivalent to S), then subsequent progress and
      proposed_stop_position should be interpreted relative to P, and in a
      potential subsequent dynamic_source_split into {P', R'}, P' and R'
      must be together equivalent to P, etc.
    totalThrottlerWaitTimeSeconds: Total time the worker spent being
      throttled by external systems.
    workItemId: Identifies the WorkItem.
  """

  completed = _messages.BooleanField(1)
  counterUpdates = _messages.MessageField('CounterUpdate', 2, repeated=True)
  dynamicSourceSplit = _messages.MessageField('DynamicSourceSplit', 3)
  errors = _messages.MessageField('Status', 4, repeated=True)
  metricUpdates = _messages.MessageField('MetricUpdate', 5, repeated=True)
  progress = _messages.MessageField('ApproximateProgress', 6)
  reportIndex = _messages.IntegerField(7)
  reportedProgress = _messages.MessageField('ApproximateReportedProgress', 8)
  requestedLeaseDuration = _messages.StringField(9)
  sourceFork = _messages.MessageField('SourceFork', 10)
  sourceOperationResponse = _messages.MessageField('SourceOperationResponse', 11)
  stopPosition = _messages.MessageField('Position', 12)
  totalThrottlerWaitTimeSeconds = _messages.FloatField(13)
  workItemId = _messages.StringField(14)
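

# A highly simplified sketch (not part of the generated API surface) of the
# report-index protocol described above: each report uses the index from
# the lease or from the previous response, and the same report would be
# resent unchanged on retry. 'service' and its ReportStatus method are
# hypothetical stand-ins for whatever RPC stub the worker uses.
def _example_report_progress(service, work_item, next_index):
  status = WorkItemStatus(
      workItemId=str(work_item.id),
      reportIndex=next_index,
      completed=False)
  request = ReportWorkItemStatusRequest(workItemStatuses=[status])
  response = service.ReportStatus(request)  # hypothetical call
  # The service echoes one WorkItemServiceState per reported status, in the
  # same order; its nextReportIndex seeds the subsequent report.
  return response.workItemServiceStates[0].nextReportIndex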


class WorkerHealthReport(_messages.Message):
  """WorkerHealthReport contains information about the health of a worker.
  The VM should be identified by the labels attached to the WorkerMessage
  that this health ping belongs to.

  Messages:
    PodsValueListEntry: A PodsValueListEntry object.

  Fields:
    pods: The pods running on the worker. See:
      http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod
      This field is used by the worker to send the status of the individual
      containers running on each worker.
    reportInterval: The interval at which the worker is sending health
      reports. The default value of 0 should be interpreted as the field is
      not being explicitly set by the worker.
    vmIsHealthy: Whether the VM is healthy.
    vmStartupTime: The time the VM was booted.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PodsValueListEntry(_messages.Message):
    """A PodsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a PodsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a PodsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  pods = _messages.MessageField('PodsValueListEntry', 1, repeated=True)
  reportInterval = _messages.StringField(2)
  vmIsHealthy = _messages.BooleanField(3)
  vmStartupTime = _messages.StringField(4)


class WorkerHealthReportResponse(_messages.Message):
  """WorkerHealthReportResponse contains information returned to the worker
  in response to a health ping.

  Fields:
    reportInterval: A positive value indicates the worker should change its
      reporting interval to the specified value. The default value of zero
      means no change in report rate is requested by the server.
  """

  reportInterval = _messages.StringField(1)
""" key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) containerStartTime = _messages.StringField(1) event = _messages.EnumField('EventValueValuesEnum', 2) metadata = _messages.MessageField('MetadataValue', 3) class WorkerMessage(_messages.Message): """WorkerMessage provides information to the backend about a worker. Messages: LabelsValue: Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026" "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here. Fields: labels: Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026" "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here. time: The timestamp of the worker_message. workerHealthReport: The health of a worker. workerLifecycleEvent: Record of worker lifecycle events. workerMessageCode: A worker message code. workerMetrics: Resource metrics reported by workers. workerShutdownNotice: Shutdown notice by workers. """ @encoding.MapUnrecognizedFields('additionalProperties') class LabelsValue(_messages.Message): """Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026" "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here. Messages: AdditionalProperty: An additional property for a LabelsValue object. Fields: additionalProperties: Additional properties of type LabelsValue """ class AdditionalProperty(_messages.Message): """An additional property for a LabelsValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) labels = _messages.MessageField('LabelsValue', 1) time = _messages.StringField(2) workerHealthReport = _messages.MessageField('WorkerHealthReport', 3) workerLifecycleEvent = _messages.MessageField('WorkerLifecycleEvent', 4) workerMessageCode = _messages.MessageField('WorkerMessageCode', 5) workerMetrics = _messages.MessageField('ResourceUtilizationReport', 6) workerShutdownNotice = _messages.MessageField('WorkerShutdownNotice', 7) class WorkerMessageCode(_messages.Message): """A message code is used to report status and error messages to the service. The message codes are intended to be machine readable. The service will take care of translating these into user understandable messages if necessary. Example use cases: 1. Worker processes reporting successful startup. 2. Worker processes reporting specific errors (e.g. package staging failure). Messages: ParametersValue: Parameters contains specific information about the code. This is a struct to allow parameters of different types. 
Examples: 1. For a "HARNESS_STARTED" message parameters might provide the name of the worker and additional data like timing information. 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS objects being downloaded and fields containing errors. In general complex data structures should be avoided. If a worker needs to send a specific and complicated data structure then please consider defining a new proto and adding it to the data oneof in WorkerMessageResponse. Conventions: Parameters should only be used for information that isn't typically passed as a label. hostname and other worker identifiers should almost always be passed as labels since they will be included on most messages. Fields: code: The code is a string intended for consumption by a machine that identifies the type of message being sent. Examples: 1. "HARNESS_STARTED" might be used to indicate the worker harness has started. 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error downloading a GCS file as part of the boot process of one of the worker containers. This is a string and not an enum to make it easy to add new codes without waiting for an API change. parameters: Parameters contains specific information about the code. This is a struct to allow parameters of different types. Examples: 1. For a "HARNESS_STARTED" message parameters might provide the name of the worker and additional data like timing information. 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS objects being downloaded and fields containing errors. In general complex data structures should be avoided. If a worker needs to send a specific and complicated data structure then please consider defining a new proto and adding it to the data oneof in WorkerMessageResponse. Conventions: Parameters should only be used for information that isn't typically passed as a label. hostname and other worker identifiers should almost always be passed as labels since they will be included on most messages. """ @encoding.MapUnrecognizedFields('additionalProperties') class ParametersValue(_messages.Message): """Parameters contains specific information about the code. This is a struct to allow parameters of different types. Examples: 1. For a "HARNESS_STARTED" message parameters might provide the name of the worker and additional data like timing information. 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS objects being downloaded and fields containing errors. In general complex data structures should be avoided. If a worker needs to send a specific and complicated data structure then please consider defining a new proto and adding it to the data oneof in WorkerMessageResponse. Conventions: Parameters should only be used for information that isn't typically passed as a label. hostname and other worker identifiers should almost always be passed as labels since they will be included on most messages. Messages: AdditionalProperty: An additional property for a ParametersValue object. Fields: additionalProperties: Properties of the object. """ class AdditionalProperty(_messages.Message): """An additional property for a ParametersValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. 
""" key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) code = _messages.StringField(1) parameters = _messages.MessageField('ParametersValue', 2) class WorkerMessageResponse(_messages.Message): """A worker_message response allows the server to pass information to the sender. Fields: workerHealthReportResponse: The service's response to a worker's health report. workerMetricsResponse: Service's response to reporting worker metrics (currently empty). workerShutdownNoticeResponse: Service's response to shutdown notice (currently empty). """ workerHealthReportResponse = _messages.MessageField('WorkerHealthReportResponse', 1) workerMetricsResponse = _messages.MessageField('ResourceUtilizationReportResponse', 2) workerShutdownNoticeResponse = _messages.MessageField('WorkerShutdownNoticeResponse', 3) class WorkerPool(_messages.Message): """Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job. Enums: DefaultPackageSetValueValuesEnum: The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language. IpConfigurationValueValuesEnum: Configuration for VM IPs. TeardownPolicyValueValuesEnum: Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default. Messages: MetadataValue: Metadata to set on the Google Compute Engine VMs. PoolArgsValue: Extra arguments for this worker pool. Fields: autoscalingSettings: Settings for autoscaling of this WorkerPool. dataDisks: Data disks that are used by a VM in this workflow. defaultPackageSet: The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language. diskSizeGb: Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default. diskSourceImage: Fully qualified source image for disks. diskType: Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default. ipConfiguration: Configuration for VM IPs. kind: The kind of the worker pool; currently only `harness` and `shuffle` are supported. machineType: Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default. metadata: Metadata to set on the Google Compute Engine VMs. network: Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". 
numThreadsPerWorker: The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming). numWorkers: Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default. onHostMaintenance: The action to take on host maintenance, as defined by the Google Compute Engine API. packages: Packages to be installed on workers. poolArgs: Extra arguments for this worker pool. subnetwork: Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK". taskrunnerSettings: Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field. teardownPolicy: Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default. workerHarnessContainerImage: Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. zone: Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default. """ class DefaultPackageSetValueValuesEnum(_messages.Enum): """The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language. Values: DEFAULT_PACKAGE_SET_UNKNOWN: The default set of packages to stage is unknown, or unspecified. DEFAULT_PACKAGE_SET_NONE: Indicates that no packages should be staged at the worker unless explicitly specified by the job. DEFAULT_PACKAGE_SET_JAVA: Stage packages typically useful to workers written in Java. DEFAULT_PACKAGE_SET_PYTHON: Stage packages typically useful to workers written in Python. """ DEFAULT_PACKAGE_SET_UNKNOWN = 0 DEFAULT_PACKAGE_SET_NONE = 1 DEFAULT_PACKAGE_SET_JAVA = 2 DEFAULT_PACKAGE_SET_PYTHON = 3 class IpConfigurationValueValuesEnum(_messages.Enum): """Configuration for VM IPs. Values: WORKER_IP_UNSPECIFIED: The configuration is unknown, or unspecified. WORKER_IP_PUBLIC: Workers should have public IP addresses. WORKER_IP_PRIVATE: Workers should have private IP addresses. """ WORKER_IP_UNSPECIFIED = 0 WORKER_IP_PUBLIC = 1 WORKER_IP_PRIVATE = 2 class TeardownPolicyValueValuesEnum(_messages.Enum): """Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down.
If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default. Values: TEARDOWN_POLICY_UNKNOWN: The teardown policy isn't specified, or is unknown. TEARDOWN_ALWAYS: Always teardown the resource. TEARDOWN_ON_SUCCESS: Teardown the resource on success. This is useful for debugging failures. TEARDOWN_NEVER: Never teardown the resource. This is useful for debugging and development. """ TEARDOWN_POLICY_UNKNOWN = 0 TEARDOWN_ALWAYS = 1 TEARDOWN_ON_SUCCESS = 2 TEARDOWN_NEVER = 3 @encoding.MapUnrecognizedFields('additionalProperties') class MetadataValue(_messages.Message): """Metadata to set on the Google Compute Engine VMs. Messages: AdditionalProperty: An additional property for a MetadataValue object. Fields: additionalProperties: Additional properties of type MetadataValue """ class AdditionalProperty(_messages.Message): """An additional property for a MetadataValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class PoolArgsValue(_messages.Message): """Extra arguments for this worker pool. Messages: AdditionalProperty: An additional property for a PoolArgsValue object. Fields: additionalProperties: Properties of the object. Contains field @type with type URL. """ class AdditionalProperty(_messages.Message): """An additional property for a PoolArgsValue object. Fields: key: Name of the additional property. value: A extra_types.JsonValue attribute. """ key = _messages.StringField(1) value = _messages.MessageField('extra_types.JsonValue', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) autoscalingSettings = _messages.MessageField('AutoscalingSettings', 1) dataDisks = _messages.MessageField('Disk', 2, repeated=True) defaultPackageSet = _messages.EnumField('DefaultPackageSetValueValuesEnum', 3) diskSizeGb = _messages.IntegerField(4, variant=_messages.Variant.INT32) diskSourceImage = _messages.StringField(5) diskType = _messages.StringField(6) ipConfiguration = _messages.EnumField('IpConfigurationValueValuesEnum', 7) kind = _messages.StringField(8) machineType = _messages.StringField(9) metadata = _messages.MessageField('MetadataValue', 10) network = _messages.StringField(11) numThreadsPerWorker = _messages.IntegerField(12, variant=_messages.Variant.INT32) numWorkers = _messages.IntegerField(13, variant=_messages.Variant.INT32) onHostMaintenance = _messages.StringField(14) packages = _messages.MessageField('Package', 15, repeated=True) poolArgs = _messages.MessageField('PoolArgsValue', 16) subnetwork = _messages.StringField(17) taskrunnerSettings = _messages.MessageField('TaskRunnerSettings', 18) teardownPolicy = _messages.EnumField('TeardownPolicyValueValuesEnum', 19) workerHarnessContainerImage = _messages.StringField(20) zone = _messages.StringField(21) class WorkerSettings(_messages.Message): """Provides data to pass through to the worker harness. Fields: baseUrl: The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. 
If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/" reportingEnabled: Whether to send work progress updates to the service. servicePath: The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects". shuffleServicePath: The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1". tempStoragePrefix: The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object} workerId: The ID of the worker running this pipeline. """ baseUrl = _messages.StringField(1) reportingEnabled = _messages.BooleanField(2) servicePath = _messages.StringField(3) shuffleServicePath = _messages.StringField(4) tempStoragePrefix = _messages.StringField(5) workerId = _messages.StringField(6) class WorkerShutdownNotice(_messages.Message): """Shutdown notification from workers. This is to be sent by the shutdown script of the worker VM so that the backend knows that the VM is being shut down. Fields: reason: The reason for the worker shutdown. Current possible values are: "UNKNOWN": shutdown reason is unknown. "PREEMPTION": shutdown reason is preemption. Other possible reasons may be added in the future. """ reason = _messages.StringField(1) class WorkerShutdownNoticeResponse(_messages.Message): """Service-side response to WorkerMessage issuing shutdown notice.""" class WriteInstruction(_messages.Message): """An instruction that writes records. Takes one input, produces no outputs. Fields: input: The input. sink: The sink to write to. """ input = _messages.MessageField('InstructionInput', 1) sink = _messages.MessageField('Sink', 2) encoding.AddCustomJsonFieldMapping( StandardQueryParameters, 'f__xgafv', '$.xgafv') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
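# --- Hedged illustration (not part of the generated client above) ---
# A minimal sketch of the dynamic-split arithmetic described in the
# stopPosition documentation: after a task with range [A, B) reports a
# stop_position at C, progress is re-interpreted against [A, C). The
# function and variable names below are our own, not Dataflow API names.
def progress_after_split(a, b, c, absolute_position):
    """Fractional progress of `absolute_position` within the new range [a, c)."""
    assert a <= absolute_position <= c <= b, "split point must lie inside [A, B)"
    return (absolute_position - a) / float(c - a)

# A task over [0, 100) that split at 80 and has consumed input up to 60 is
# now "75% completed", i.e. 75% of its adjusted range [0, 80).
assert progress_after_split(0, 100, 80, 60) == 0.75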
{ "content_hash": "1ebc93eb261f7c407b00597ed858dc4c", "timestamp": "", "source": "github", "line_count": 5077, "max_line_length": 102, "avg_line_length": 39.75930667717156, "alnum_prop": 0.7325149362423089, "repo_name": "tgroh/beam", "id": "fdc1681f33e1d7f6281b2433d748cac1e08dedfd", "size": "202643", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_messages.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "FreeMarker", "bytes": "5994" }, { "name": "Go", "bytes": "2167258" }, { "name": "Groovy", "bytes": "127719" }, { "name": "Java", "bytes": "17206671" }, { "name": "Python", "bytes": "3584300" }, { "name": "Shell", "bytes": "82600" } ], "symlink_target": "" }
class ModuleDocFragment(object): # Standard files documentation fragment DOCUMENTATION = ''' options: hostname: description: - IP Address or hostname of APIC resolvable by Ansible control host. required: yes aliases: [ host ] username: description: - The username to use for authentication. required: yes default: admin aliases: [ user ] password: description: - The password to use for authentication. required: yes timeout: description: - The socket level timeout in seconds. default: 30 use_proxy: description: - If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts. default: 'yes' type: bool use_ssl: description: - If C(no), an HTTP connection will be used instead of the default HTTPS connection. type: bool default: 'yes' validate_certs: description: - If C(no), SSL certificates will not be validated. - This should only be set to C(no) when used on personally controlled sites using self-signed certificates. type: bool default: 'yes' notes: - By default, if an environment variable C(<protocol>_proxy) is set on the target host, requests will be sent through that proxy. This behaviour can be overridden by setting a variable for this task (see `setting the environment <http://docs.ansible.com/playbooks_environment.html>`_), or by using the C(use_proxy) option. - HTTP redirects can redirect from HTTP to HTTPS so you should be sure that your proxy environment for both protocols is correct. '''
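# --- Hedged usage sketch (not part of this fragment) ---
# An Ansible module reuses the shared options above by naming this fragment
# in its own DOCUMENTATION via `extends_documentation_fragment`. The module
# name `aci_example` and its `state` option are hypothetical; only the
# fragment name `aci` comes from this file.
EXAMPLE_MODULE_DOCUMENTATION = '''
---
module: aci_example
short_description: Hypothetical module that reuses the shared ACI options
options:
  state:
    description:
      - Desired state of the managed object.
    default: present
extends_documentation_fragment: aci
'''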
{ "content_hash": "9ea42de829eb8f89392ee0c908d6d56c", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 113, "avg_line_length": 32.6530612244898, "alnum_prop": 0.700625, "repo_name": "e-gob/plataforma-kioscos-autoatencion", "id": "db951b679420f8993b62bfd976537a13709cc8cd", "size": "2382", "binary": false, "copies": "36", "ref": "refs/heads/master", "path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/utils/module_docs_fragments/aci.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "41110" }, { "name": "C++", "bytes": "3804" }, { "name": "CSS", "bytes": "34823" }, { "name": "CoffeeScript", "bytes": "8521" }, { "name": "HTML", "bytes": "61168" }, { "name": "JavaScript", "bytes": "7206" }, { "name": "Makefile", "bytes": "1347" }, { "name": "PowerShell", "bytes": "584344" }, { "name": "Python", "bytes": "25506593" }, { "name": "Ruby", "bytes": "245726" }, { "name": "Shell", "bytes": "5075" } ], "symlink_target": "" }
""" Cloudbreak API Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a> OpenAPI spec version: 2.9.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class StackScaleRequestV2(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'group': 'str', 'desired_count': 'int' } attribute_map = { 'group': 'group', 'desired_count': 'desiredCount' } def __init__(self, group=None, desired_count=None): """ StackScaleRequestV2 - a model defined in Swagger """ self._group = None self._desired_count = None self.group = group self.desired_count = desired_count @property def group(self): """ Gets the group of this StackScaleRequestV2. name of the instance group :return: The group of this StackScaleRequestV2. :rtype: str """ return self._group @group.setter def group(self, group): """ Sets the group of this StackScaleRequestV2. name of the instance group :param group: The group of this StackScaleRequestV2. :type: str """ if group is None: raise ValueError("Invalid value for `group`, must not be `None`") self._group = group @property def desired_count(self): """ Gets the desired_count of this StackScaleRequestV2. scaling adjustment of the instance groups :return: The desired_count of this StackScaleRequestV2. :rtype: int """ return self._desired_count @desired_count.setter def desired_count(self, desired_count): """ Sets the desired_count of this StackScaleRequestV2. scaling adjustment of the instance groups :param desired_count: The desired_count of this StackScaleRequestV2. 
:type: int """ if desired_count is None: raise ValueError("Invalid value for `desired_count`, must not be `None`") self._desired_count = desired_count def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, StackScaleRequestV2): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
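# --- Hedged usage sketch (not part of the generated model) ---
# Constructing the request and serializing it with the helpers defined
# above; the group name "worker" and the count 5 are made-up values.
if __name__ == '__main__':
    request = StackScaleRequestV2(group='worker', desired_count=5)
    print(request.to_dict())  # {'group': 'worker', 'desired_count': 5}
    # Both fields are required: the setters raise ValueError on None, so
    # StackScaleRequestV2() without arguments would fail at construction.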
{ "content_hash": "99f837674b15122e8254f680f69f996b", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 984, "avg_line_length": 32.16339869281046, "alnum_prop": 0.5952042267831742, "repo_name": "Chaffelson/whoville", "id": "76c68448b99426f17e641bce7fe28dbc8ccd5dd8", "size": "4938", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "whoville/cloudbreak/models/stack_scale_request_v2.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "6961" }, { "name": "HTML", "bytes": "72038" }, { "name": "Python", "bytes": "3729355" }, { "name": "Shell", "bytes": "95963" }, { "name": "TSQL", "bytes": "345" } ], "symlink_target": "" }
from __future__ import print_function import argparse import os import sys py3 = sys.version_info.major == 3 def warn(msg): print('[powerline-bash] ', msg) class Powerline: symbols = { 'compatible': { 'lock': 'RO', 'network': 'SSH', 'separator': u'\u25B6', 'separator_thin': u'\u276F' }, 'patched': { 'lock': u'\uE0A2', 'network': u'\uE0A2', 'separator': u'\uE0B0', 'separator_thin': u'\uE0B1' }, 'flat': { 'lock': '', 'network': '', 'separator': '', 'separator_thin': '' }, } color_templates = { 'bash': '\\[\\e%s\\]', 'zsh': '%%{%s%%}', 'bare': '%s', } def __init__(self, args, cwd): self.args = args self.cwd = cwd mode, shell = args.mode, args.shell self.color_template = self.color_templates[shell] self.reset = self.color_template % '[0m' self.lock = Powerline.symbols[mode]['lock'] self.network = Powerline.symbols[mode]['network'] self.separator = Powerline.symbols[mode]['separator'] self.separator_thin = Powerline.symbols[mode]['separator_thin'] self.segments = [] def color(self, prefix, code): if code is None: return '' else: return self.color_template % ('[%s;5;%sm' % (prefix, code)) def fgcolor(self, code): return self.color('38', code) def bgcolor(self, code): return self.color('48', code) def append(self, content, fg, bg, separator=None, separator_fg=None): self.segments.append((content, fg, bg, separator if separator is not None else self.separator, separator_fg if separator_fg is not None else bg)) def draw(self): text = (''.join(self.draw_segment(i) for i in range(len(self.segments))) + self.reset) + ' ' if py3: return text else: return text.encode('utf-8') def draw_segment(self, idx): segment = self.segments[idx] next_segment = self.segments[idx + 1] if idx < len(self.segments)-1 else None return ''.join(( self.fgcolor(segment[1]), self.bgcolor(segment[2]), segment[0], self.bgcolor(next_segment[2]) if next_segment else self.reset, self.fgcolor(segment[4]), segment[3])) def get_valid_cwd(): """ We check if the current working directory is valid or not. Typically happens when you checkout a different branch on git that doesn't have this directory. We return the original cwd because the shell still considers that to be the working directory, so returning our guess will confuse people """ # Prefer the PWD environment variable. Python's os.getcwd function follows # symbolic links, which is undesirable. But if PWD is not set then fall # back to this func try: cwd = os.getenv('PWD') or os.getcwd() except: warn("Your current directory is invalid. If you open a ticket at " + "https://github.com/milkbikis/powerline-shell/issues/new " + "we would love to help fix the issue.") sys.stdout.write("> ") sys.exit(1) parts = cwd.split(os.sep) up = cwd while parts and not os.path.exists(up): parts.pop() up = os.sep.join(parts) if cwd != up: warn("Your current directory is invalid. Lowest valid directory: " + up) return cwd if __name__ == "__main__": arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--cwd-mode', action='store', help='How to display the current directory', default='fancy', choices=['fancy', 'plain', 'dironly']) arg_parser.add_argument('--cwd-only', action='store_true', help='Deprecated. 
Use --cwd-mode=dironly') arg_parser.add_argument('--cwd-max-depth', action='store', type=int, default=5, help='Maximum number of directories to show in path') arg_parser.add_argument('--cwd-max-dir-size', action='store', type=int, help='Maximum number of letters displayed for each directory in the path') arg_parser.add_argument('--colorize-hostname', action='store_true', help='Colorize the hostname based on a hash of itself.') arg_parser.add_argument('--mode', action='store', default='patched', help='The characters used to make separators between segments', choices=['patched', 'compatible', 'flat']) arg_parser.add_argument('--shell', action='store', default='bash', help='Set this to your shell type', choices=['bash', 'zsh', 'bare']) arg_parser.add_argument('prev_error', nargs='?', type=int, default=0, help='Error code returned by the last command') args = arg_parser.parse_args() powerline = Powerline(args, get_valid_cwd())
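    # --- Hedged completion sketch: the upstream script continues past this
    # point by running its segment modules and writing the prompt out. The
    # literal segment below is a made-up stand-in used only to show how
    # append() and draw() fit together; 250/240 are arbitrary xterm-256
    # color codes.
    powerline.append(' example ', 250, 240)
    sys.stdout.write(powerline.draw())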
{ "content_hash": "bfd3ef4db35536a1d2bf5e1f62d275fc", "timestamp": "", "source": "github", "line_count": 143, "max_line_length": 86, "avg_line_length": 35.29370629370629, "alnum_prop": 0.5742024965325936, "repo_name": "juancruzmdq/dotfiles", "id": "8b95fb3a951ca94472bac7b2b46b88f114b70b52", "size": "5093", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "powerline-shell/powerline_shell_base.py", "mode": "33261", "license": "mit", "language": [ { "name": "Objective-C", "bytes": "1928" }, { "name": "Python", "bytes": "39592" }, { "name": "Shell", "bytes": "115416" } ], "symlink_target": "" }
from __future__ import print_function, unicode_literals from __future__ import division, absolute_import from tdbus import SimpleDBusConnection import tdbus conn = SimpleDBusConnection(tdbus.DBUS_BUS_SYSTEM) print('Listing all well-known services on the system bus:') print() result = conn.call_method(tdbus.DBUS_PATH_DBUS, 'ListNames', tdbus.DBUS_INTERFACE_DBUS, destination=tdbus.DBUS_SERVICE_DBUS) for name in result.get_args()[0]: if not name.startswith(':'): print(' %s' % name) print()
{ "content_hash": "0ac23897fa560b42a4afc89aba3b70ec", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 87, "avg_line_length": 30.11111111111111, "alnum_prop": 0.6955719557195572, "repo_name": "Jajcus/python-tdbus", "id": "85d8b34fc721018e0191d31695b96a55dd6e0791", "size": "993", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/listnames.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "62268" }, { "name": "Python", "bytes": "42507" } ], "symlink_target": "" }
from typing import Dict, List, Optional, TypeVar from django.conf import settings from django.conf.urls import url from django.core.urlresolvers import LocaleRegexProvider from django.utils.module_loading import import_string """This module declares all of the (documented) integrations available in the Zulip server. The Integration class is used as part of generating the documentation on the /integrations page, while the WebhookIntegration class is also used to generate the URLs in `zproject/urls.py` for webhook integrations. To add a new non-webhook integration, add code to the INTEGRATIONS dictionary below. To add a new webhook integration, declare a WebhookIntegration in the WEBHOOK_INTEGRATIONS list below (it will be automatically added to INTEGRATIONS). Over time, we expect this registry to grow additional convenience features for writing and configuring integrations efficiently. """ class Integration(object): DEFAULT_LOGO_STATIC_PATH = 'static/images/integrations/logos/{name}.png' def __init__(self, name, client_name, logo=None, secondary_line_text=None, display_name=None): # type: (str, str, Optional[str], Optional[str], Optional[str]) -> None self.name = name self.client_name = client_name self.secondary_line_text = secondary_line_text if logo is None: logo = self.DEFAULT_LOGO_STATIC_PATH.format(name=name) self.logo = logo if display_name is None: display_name = name.title() self.display_name = display_name def is_enabled(self): # type: () -> bool return True class EmailIntegration(Integration): def is_enabled(self): # type: () -> bool return settings.EMAIL_GATEWAY_BOT != "" class WebhookIntegration(Integration): DEFAULT_FUNCTION_PATH = 'zerver.views.webhooks.{name}.api_{name}_webhook' DEFAULT_URL = 'api/v1/external/{name}' DEFAULT_CLIENT_NAME = 'Zulip{name}Webhook' def __init__(self, name, client_name=None, logo=None, secondary_line_text=None, function=None, url=None, display_name=None): # type: (str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> None if client_name is None: client_name = self.DEFAULT_CLIENT_NAME.format(name=name.title()) super(WebhookIntegration, self).__init__(name, client_name, logo, secondary_line_text, display_name) if function is None: function = self.DEFAULT_FUNCTION_PATH.format(name=name) if isinstance(function, str): function = import_string(function) self.function = function if url is None: url = self.DEFAULT_URL.format(name=name) self.url = url @property def url_object(self): # type: () -> LocaleRegexProvider return url(self.url, self.function) class HubotLozenge(Integration): GIT_URL_TEMPLATE = "https://github.com/hubot-scripts/hubot-{}" def __init__(self, name, display_name=None, logo=None, logo_alt=None, git_url=None): # type: (str, Optional[str], Optional[str], Optional[str], Optional[str]) -> None if logo_alt is None: logo_alt = "{} logo".format(name.title()) self.logo_alt = logo_alt if git_url is None: git_url = self.GIT_URL_TEMPLATE.format(name) self.git_url = git_url super(HubotLozenge, self).__init__(name, name, logo, display_name=display_name) class GithubIntegration(WebhookIntegration): """ We need this class so that we don't create a url object for git integrations. We want to have one generic url with a dispatch function for the github service and the github webhook.
""" @property def url_object(self): # type: () -> None return WEBHOOK_INTEGRATIONS = [ WebhookIntegration('airbrake'), WebhookIntegration('appfollow', display_name='AppFollow'), WebhookIntegration('beanstalk'), WebhookIntegration('bitbucket2', logo='static/images/integrations/logos/bitbucket.png', display_name='Bitbucket'), WebhookIntegration('bitbucket', secondary_line_text='(Enterprise)'), WebhookIntegration('circleci', display_name='CircleCI'), WebhookIntegration('codeship'), WebhookIntegration('crashlytics'), WebhookIntegration('deskdotcom', logo='static/images/integrations/logos/deskcom.png', display_name='Desk.com'), WebhookIntegration('freshdesk'), GithubIntegration( 'github', function='zerver.views.webhooks.github.api_github_landing', display_name='GitHub', secondary_line_text='(deprecated)' ), GithubIntegration( 'github_webhook', display_name='GitHub', logo='static/images/integrations/logos/github.png', secondary_line_text='(webhook)', function='zerver.views.webhooks.github_webhook.api_github_webhook' ), WebhookIntegration('gitlab', display_name='GitLab'), WebhookIntegration('helloworld', display_name='Hello World'), WebhookIntegration('heroku', display_name='Heroku'), WebhookIntegration('ifttt', function='zerver.views.webhooks.ifttt.api_iftt_app_webhook', display_name='IFTTT'), WebhookIntegration('jira', secondary_line_text='(hosted or v5.2+)', display_name='JIRA'), WebhookIntegration('librato'), WebhookIntegration('mention', display_name='Mention'), WebhookIntegration('newrelic', display_name='New Relic'), WebhookIntegration('pagerduty'), WebhookIntegration('papertrail'), WebhookIntegration('pingdom'), WebhookIntegration('pivotal', display_name='Pivotal Tracker'), WebhookIntegration('semaphore'), WebhookIntegration('sentry'), WebhookIntegration('stash'), WebhookIntegration('stripe', display_name='Stripe'), WebhookIntegration('taiga'), WebhookIntegration('teamcity'), WebhookIntegration('transifex'), WebhookIntegration('travis', display_name='Travis CI'), WebhookIntegration('trello', secondary_line_text='(webhook)'), WebhookIntegration('updown'), WebhookIntegration( 'yo', function='zerver.views.webhooks.yo.api_yo_app_webhook', logo='static/images/integrations/logos/yo-app.png', display_name='Yo App' ), WebhookIntegration('zendesk') ] # type: List[WebhookIntegration] INTEGRATIONS = { 'asana': Integration('asana', 'asana'), 'basecamp': Integration('basecamp', 'basecamp'), 'capistrano': Integration('capistrano', 'capistrano'), 'codebase': Integration('codebase', 'codebase'), 'email': Integration('email', 'email'), 'git': Integration('git', 'git'), 'hubot': Integration('hubot', 'hubot'), 'jenkins': Integration('jenkins', 'jenkins', secondary_line_text='(or Hudson)'), 'jira-plugin': Integration( 'jira-plugin', 'jira-plugin', logo='static/images/integrations/logos/jira.png', secondary_line_text='(locally installed)', display_name='JIRA' ), 'mercurial': Integration('mercurial', 'mercurial', display_name='Mercurial (hg)'), 'nagios': Integration('nagios', 'nagios'), 'perforce': Integration('perforce', 'perforce'), 'phabricator': Integration('phabricator', 'phabricator'), 'puppet': Integration('puppet', 'puppet'), 'redmine': Integration('redmine', 'redmine'), 'rss': Integration('rss', 'rss', display_name='RSS'), 'subversion': Integration('subversion', 'subversion'), 'trac': Integration('trac', 'trac'), 'trello-plugin': Integration( 'trello-plugin', 'trello-plugin', logo='static/images/integrations/logos/trello.png', secondary_line_text='(legacy)', display_name='Trello' ), 'twitter': 
Integration('twitter', 'twitter'), } # type: Dict[str, Integration] HUBOT_LOZENGES = { 'assembla': HubotLozenge('assembla'), 'bonusly': HubotLozenge('bonusly'), 'chartbeat': HubotLozenge('chartbeat'), 'darksky': HubotLozenge('darksky', display_name='Dark Sky', logo_alt='Dark Sky logo'), 'hangouts': HubotLozenge('google-hangouts', display_name="Hangouts"), 'instagram': HubotLozenge('instagram'), 'mailchimp': HubotLozenge('mailchimp', display_name='MailChimp', logo_alt='MailChimp logo'), 'translate': HubotLozenge('google-translate', display_name="Translate", logo_alt='Google Translate logo'), 'youtube': HubotLozenge('youtube', display_name='YouTube', logo_alt='YouTube logo') } for integration in WEBHOOK_INTEGRATIONS: INTEGRATIONS[integration.name] = integration
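# --- Hedged usage sketch (mirrors what the module docstring says
# zproject/urls.py does with this registry): collect one Django URL pattern
# per webhook integration. GithubIntegration.url_object intentionally
# returns None, so it is filtered out; `webhook_urlpatterns` is our name,
# not one defined by this module.
webhook_urlpatterns = [
    integration.url_object
    for integration in WEBHOOK_INTEGRATIONS
    if integration.url_object is not None
]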
{ "content_hash": "a81cbac0039ef191fcbdd3f03aa9c692", "timestamp": "", "source": "github", "line_count": 207, "max_line_length": 119, "avg_line_length": 41.27536231884058, "alnum_prop": 0.6772003745318352, "repo_name": "TigorC/zulip", "id": "c736c2c5d96684a901b08ec16794b7d15cdde8aa", "size": "8544", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "zerver/lib/integrations.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "246442" }, { "name": "Groovy", "bytes": "5509" }, { "name": "HTML", "bytes": "462946" }, { "name": "JavaScript", "bytes": "1404260" }, { "name": "Nginx", "bytes": "1280" }, { "name": "Pascal", "bytes": "1113" }, { "name": "Perl", "bytes": "401825" }, { "name": "Puppet", "bytes": "82466" }, { "name": "Python", "bytes": "3018595" }, { "name": "Ruby", "bytes": "249748" }, { "name": "Shell", "bytes": "37195" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function, unicode_literals) import os import logging LOGGER = logging.getLogger(__name__) def expand_path(path, log=False): expanded = os.path.normpath(os.path.abspath(path)) if log and expanded != path: LOGGER.debug('Expanded "%s" to "%s".', path, expanded) return expanded
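# --- Hedged usage sketch: expand_path() resolves relative segments against
# the current working directory and logs only when the expansion actually
# changes the path; note it does not call expanduser(), so "~" is left
# alone. The sample path is illustrative.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    print(expand_path('projects/../projects/demo', log=True))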
{ "content_hash": "03f0d86781796a943edf74005761664a", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 66, "avg_line_length": 25.4, "alnum_prop": 0.6456692913385826, "repo_name": "digitalrounin/py-lambda-packer", "id": "1e272a03e08bafe720e6f7956a282b5199665015", "size": "381", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plpacker/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "69536" } ], "symlink_target": "" }
from __future__ import unicode_literals from rest_framework.decorators import api_view from rest_framework.response import Response from rest_framework import status from django.views.decorators.csrf import csrf_exempt from django.http import HttpResponse from django.conf import settings from django.shortcuts import render from tesseract import tesseract_exec import os, StringIO def index(request): return render(request, 'index.html') @csrf_exempt @api_view(['GET', 'POST']) def tesseractView(request, format=None): if request.data.get('image') is None: return Response("ERROR: Please upload an image", status=status.HTTP_400_BAD_REQUEST) ### Receive parameters set by user parameters = request.data.dict() del parameters['image'] # parameter 'image' will be processed separately image_object = request.FILES['image'] ### Call tesseract function text = tesseract_exec(image_object, parameters) ### Return tesseract result from memory imagename_base, ext = os.path.splitext(str(image_object)) fname = imagename_base + ".txt" strio = StringIO.StringIO() strio.write(text) response = HttpResponse(strio.getvalue(), content_type="application/force-download") response["Content-Disposition"] = 'attachment; filename=%s' % fname return response
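# --- Hedged client sketch (not part of this service; defined but never
# called here). Posting an image to the view above with `requests`. The URL
# assumes the view is routed at /tesseract/ in the project's urls.py, and
# the extra form field assumes tesseract_exec() forwards parameters as-is;
# both are assumptions, not facts from this file.
def _example_client(url='http://localhost:8000/tesseract/'):
    import requests  # assumed available in the client environment
    with open('scan.png', 'rb') as fp:
        resp = requests.post(url, files={'image': fp}, data={'psm': '3'})
    return resp.content  # the OCR text, served back as a .txt attachment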
{ "content_hash": "8443fd9c792061faa465e20049102581", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 92, "avg_line_length": 32.78947368421053, "alnum_prop": 0.7279293739967897, "repo_name": "acislab/HuMaIN_Microservices", "id": "ad9e9c2ef2035856135d735631786b56d1fb20e3", "size": "1270", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "TesseractService/api/views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1545" }, { "name": "CSS", "bytes": "310992" }, { "name": "Dockerfile", "bytes": "5139" }, { "name": "HTML", "bytes": "23854" }, { "name": "JavaScript", "bytes": "458124" }, { "name": "Python", "bytes": "549404" }, { "name": "Shell", "bytes": "1455" } ], "symlink_target": "" }
from django.contrib.admin import widgets from django import forms class FilteredSelectMultiple(forms.SelectMultiple): """ Plain multi-select widget that replaces the admin's two-pane "filtered" select. """ def __init__(self, verbose_name, is_stacked, attrs=None, choices=()): super(FilteredSelectMultiple, self).__init__(attrs, choices) widgets.FilteredSelectMultiple = FilteredSelectMultiple # using jquery ui to display .vDateField widgets.AdminDateWidget.media = None # TODO: find why this breaks django admin # class MyAdminDateWidget(widgets.AdminDateWidget): # def _media(self): # return forms.Media(js=['admintools_bootstrap/js/jquery-ui-timepicker-addon.js']) # media = property(_media) # widgets.AdminDateWidget = MyAdminDateWidget # patching admintools menu item from admin_tools.menu import items # adding icon argument to base MenuItem class items.MenuItem.icon = None import admintools_bootstrap.settings # patching django's FieldSet from django.contrib.admin import helpers from django import forms class Fieldset(helpers.Fieldset): def _media(self): if 'collapse' in self.classes: return forms.Media(js=['admintools_bootstrap/bootstrap/js/bootstrap-collapse.js']) return forms.Media() media = property(_media) helpers.Fieldset = Fieldset
{ "content_hash": "6bddc6555f90d0e85c2649c5d7035a96", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 94, "avg_line_length": 27.659574468085108, "alnum_prop": 0.7369230769230769, "repo_name": "quinode/django-admintools-bootstrap", "id": "e7c37e20baad25b4ecde92501e0fae605486ca34", "size": "1334", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "admintools_bootstrap/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "73669" }, { "name": "HTML", "bytes": "350383" }, { "name": "JavaScript", "bytes": "69838" }, { "name": "Makefile", "bytes": "1852" }, { "name": "Python", "bytes": "8524" } ], "symlink_target": "" }
from setuptools import setup setup( name="pytest_snmpserver", version="0.1.9", packages=["pytest_snmpserver"], long_description="SNMP server as a pytest plugin", long_description_content_type="text/markdown", python_requires=">=3.6", install_requires=[], entry_points={"pytest11": ["pytest_snmpserver = pytest_snmpserver.pytest_plugin"]}, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
{ "content_hash": "368c976fa29945840ea540267e5e151d", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 87, "avg_line_length": 32.35294117647059, "alnum_prop": 0.6345454545454545, "repo_name": "tardate/LittleCodingKata", "id": "a472d9c1617973ea35bc1f7a165a7637c8b18ffa", "size": "574", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/snmp_server/pytest_snmpserver-0.1.9/setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "2803" }, { "name": "Batchfile", "bytes": "34367" }, { "name": "C", "bytes": "53204" }, { "name": "C#", "bytes": "13882" }, { "name": "C++", "bytes": "135990" }, { "name": "CSS", "bytes": "262162" }, { "name": "CoffeeScript", "bytes": "1637" }, { "name": "Dockerfile", "bytes": "2641" }, { "name": "Elixir", "bytes": "2867" }, { "name": "Elm", "bytes": "700" }, { "name": "Go", "bytes": "878" }, { "name": "HTML", "bytes": "543696" }, { "name": "Haml", "bytes": "3958" }, { "name": "Haskell", "bytes": "196" }, { "name": "JavaScript", "bytes": "2206937" }, { "name": "Less", "bytes": "797" }, { "name": "Makefile", "bytes": "8974" }, { "name": "Mustache", "bytes": "1234" }, { "name": "PHP", "bytes": "14443" }, { "name": "PLSQL", "bytes": "4143" }, { "name": "Pascal", "bytes": "319" }, { "name": "Python", "bytes": "82458" }, { "name": "R", "bytes": "11824" }, { "name": "Ruby", "bytes": "509817" }, { "name": "Rust", "bytes": "5904" }, { "name": "SCSS", "bytes": "6053" }, { "name": "Shell", "bytes": "30987" } ], "symlink_target": "" }
""" garage.exceptions * created: 2014-08-23 Kevin Chan <kefin@makedostudio.com> * updated: 2014-11-21 kchan """ from django.core.exceptions import ImproperlyConfigured
{ "content_hash": "9a8b9ef12110ac948128fd5b4f09546b", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 57, "avg_line_length": 21.25, "alnum_prop": 0.7647058823529411, "repo_name": "kefin/django-garage", "id": "ac4951013ee7026200cacd67eabbe63692501eda", "size": "194", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "garage/exceptions.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "143210" } ], "symlink_target": "" }
class AddArgs: def __init__(self, args, add_rec=False, add_skip=False): self.args = args self.add_rec = add_rec self.add_skip = add_skip def extend(self, args): if self.args is not None: self.args.extend(args) else: self.args = args
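# --- Hedged usage sketch: extend() appends when args already holds a list
# and otherwise adopts the incoming list, so both objects below end up with
# the same argument list.
if __name__ == '__main__':
    a = AddArgs(['--verbose'], add_rec=True)
    a.extend(['--force'])
    b = AddArgs(None)
    b.extend(['--verbose', '--force'])
    assert a.args == b.args == ['--verbose', '--force']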
{ "content_hash": "d449583ddb79d03f77364b4d0cfbfec5", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 57, "avg_line_length": 19.53846153846154, "alnum_prop": 0.6535433070866141, "repo_name": "amol9/redcmd", "id": "080c8e566ec2be8d777f2d5a0a73cea2fe283885", "size": "256", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "redcmd/add_args.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "106122" }, { "name": "Shell", "bytes": "470" } ], "symlink_target": "" }
import datetime from south.db import db from south.v2 import DataMigration from django.db import models from radpress.readers import get_reader class Migration(DataMigration): no_dry_run = True def forwards(self, orm): reader = get_reader() model_names = ['radpress.Article', 'radpress.Page'] for model_name in model_names: for entry in orm[model_name].objects.all(): content_body, metadata = reader(entry.content).read() entry.content_body = content_body if model_name == 'radpress.Article': if entry.cover_image: image_id = entry.cover_image.id else: image_id = 'not specified' tags = entry.tags.all().values_list('name', flat=True) published = 'yes' if entry.is_published else 'no' content = [ entry.title, '#' * len(entry.title), ':slug: %s' % entry.slug, ':tags: %s' % ', '.join(tags), ':published: %s' % published, ':image: %s' % image_id, '', entry.content] entry.content = '\n'.join(content) entry.save() def backwards(self, orm): "Write your backwards methods here." models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': 
('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'radpress.article': { 'Meta': {'ordering': "('-created_at', '-updated_at')", 'object_name': 'Article'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}), 'content': ('django.db.models.fields.TextField', [], {}), 'content_body': ('django.db.models.fields.TextField', [], {}), 'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.EntryImage']", 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['radpress.Tag']", 'null': 'True', 'through': u"orm['radpress.ArticleTag']", 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}) }, u'radpress.articletag': { 'Meta': {'object_name': 'ArticleTag'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.Article']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.Tag']"}) }, u'radpress.entryimage': { 'Meta': {'object_name': 'EntryImage'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, u'radpress.menu': { 'Meta': {'unique_together': "(('order', 'page'),)", 'object_name': 'Menu'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.Page']", 'unique': 'True'}) }, u'radpress.page': { 'Meta': {'ordering': "('-created_at', '-updated_at')", 'object_name': 'Page'}, 'content': ('django.db.models.fields.TextField', [], {}), 'content_body': ('django.db.models.fields.TextField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}) }, u'radpress.tag': { 'Meta': {'object_name': 'Tag'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) } } complete_apps = ['radpress'] symmetrical = True
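# --- Hedged illustration (comments only, nothing executed): forwards()
# above rewrites each entry body into a reStructuredText-style block with a
# metadata field list. For a published article titled "Hello" with slug
# "hello", tags "a, b" and no cover image, the stored `content` becomes:
#
#   Hello
#   #####
#   :slug: hello
#   :tags: a, b
#   :published: yes
#   :image: not specified
#
#   <original body>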
{ "content_hash": "3dc32d1f079bd6087ac7916e3089df19", "timestamp": "", "source": "github", "line_count": 134, "max_line_length": 207, "avg_line_length": 63.03731343283582, "alnum_prop": 0.5289451876405824, "repo_name": "gkmngrgn/radpress", "id": "501930e5bb8e0dc6612bcd29bded406c9301dc37", "size": "8471", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "radpress/south_migrations/0006_zen_mode_integration.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "64166" }, { "name": "JavaScript", "bytes": "4458" }, { "name": "Makefile", "bytes": "5572" }, { "name": "Python", "bytes": "112404" }, { "name": "Shell", "bytes": "5100" } ], "symlink_target": "" }
"""Settings for SAML SSO. Version Added: 5.0 """ from django.urls import reverse from django.utils.translation import gettext_lazy as _ from djblets.siteconfig.models import SiteConfiguration try: from onelogin.saml2.constants import OneLogin_Saml2_Constants as constants except ImportError: constants = None from reviewboard.admin.server import build_server_url class SAMLSignatureAlgorithm(object): """Definitions for the signature algorithm. Version Added: 5.0 """ DSA_SHA1 = 'dsa-sha1' RSA_SHA1 = 'rsa-sha1' RSA_SHA256 = 'rsa-sha256' RSA_SHA384 = 'rsa-sha384' RSA_SHA512 = 'rsa-sha512' CHOICES = ( (DSA_SHA1, 'DSA-SHA1'), (RSA_SHA1, 'RSA-SHA1'), (RSA_SHA256, 'RSA-SHA256'), (RSA_SHA384, 'RSA-SHA384'), (RSA_SHA512, 'RSA-SHA512'), ) if constants: TO_SAML2_SETTING_MAP = { DSA_SHA1: constants.DSA_SHA1, RSA_SHA1: constants.RSA_SHA1, RSA_SHA256: constants.RSA_SHA256, RSA_SHA384: constants.RSA_SHA384, RSA_SHA512: constants.RSA_SHA512, } FROM_SAML2_SETTING_MAP = { constants.DSA_SHA1: DSA_SHA1, constants.RSA_SHA1: RSA_SHA1, constants.RSA_SHA256: RSA_SHA256, constants.RSA_SHA384: RSA_SHA384, constants.RSA_SHA512: RSA_SHA512, } else: TO_SAML2_SETTING_MAP = {} FROM_SAML2_SETTING_MAP = {} class SAMLDigestAlgorithm(object): """Definitions for the digest algorithm. Version Added: 5.0 """ SHA1 = 'sha1' SHA256 = 'sha256' SHA384 = 'sha384' SHA512 = 'sha512' CHOICES = ( (SHA1, 'SHA1'), (SHA256, 'SHA256'), (SHA384, 'SHA384'), (SHA512, 'SHA512'), ) if constants: TO_SAML2_SETTING_MAP = { SHA1: constants.SHA1, SHA256: constants.SHA256, SHA384: constants.SHA384, SHA512: constants.SHA512, } FROM_SAML2_SETTING_MAP = { constants.SHA1: SHA1, constants.SHA256: SHA256, constants.SHA384: SHA384, constants.SHA512: SHA512, } else: TO_SAML2_SETTING_MAP = {} FROM_SAML2_SETTING_MAP = {} class SAMLBinding(object): """Definitions for the binding type. Version Added: 5.0 """ HTTP_POST = 'http-post' HTTP_REDIRECT = 'http-redirect' CHOICES = ( (HTTP_POST, _('HTTP POST')), (HTTP_REDIRECT, _('HTTP Redirect')), ) if constants: TO_SAML2_SETTING_MAP = { HTTP_POST: constants.BINDING_HTTP_POST, HTTP_REDIRECT: constants.BINDING_HTTP_REDIRECT, } FROM_SAML2_SETTING_MAP = { constants.BINDING_HTTP_POST: HTTP_POST, constants.BINDING_HTTP_REDIRECT: HTTP_REDIRECT, } else: TO_SAML2_SETTING_MAP = {} FROM_SAML2_SETTING_MAP = {} def get_saml2_settings(): """Return the SAML2.0 settings. Version Added: 5.0 Returns: dict: A dictionary of the settings to use for SAML operations. """ siteconfig = SiteConfiguration.objects.get_current() assert constants is not None return { 'strict': True, 'debug': True, 'idp': { 'entityId': siteconfig.get('saml_issuer'), 'singleSignOnService': { 'url': siteconfig.get('saml_sso_url'), 'binding': SAMLBinding.TO_SAML2_SETTING_MAP[ siteconfig.get('saml_sso_binding_type')], }, 'singleLogoutService': { 'url': siteconfig.get('saml_slo_url'), 'binding': SAMLBinding.TO_SAML2_SETTING_MAP[ siteconfig.get('saml_slo_binding_type')], }, 'x509cert': siteconfig.get('saml_verification_cert'), }, 'sp': { 'entityId': build_server_url( reverse('sso:saml:metadata', kwargs={'backend_id': 'saml'})), 'assertionConsumerService': { 'url': build_server_url( reverse('sso:saml:acs', kwargs={'backend_id': 'saml'})), 'binding': constants.BINDING_HTTP_POST, }, 'singleLogoutService': { 'url': build_server_url( reverse('sso:saml:sls', kwargs={'backend_id': 'saml'})), 'binding': constants.BINDING_HTTP_REDIRECT, }, 'NameIDFormat': constants.NAMEID_PERSISTENT, 'x509cert': '', 'privateKey': '', }, }
{ "content_hash": "79f35a65c4de6b537009dbad7f90a281", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 78, "avg_line_length": 26.72, "alnum_prop": 0.5410607356715141, "repo_name": "reviewboard/reviewboard", "id": "6982227519be0e3cbb2fa57c9b9ec35782167f4c", "size": "4676", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "reviewboard/accounts/sso/backends/saml/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "10167" }, { "name": "Dockerfile", "bytes": "7721" }, { "name": "HTML", "bytes": "226489" }, { "name": "JavaScript", "bytes": "3991608" }, { "name": "Less", "bytes": "438017" }, { "name": "Python", "bytes": "9186415" }, { "name": "Shell", "bytes": "3855" } ], "symlink_target": "" }
import rospy
from pibot import srv, msg

pub = None
prior = 24


def handler(req):
    global pub
    m = msg.window()
    m.prior = prior
    m.timeout = req.timeout
    m.content = req.message
    m.id = 'msgbox'
    pub.publish(m)
    return True


def init():
    rospy.init_node('screen_msgbox')
    global pub
    pub = rospy.Publisher('/pibot/windows', msg.window, queue_size=1)
    # Keep a reference to the service so it stays alive for the node's lifetime.
    s = rospy.Service('srv_msgbox', srv.msgbox, handler)


if __name__ == '__main__':
    init()
    rospy.spin()
{ "content_hash": "3b51ae754f61fc59f10937621e31bd79", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 69, "avg_line_length": 16.666666666666668, "alnum_prop": 0.6181818181818182, "repo_name": "psby233/pibot", "id": "a1ccf42fe5d0a2bb1b4743468d9f7b4e74f7c238", "size": "573", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/screen_msgbox.py", "mode": "33261", "license": "mit", "language": [ { "name": "CMake", "bytes": "6756" }, { "name": "Python", "bytes": "63058" } ], "symlink_target": "" }
"""Tests for google.protobuf.message_factory.""" import os os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp' os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2' # We must set the implementation version above before the google3 imports. # pylint: disable=g-import-not-at-top from google.apputils import basetest from google.protobuf.internal import api_implementation # Run all tests from the original module by putting them in our namespace. # pylint: disable=wildcard-import from google.protobuf.internal.message_factory_test import * class ConfirmCppApi2Test(basetest.TestCase): def testImplementationSetting(self): self.assertEqual('cpp', api_implementation.Type()) self.assertEqual(2, api_implementation.Version()) if __name__ == '__main__': basetest.main()
{ "content_hash": "29bddceea1fd3cca2326824af15a8071", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 74, "avg_line_length": 33.625, "alnum_prop": 0.7695167286245354, "repo_name": "da2ce7/protobuf", "id": "fb52e1b1c6cb4e08f185e9cbedc3ca34fab12934", "size": "2446", "binary": false, "copies": "25", "ref": "refs/heads/master", "path": "python/google/protobuf/pyext/message_factory_cpp2_test.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "19595" }, { "name": "C++", "bytes": "4100771" }, { "name": "Emacs Lisp", "bytes": "7798" }, { "name": "Java", "bytes": "1564975" }, { "name": "Objective-C", "bytes": "31244" }, { "name": "Python", "bytes": "715472" }, { "name": "Shell", "bytes": "16411" }, { "name": "VimL", "bytes": "3731" } ], "symlink_target": "" }
import logging
if __name__ == '__main__':
    logging.basicConfig()
_log = logging.getLogger(__name__)

import profile
import unittest


class TestIssue0048 (unittest.TestCase):
    def testProfile (self):
        amap = profile.AbstractFeatureBaseType._AttributeMap
        self.assertEqual(1, len(amap))

if __name__ == '__main__':
    unittest.main()
{ "content_hash": "1f4acd0bf71a637498dea546789400d1", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 60, "avg_line_length": 25.142857142857142, "alnum_prop": 0.6647727272727273, "repo_name": "pabigot/pyxb", "id": "065d36a34245928044a0d3e7e2d0ecfc1520d03c", "size": "376", "binary": false, "copies": "1", "ref": "refs/heads/next", "path": "tests/trac/issue-0048/check.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1927697" }, { "name": "Shell", "bytes": "20792" } ], "symlink_target": "" }
"""Store game events in database Usage: munerator [options] store Options: -v --verbose Verbose logging --context-socket url ZMQ socket for conext event [default: tcp://127.0.0.1:9002] --database ip:port Host and port for mongo database [default: 127.0.0.1:27017] --rcon-socket url ZMQ socket for rcon commands [default: tcp://127.0.0.1:9005] """ import json import logging import datetime from functools import partial import zmq from docopt import docopt from munerator.common.models import Games, Players, Votes, Gamemaps from munerator.common.database import setup_eve_mongoengine from munerator.games import GAMETYPE_IDS log = logging.getLogger(__name__) def handle_events(in_socket, rcon_socket): """ Loop over incoming messages and handle them individually. """ # prime commands for current game info rcon_socket.send_string('status') rcon_socket.send_string('getstatus') handled = 0 log.info('listening for game events') while True: msg = in_socket.recv_string() log.debug('got: %s' % msg) data = json.loads(msg.split(' ', 1)[-1]) kind = data.get('kind') try: handle_event(kind, data, rcon_socket) except: log.exception('error in event handling %s' % msg) handled += 1 if not handled % 100: log.info('handled another 100 messages (total %s)' % handled) def handle_event(kind, data, rcon_socket): """ Parse event message and update database with new information. """ # get player and/or game id from data player_id = str(data.get('client_info', {}).get('guid', '')) timestamp = str(data.get('game_info', {}).get('timestamp', '')) player = Players.objects(guids=player_id).first() if not player: player = Players(guids=[player_id]) log.info('added new player %s' % data.get('client_info').get('name')) player.save() game, new_game = Games.objects.get_or_create(timestamp=timestamp) if timestamp else (None, None) # handle player updates if player and kind in ['clientbegin', 'clientdisconnect', 'clientuserinfochanged', 'playerscore', 'clientstatus']: # on name change, store previous name if data['client_info'] and player.name != data['client_info']['name']: player.update(add_to_set__names=data['client_info']['name']) # update variable data player.update(**{'set__%s' % k: v for k, v in data['client_info'].items() if k in player.update_fields}) # add player to game if game: game.update(add_to_set__players=player) # set last seen time player.update(set__last_seen=datetime.datetime.now()) log.debug('updated player') # handle game updates if game and (kind in ['initgame', 'shutdowngame', 'getstatus'] or new_game): # reset all current games if kind == 'initgame': Games.objects(current=True).update(set__current=False) # update variable data game.update(**{'set__%s' % k: v for k, v in data['game_info'].items() if k in game.update_fields}) # set game map gamemap, new = Gamemaps.objects.get_or_create(name=data['game_info']['mapname']) if new: log.info('added map %s' % gamemap.name) if game.gametype and int(game.gametype) in GAMETYPE_IDS: gamemap.update(add_to_set__gametypes=int(game.gametype)) gamemap.save() game.update(set__gamemap=gamemap) # store game extra settings if data.get('extras'): game.options = data.get('extras') game.save() log.debug('updated game') # on game shutdown if kind == 'shutdowngame': if game.players: # update gamemap with last played time and increment play count gamemap.update(inc__times_played=1, set__last_played=datetime.datetime.now()) else: # delete game if not played game.delete() # reset players online status just to be sure Players.objects(online=True).update(set__online=False, set__score=None, 
set__team=None) # handle votes if kind == 'say' and player and game: vote = None if data.get('text') in ['+1', 'gg', 'GG', 'GGG', 'like', 'fuckthismaprocks', '++', '+1337']: vote = 1 elif data.get('text') in ['-1', '--1', '-11', '-1000', '-2', 'dislike', 'hate', 'RAGE!!!', 'fuckthismap', '--']: vote = -1 if vote: num_players = data.get('game_info', {}).get('num_players') vote_obj = Votes( player=player, game=game, gamemap=game.gamemap, gametype=game.gametype, mapname=game.gamemap.name, num_players=num_players, vote=vote ) rcon_socket.send_string('say %s^7 your vote has been counted' % player.name) vote_obj.save() log.info('saved vote') # add vote to game game.update(add_to_set__votes=vote_obj) def main(argv): args = docopt(__doc__, argv=argv) # setup zmq input socket context = zmq.Context() in_socket = context.socket(zmq.SUB) in_socket.connect(args['--context-socket']) # apply message filters filters = [ 'initgame', 'shutdowngame', 'clientdisconnect', 'say', 'clientbegin', 'clientuserinfochanged', 'playerscore' ] add_filter = partial(in_socket.setsockopt, zmq.SUBSCRIBE) map(add_filter, filters) # setup rcon socket rcon_socket = context.socket(zmq.PUSH) rcon_socket.connect(args['--rcon-socket']) # setup database host, port = args['--database'].split(':') setup_eve_mongoengine(host, port) # start event loop handle_events(in_socket, rcon_socket)
{ "content_hash": "f2248a14e98f2328342a45feb0890018", "timestamp": "", "source": "github", "line_count": 174, "max_line_length": 120, "avg_line_length": 33.7816091954023, "alnum_prop": 0.60326641714869, "repo_name": "aequitas/munerator", "id": "348863d0c75c5baece8974aa053d5bdc6ec38c00", "size": "5878", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "munerator/store.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "539" }, { "name": "HTML", "bytes": "2052" }, { "name": "Handlebars", "bytes": "11697" }, { "name": "JavaScript", "bytes": "27664" }, { "name": "Makefile", "bytes": "2090" }, { "name": "Python", "bytes": "71095" }, { "name": "Shell", "bytes": "454" } ], "symlink_target": "" }
from misaka import Markdown, BaseRenderer, HtmlRenderer, \
    SmartyPants, \
    EXT_FENCED_CODE, EXT_TABLES, EXT_AUTOLINK, EXT_STRIKETHROUGH, \
    EXT_SUPERSCRIPT, HTML_USE_XHTML, \
    TABLE_ALIGN_L, TABLE_ALIGN_R, TABLE_ALIGN_C, \
    TABLE_ALIGNMASK, TABLE_HEADER


class BleepRenderer(HtmlRenderer, SmartyPants):
    def block_code(self, text, lang):
        if lang:
            lang = ' class="%s"' % lang
        else:
            lang = ''
        return '\n<pre%s><code>%s</code></pre>\n' % (lang, text)

    def block_quote(self, text):
        return '\n<blockquote>%s</blockquote>\n' % text

    def block_html(self, text):
        return '\n%s' % text

    def header(self, text, level):
        return '\n<h%d>%s</h%d>\n' % (level, text, level)

    def hrule(self):
        if self.flags & HTML_USE_XHTML:
            return '\n<hr/>\n'
        else:
            return '\n<hr>\n'

    def list(self, text, is_ordered):
        if is_ordered:
            return '\n<ol>%s</ol>\n' % text
        else:
            return '\n<ul>%s</ul>\n' % text

    def list_item(self, text, is_ordered):
        return '<li>%s</li>\n' % text

    def paragraph(self, text):
        # No hard wrapping yet. Maybe with:
        # http://docs.python.org/library/textwrap.html
        return '\n<p>%s</p>\n' % text

    def table(self, header, body):
        return '\n<table><thead>\n%s</thead><tbody>\n%s</tbody></table>\n' % \
            (header, body)

    def table_row(self, text):
        return '<tr>\n%s</tr>\n' % text

    def table_cell(self, text, flags):
        # Check for the header bit before it is masked away with TABLE_ALIGNMASK;
        # masking first would make the header check below dead code.
        is_header = flags & TABLE_HEADER
        flags = flags & TABLE_ALIGNMASK
        # The align strings need a leading space so they don't fuse with the tag name.
        if flags == TABLE_ALIGN_C:
            align = ' align="center"'
        elif flags == TABLE_ALIGN_L:
            align = ' align="left"'
        elif flags == TABLE_ALIGN_R:
            align = ' align="right"'
        else:
            align = ''
        if is_header:
            return '<th%s>%s</th>\n' % (align, text)
        else:
            return '<td%s>%s</td>\n' % (align, text)

    def autolink(self, link, is_email):
        if is_email:
            return '<a href="mailto:%(link)s">%(link)s</a>' % {'link': link}
        else:
            return '<a href="%(link)s">%(link)s</a>' % {'link': link}

    def preprocess(self, text):
        return text.replace(' ', '_')


md = Markdown(BleepRenderer(),
    EXT_FENCED_CODE | EXT_TABLES | EXT_AUTOLINK |
    EXT_STRIKETHROUGH | EXT_SUPERSCRIPT)

print(md.render('''
Unordered

- One
- Two
- Three

And now ordered:

1. Three
2. Two
3. One

An email: example@example.com

And an URL: http://example.com
'''))
{ "content_hash": "3cc96bd5578c538a61a01a84e0e8b2a1", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 78, "avg_line_length": 25.742574257425744, "alnum_prop": 0.5373076923076923, "repo_name": "hhatto/python-hoedown", "id": "871f9d647a8ce835e1b9294a55164bca63acffe5", "size": "2625", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "benchmark/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "157343" }, { "name": "HTML", "bytes": "158078" }, { "name": "Makefile", "bytes": "2031" }, { "name": "Perl", "bytes": "3890" }, { "name": "Python", "bytes": "50139" } ], "symlink_target": "" }
import pytest

from cutadapt.__main__ import (
    main,
    parse_cutoffs,
    parse_lengths,
    CommandLineError,
    setup_logging,
)


def test_help():
    with pytest.raises(SystemExit) as e:
        main(["--help"])
    assert e.value.args[0] == 0


def test_parse_cutoffs():
    assert parse_cutoffs("5") == (0, 5)
    assert parse_cutoffs("6,7") == (6, 7)
    with pytest.raises(CommandLineError):
        parse_cutoffs("a,7")
    with pytest.raises(CommandLineError):
        parse_cutoffs("a")
    with pytest.raises(CommandLineError):
        parse_cutoffs("a,7")
    with pytest.raises(CommandLineError):
        parse_cutoffs("1,2,3")


def test_parse_lengths():
    assert parse_lengths("25") == (25,)
    assert parse_lengths("17:25") == (17, 25)
    assert parse_lengths("25:") == (25, None)
    assert parse_lengths(":25") == (None, 25)
    with pytest.raises(CommandLineError):
        parse_lengths("1:2:3")
    with pytest.raises(CommandLineError):
        parse_lengths("a:2")
    with pytest.raises(CommandLineError):
        parse_lengths("a")
    with pytest.raises(CommandLineError):
        parse_lengths("2:a")
    with pytest.raises(CommandLineError):
        parse_lengths(":")


def test_setup_logging():
    import logging

    logger = logging.getLogger(__name__)
    setup_logging(logger, log_to_stderr=False, quiet=False, minimal=False, debug=False)
    logger.info("Log message")
    setup_logging(logger, log_to_stderr=False, debug=1)
    setup_logging(logger, log_to_stderr=False, quiet=True)
    setup_logging(logger, log_to_stderr=False, minimal=True)
{ "content_hash": "0b5919e5a79aa7d064cc068526dc2f0d", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 87, "avg_line_length": 28.428571428571427, "alnum_prop": 0.6413316582914573, "repo_name": "marcelm/cutadapt", "id": "ee137bfc7cad935abf7ef378916b455cd1bf864e", "size": "1592", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/test_main.py", "mode": "33188", "license": "mit", "language": [ { "name": "Cython", "bytes": "32768" }, { "name": "Python", "bytes": "383277" } ], "symlink_target": "" }
from .test_base_doc import AbstractTestDoc


class TestLatinDoc(AbstractTestDoc):
    text = 'Omnia gallia\nin tres part-\nes divisa est.'
    language = 'latin'

    def test_normalize(self):
        self.ready()
        test = self.doc.normalize()
        compare = 'Omnia gallia\nin tres part-\nes diuisa est.'
        return self.assertEqual(test, compare)

    def test_stemmify(self):
        self.ready()
        test = self.doc.stemmify()
        compare = 'omn gallia\nin tr part-\n divis est. '
        return self.assertEqual(test, compare)
{ "content_hash": "d3fd602b83c59063d26f4216ee98ca7a", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 63, "avg_line_length": 30.666666666666668, "alnum_prop": 0.6413043478260869, "repo_name": "thePortus/arakhne", "id": "d6761f6fd8318d72add3a718f26cb07e2f926642", "size": "552", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "arakhne/doc/tests/test_latin_doc.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "84723" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals import base64 import errno import hashlib import json import os import shutil from StringIO import StringIO from django.core.files import temp as tempfile from django.core.files.uploadedfile import SimpleUploadedFile from django.http.multipartparser import MultiPartParser from django.test import TestCase, client from django.utils import unittest from . import uploadhandler from .models import FileModel, temp_storage, UPLOAD_TO UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg' class FileUploadTests(TestCase): def test_simple_upload(self): with open(__file__, 'rb') as fp: post_data = { 'name': 'Ringo', 'file_field': fp, } response = self.client.post('/file_uploads/upload/', post_data) self.assertEqual(response.status_code, 200) def test_large_upload(self): tdir = tempfile.gettempdir() file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir) file1.write(b'a' * (2 ** 21)) file1.seek(0) file2 = tempfile.NamedTemporaryFile(suffix=".file2", dir=tdir) file2.write(b'a' * (10 * 2 ** 20)) file2.seek(0) post_data = { 'name': 'Ringo', 'file_field1': file1, 'file_field2': file2, } for key in post_data.keys(): try: post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest() post_data[key].seek(0) except AttributeError: post_data[key + '_hash'] = hashlib.sha1(post_data[key]).hexdigest() response = self.client.post('/file_uploads/verify/', post_data) self.assertEqual(response.status_code, 200) def test_base64_upload(self): test_string = "This data will be transmitted base64-encoded." payload = "\r\n".join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file"; filename="test.txt"', 'Content-Type: application/octet-stream', 'Content-Transfer-Encoding: base64', '', base64.b64encode(test_string), '--' + client.BOUNDARY + '--', '', ]).encode('utf-8') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/file_uploads/echo_content/", 'REQUEST_METHOD': 'POST', 'wsgi.input': client.FakePayload(payload), } response = self.client.request(**r) received = json.loads(response.content) self.assertEqual(received['file'], test_string) def test_unicode_file_name(self): tdir = tempfile.gettempdir() # This file contains chinese symbols and an accented char in the name. with open(os.path.join(tdir, UNICODE_FILENAME.encode('utf-8')), 'w+b') as file1: file1.write(b'b' * (2 ** 10)) file1.seek(0) post_data = { 'file_unicode': file1, } response = self.client.post('/file_uploads/unicode_name/', post_data) try: os.unlink(file1.name) except: pass self.assertEqual(response.status_code, 200) def test_dangerous_file_names(self): """Uploaded file names should be sanitized before ever reaching the view.""" # This test simulates possible directory traversal attacks by a # malicious uploader We have to do some monkeybusiness here to construct # a malicious payload with an invalid file name (containing os.sep or # os.pardir). This similar to what an attacker would need to do when # trying such an attack. scary_file_names = [ "/tmp/hax0rd.txt", # Absolute path, *nix-style. "C:\\Windows\\hax0rd.txt", # Absolute path, win-syle. "C:/Windows/hax0rd.txt", # Absolute path, broken-style. "\\tmp\\hax0rd.txt", # Absolute path, broken in a different way. "/tmp\\hax0rd.txt", # Absolute path, broken by mixing. "subdir/hax0rd.txt", # Descendant path, *nix-style. "subdir\\hax0rd.txt", # Descendant path, win-style. "sub/dir\\hax0rd.txt", # Descendant path, mixed. "../../hax0rd.txt", # Relative path, *nix-style. 
"..\\..\\hax0rd.txt", # Relative path, win-style. "../..\\hax0rd.txt" # Relative path, mixed. ] payload = [] for i, name in enumerate(scary_file_names): payload.extend([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name), 'Content-Type: application/octet-stream', '', 'You got pwnd.' ]) payload.extend([ '--' + client.BOUNDARY + '--', '', ]) payload = "\r\n".join(payload).encode('utf-8') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/file_uploads/echo/", 'REQUEST_METHOD': 'POST', 'wsgi.input': client.FakePayload(payload), } response = self.client.request(**r) # The filenames should have been sanitized by the time it got to the view. recieved = json.loads(response.content) for i, name in enumerate(scary_file_names): got = recieved["file%s" % i] self.assertEqual(got, "hax0rd.txt") def test_filename_overflow(self): """File names over 256 characters (dangerous on some platforms) get fixed up.""" name = "%s.txt" % ("f"*500) payload = "\r\n".join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file"; filename="%s"' % name, 'Content-Type: application/octet-stream', '', 'Oops.' '--' + client.BOUNDARY + '--', '', ]).encode('utf-8') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/file_uploads/echo/", 'REQUEST_METHOD': 'POST', 'wsgi.input': client.FakePayload(payload), } got = json.loads(self.client.request(**r).content) self.assertTrue(len(got['file']) < 256, "Got a long file name (%s characters)." % len(got['file'])) def test_truncated_multipart_handled_gracefully(self): """ If passed an incomplete multipart message, MultiPartParser does not attempt to read beyond the end of the stream, and simply will handle the part that can be parsed gracefully. """ payload = "\r\n".join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file"; filename="foo.txt"', 'Content-Type: application/octet-stream', '', 'file contents' '--' + client.BOUNDARY + '--', '', ]).encode('utf-8') payload = payload[:-10] r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/file_uploads/echo/', 'REQUEST_METHOD': 'POST', 'wsgi.input': client.FakePayload(payload), } got = json.loads(self.client.request(**r).content) self.assertEqual(got, {}) def test_empty_multipart_handled_gracefully(self): """ If passed an empty multipart message, MultiPartParser will return an empty QueryDict. """ r = { 'CONTENT_LENGTH': 0, 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/file_uploads/echo/', 'REQUEST_METHOD': 'POST', 'wsgi.input': client.FakePayload(b''), } got = json.loads(self.client.request(**r).content) self.assertEqual(got, {}) def test_custom_upload_handler(self): # A small file (under the 5M quota) smallfile = tempfile.NamedTemporaryFile() smallfile.write(b'a' * (2 ** 21)) smallfile.seek(0) # A big file (over the quota) bigfile = tempfile.NamedTemporaryFile() bigfile.write(b'a' * (10 * 2 ** 20)) bigfile.seek(0) # Small file posting should work. response = self.client.post('/file_uploads/quota/', {'f': smallfile}) got = json.loads(response.content) self.assertTrue('f' in got) # Large files don't go through. 
response = self.client.post("/file_uploads/quota/", {'f': bigfile}) got = json.loads(response.content) self.assertTrue('f' not in got) def test_broken_custom_upload_handler(self): f = tempfile.NamedTemporaryFile() f.write(b'a' * (2 ** 21)) f.seek(0) # AttributeError: You cannot alter upload handlers after the upload has been processed. self.assertRaises( AttributeError, self.client.post, '/file_uploads/quota/broken/', {'f': f} ) def test_fileupload_getlist(self): file1 = tempfile.NamedTemporaryFile() file1.write(b'a' * (2 ** 23)) file1.seek(0) file2 = tempfile.NamedTemporaryFile() file2.write(b'a' * (2 * 2 ** 18)) file2.seek(0) file2a = tempfile.NamedTemporaryFile() file2a.write(b'a' * (5 * 2 ** 20)) file2a.seek(0) response = self.client.post('/file_uploads/getlist_count/', { 'file1': file1, 'field1': 'test', 'field2': 'test3', 'field3': 'test5', 'field4': 'test6', 'field5': 'test7', 'file2': (file2, file2a) }) got = json.loads(response.content) self.assertEqual(got.get('file1'), 1) self.assertEqual(got.get('file2'), 2) def test_file_error_blocking(self): """ The server should not block when there are upload errors (bug #8622). This can happen if something -- i.e. an exception handler -- tries to access POST while handling an error in parsing POST. This shouldn't cause an infinite loop! """ class POSTAccessingHandler(client.ClientHandler): """A handler that'll access POST during an exception.""" def handle_uncaught_exception(self, request, resolver, exc_info): ret = super(POSTAccessingHandler, self).handle_uncaught_exception(request, resolver, exc_info) p = request.POST return ret # Maybe this is a little more complicated that it needs to be; but if # the django.test.client.FakePayload.read() implementation changes then # this test would fail. So we need to know exactly what kind of error # it raises when there is an attempt to read more than the available bytes: try: client.FakePayload(b'a').read(2) except Exception as reference_error: pass # install the custom handler that tries to access request.POST self.client.handler = POSTAccessingHandler() with open(__file__, 'rb') as fp: post_data = { 'name': 'Ringo', 'file_field': fp, } try: response = self.client.post('/file_uploads/upload_errors/', post_data) except reference_error.__class__ as err: self.assertFalse( str(err) == str(reference_error), "Caught a repeated exception that'll cause an infinite loop in file uploads." ) except Exception as err: # CustomUploadError is the error that should have been raised self.assertEqual(err.__class__, uploadhandler.CustomUploadError) def test_filename_case_preservation(self): """ The storage backend shouldn't mess with the case of the filenames uploaded. """ # Synthesize the contents of a file upload with a mixed case filename # so we don't have to carry such a file in the Django tests source code # tree. vars = {'boundary': 'oUrBoUnDaRyStRiNg'} post_data = [ '--%(boundary)s', 'Content-Disposition: form-data; name="file_field"; ' 'filename="MiXeD_cAsE.txt"', 'Content-Type: application/octet-stream', '', 'file contents\n' '', '--%(boundary)s--\r\n', ] response = self.client.post( '/file_uploads/filename_case/', '\r\n'.join(post_data) % vars, 'multipart/form-data; boundary=%(boundary)s' % vars ) self.assertEqual(response.status_code, 200) id = int(response.content) obj = FileModel.objects.get(pk=id) # The name of the file uploaded and the file stored in the server-side # shouldn't differ. 
self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt') class DirectoryCreationTests(unittest.TestCase): """ Tests for error handling during directory creation via _save_FIELD_file (ticket #6450) """ def setUp(self): self.obj = FileModel() if not os.path.isdir(temp_storage.location): os.makedirs(temp_storage.location) if os.path.isdir(UPLOAD_TO): os.chmod(UPLOAD_TO, 0700) shutil.rmtree(UPLOAD_TO) def tearDown(self): os.chmod(temp_storage.location, 0700) shutil.rmtree(temp_storage.location) def test_readonly_root(self): """Permission errors are not swallowed""" os.chmod(temp_storage.location, 0500) try: self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x')) except OSError as err: self.assertEqual(err.errno, errno.EACCES) except Exception: self.fail("OSError [Errno %s] not raised." % errno.EACCES) def test_not_a_directory(self): """The correct IOError is raised when the upload directory name exists but isn't a directory""" # Create a file with the upload directory name open(UPLOAD_TO, 'wb').close() try: self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x')) except IOError as err: # The test needs to be done on a specific string as IOError # is raised even without the patch (just not early enough) self.assertEqual(err.args[0], "%s exists and is not a directory." % UPLOAD_TO) except: self.fail("IOError not raised") class MultiParserTests(unittest.TestCase): def test_empty_upload_handlers(self): # We're not actually parsing here; just checking if the parser properly # instantiates with empty upload handlers. parser = MultiPartParser({ 'CONTENT_TYPE': 'multipart/form-data; boundary=_foo', 'CONTENT_LENGTH': '1' }, StringIO('x'), [], 'utf-8')
{ "content_hash": "b17840b264497008ca224f79e22c2760", "timestamp": "", "source": "github", "line_count": 403, "max_line_length": 110, "avg_line_length": 38.178660049627794, "alnum_prop": 0.5614194722474978, "repo_name": "kennethlove/django", "id": "a7424639b4f49aa8e0a90aaf94076aad6198f8f9", "size": "15416", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "tests/regressiontests/file_uploads/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "89077" }, { "name": "Python", "bytes": "8102530" }, { "name": "Shell", "bytes": "4241" } ], "symlink_target": "" }
"""Hook to run tf.GraphKeys.UPDATE_OPS.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf class UpdateOpsHook(tf.train.SessionRunHook): """Hook to run assign_ops.""" def before_run(self, run_context): del run_context update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) return tf.train.SessionRunArgs(update_ops)
{ "content_hash": "6c2e9dd718d386cf2c1eb3ee428cd759", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 59, "avg_line_length": 28.133333333333333, "alnum_prop": 0.7298578199052133, "repo_name": "mlperf/training_results_v0.5", "id": "c2615c661cead4a997cf9ee9f00ad385700ffec4", "size": "1028", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/update_ops_hook.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "5720" }, { "name": "C++", "bytes": "1288180" }, { "name": "CMake", "bytes": "40880" }, { "name": "CSS", "bytes": "32420" }, { "name": "Cuda", "bytes": "1362093" }, { "name": "Dockerfile", "bytes": "19488" }, { "name": "Go", "bytes": "1088660" }, { "name": "HTML", "bytes": "19756888" }, { "name": "Java", "bytes": "45405" }, { "name": "JavaScript", "bytes": "302838" }, { "name": "Jupyter Notebook", "bytes": "9104667" }, { "name": "Lua", "bytes": "4430" }, { "name": "Makefile", "bytes": "3652" }, { "name": "Python", "bytes": "31508548" }, { "name": "Scala", "bytes": "106211" }, { "name": "Shell", "bytes": "409745" } ], "symlink_target": "" }
import logging import os import sys import numpy as np import tensorflow as tf from ludwig.constants import AUDIO, BACKFILL, TIED, TYPE, COLUMN, PROC_COLUMN, \ PREPROCESSING, NAME from ludwig.encoders.sequence_encoders import StackedCNN, ParallelCNN, \ StackedParallelCNN, StackedRNN, SequencePassthroughEncoder, StackedCNNRNN from ludwig.features.sequence_feature import SequenceInputFeature from ludwig.utils.audio_utils import calculate_mean, calculate_var from ludwig.utils.audio_utils import get_fbank from ludwig.utils.audio_utils import get_group_delay from ludwig.utils.audio_utils import get_length_in_samp from ludwig.utils.audio_utils import get_max_length_stft_based from ludwig.utils.audio_utils import get_non_symmetric_length from ludwig.utils.audio_utils import get_phase_stft_magnitude from ludwig.utils.audio_utils import get_stft_magnitude from ludwig.utils.data_utils import get_abs_path from ludwig.utils.misc_utils import set_default_value from ludwig.utils.misc_utils import set_default_values logger = logging.getLogger(__name__) class AudioFeatureMixin(object): type = AUDIO preprocessing_defaults = { 'audio_file_length_limit_in_s': 7.5, 'missing_value_strategy': BACKFILL, 'in_memory': True, 'padding_value': 0, 'norm': None, 'audio_feature': { TYPE: 'raw', } } @staticmethod def cast_column(feature, dataset_df, backend): return dataset_df @staticmethod def get_feature_meta(column, preprocessing_parameters, backend): try: import soundfile except ImportError: logger.error( ' soundfile is not installed. ' 'In order to install all audio feature dependencies run ' 'pip install ludwig[audio]' ) sys.exit(-1) audio_feature_dict = preprocessing_parameters['audio_feature'] first_audio_file_path = column.head(1)[0] _, sampling_rate_in_hz = soundfile.read(first_audio_file_path) feature_dim = AudioFeatureMixin._get_feature_dim(audio_feature_dict, sampling_rate_in_hz) audio_file_length_limit_in_s = preprocessing_parameters[ 'audio_file_length_limit_in_s'] max_length = AudioFeatureMixin._get_max_length_feature( audio_feature_dict, sampling_rate_in_hz, audio_file_length_limit_in_s) return { 'feature_dim': feature_dim, 'sampling_rate_in_hz': sampling_rate_in_hz, 'max_length': max_length, 'reshape': (max_length, feature_dim) } @staticmethod def _get_feature_dim(audio_feature_dict, sampling_rate_in_hz): feature_type = audio_feature_dict[TYPE] if feature_type == 'raw': feature_dim = 1 elif feature_type == 'stft_phase': feature_dim_symmetric = get_length_in_samp( audio_feature_dict['window_length_in_s'], sampling_rate_in_hz) feature_dim = 2 * get_non_symmetric_length(feature_dim_symmetric) elif feature_type in ['stft', 'group_delay']: feature_dim_symmetric = get_length_in_samp( audio_feature_dict['window_length_in_s'], sampling_rate_in_hz) feature_dim = get_non_symmetric_length(feature_dim_symmetric) elif feature_type == 'fbank': feature_dim = audio_feature_dict['num_filter_bands'] else: raise ValueError('{} is not recognized.'.format(feature_type)) return feature_dim @staticmethod def _process_in_memory( column, src_path, audio_feature_dict, feature_dim, max_length, padding_value, normalization_type, audio_file_length_limit_in_s, backend ): try: import soundfile except ImportError: logger.error( ' soundfile is not installed. 
' 'In order to install all audio feature dependencies run ' 'pip install ludwig[audio]' ) sys.exit(-1) def read_audio(path): filepath = get_abs_path(src_path, path) return soundfile.read(filepath) df_engine = backend.df_engine raw_audio = df_engine.map_objects(column, read_audio) processed_audio = df_engine.map_objects( raw_audio, lambda row: AudioFeatureMixin._transform_to_feature( audio=row[0], sampling_rate_in_hz=row[1], audio_feature_dict=audio_feature_dict, feature_dim=feature_dim, max_length=max_length, padding_value=padding_value, normalization_type=normalization_type ) ) audio_stats = df_engine.map_objects( raw_audio, lambda row: AudioFeatureMixin._get_stats( audio=row[0], sampling_rate_in_hz=row[1], max_length_in_s=audio_file_length_limit_in_s, ) ) def reduce(series): merged_stats = None for audio_stats in series: if merged_stats is None: merged_stats = audio_stats.copy() else: AudioFeatureMixin._merge_stats(merged_stats, audio_stats) return merged_stats merged_stats = df_engine.reduce_objects(audio_stats, reduce) merged_stats['mean'] = calculate_mean(merged_stats['sum'], merged_stats['count']) merged_stats['var'] = calculate_var(merged_stats['sum'], merged_stats['sum2'], merged_stats['count']) return processed_audio, merged_stats @staticmethod def _transform_to_feature( audio, sampling_rate_in_hz, audio_feature_dict, feature_dim, max_length, padding_value, normalization_type ): feature_type = audio_feature_dict[TYPE] if feature_type == 'raw': audio_feature = np.expand_dims(audio, axis=-1) elif feature_type in ['stft', 'stft_phase', 'group_delay', 'fbank']: audio_feature = np.transpose( AudioFeatureMixin._get_2D_feature(audio, feature_type, audio_feature_dict, sampling_rate_in_hz)) else: raise ValueError('{} is not recognized.'.format(feature_type)) if normalization_type == 'per_file': mean = np.mean(audio_feature, axis=0) std = np.std(audio_feature, axis=0) audio_feature = np.divide((audio_feature - mean), std + 1.0e-10) elif normalization_type == 'global': raise ValueError('not implemented yet') feature_length = audio_feature.shape[0] broadcast_feature_length = min(feature_length, max_length) audio_feature_padded = np.full((max_length, feature_dim), padding_value, dtype=np.float32) audio_feature_padded[:broadcast_feature_length, :] = audio_feature[ :max_length, :] return audio_feature_padded @staticmethod def _get_stats(audio, sampling_rate_in_hz, max_length_in_s): audio_length_in_s = audio.shape[-1] / float(sampling_rate_in_hz) return { 'count': 1, 'sum': audio_length_in_s, 'sum2': audio_length_in_s * audio_length_in_s, 'min': audio_length_in_s, 'max': audio_length_in_s, 'cropped': 1 if audio_length_in_s > max_length_in_s else 0 } @staticmethod def _merge_stats(merged_stats, audio_stats): merged_stats['count'] += audio_stats['count'] merged_stats['sum'] += audio_stats['sum'] merged_stats['sum2'] += audio_stats['sum2'] merged_stats['min'] = min(merged_stats['min'], audio_stats['min']) merged_stats['max'] = max(merged_stats['max'], audio_stats['max']) merged_stats['cropped'] += audio_stats['cropped'] @staticmethod def _get_2D_feature(audio, feature_type, audio_feature_dict, sampling_rate_in_hz): window_length_in_s = audio_feature_dict['window_length_in_s'] window_shift_in_s = audio_feature_dict['window_shift_in_s'] window_length_in_samp = get_length_in_samp(window_length_in_s, sampling_rate_in_hz) if 'num_fft_points' in audio_feature_dict: num_fft_points = audio_feature_dict['num_fft_points'] if num_fft_points < window_length_in_samp: raise ValueError( 'num_fft_points: {} < 
window length in ' 'samples: {} (corresponds to window length' ' in s: {}'.format(num_fft_points, window_length_in_s, window_length_in_samp)) else: num_fft_points = window_length_in_samp if 'window_type' in audio_feature_dict: window_type = audio_feature_dict['window_type'] else: window_type = 'hamming' if feature_type == 'stft_phase': return get_phase_stft_magnitude(audio, sampling_rate_in_hz, window_length_in_s, window_shift_in_s, num_fft_points, window_type) if feature_type == 'stft': return get_stft_magnitude(audio, sampling_rate_in_hz, window_length_in_s, window_shift_in_s, num_fft_points, window_type) if feature_type == 'group_delay': return get_group_delay(audio, sampling_rate_in_hz, window_length_in_s, window_shift_in_s, num_fft_points, window_type) if feature_type == 'fbank': num_filter_bands = audio_feature_dict['num_filter_bands'] return get_fbank(audio, sampling_rate_in_hz, window_length_in_s, window_shift_in_s, num_fft_points, window_type, num_filter_bands) @staticmethod def add_feature_data( feature, input_df, proc_df, metadata, preprocessing_parameters, backend ): set_default_value( feature['preprocessing'], 'in_memory', preprocessing_parameters['in_memory'] ) if 'audio_feature' not in preprocessing_parameters: raise ValueError( 'audio_feature dictionary has to be present in preprocessing ' 'for audio.') if TYPE not in preprocessing_parameters['audio_feature']: raise ValueError( 'type has to be present in audio_feature dictionary ' 'for audio.') name = feature[NAME] column = feature[COLUMN] proc_column = feature[PROC_COLUMN] src_path = None # this is not super nice, but works both and DFs and lists first_path = '.' for first_path in input_df[column]: break if hasattr(input_df, 'src'): src_path = os.path.dirname(os.path.abspath(input_df.src)) if src_path is None and not os.path.isabs(first_path): raise ValueError('Audio file paths must be absolute') num_audio_utterances = len(input_df) padding_value = preprocessing_parameters['padding_value'] normalization_type = preprocessing_parameters['norm'] feature_dim = metadata[name]['feature_dim'] max_length = metadata[name]['max_length'] audio_feature_dict = preprocessing_parameters['audio_feature'] audio_file_length_limit_in_s = preprocessing_parameters[ 'audio_file_length_limit_in_s'] if num_audio_utterances == 0: raise ValueError( 'There are no audio files in the dataset provided.') if feature[PREPROCESSING]['in_memory']: audio_features, audio_stats = AudioFeatureMixin._process_in_memory( input_df[feature[NAME]], src_path, audio_feature_dict, feature_dim, max_length, padding_value, normalization_type, audio_file_length_limit_in_s, backend ) proc_df[proc_column] = audio_features audio_stats['std'] = np.sqrt( audio_stats['var'] / float(audio_stats['count'])) print_statistics = ( "{} audio files loaded.\n" "Statistics of audio file lengths:\n" "- mean: {:.4f}\n" "- std: {:.4f}\n" "- max: {:.4f}\n" "- min: {:.4f}\n" "- cropped audio_files: {}\n" "Max length was given as {}s" ).format( audio_stats['count'], audio_stats['mean'], audio_stats['std'], audio_stats['max'], audio_stats['min'], audio_stats['cropped'], audio_file_length_limit_in_s) logger.debug(print_statistics) else: backend.check_lazy_load_supported(feature) return proc_df @staticmethod def _get_max_length_feature( audio_feature_dict, sampling_rate_in_hz, audio_length_limit_in_s ): feature_type = audio_feature_dict[TYPE] audio_length_limit_in_samp = ( audio_length_limit_in_s * sampling_rate_in_hz ) if not audio_length_limit_in_samp.is_integer(): raise ValueError( 
'Audio_file_length_limit has to be chosen ' 'so that {} (in s) * {} (sampling rate in Hz) ' 'is an integer.'.format( audio_length_limit_in_s, sampling_rate_in_hz)) audio_length_limit_in_samp = int(audio_length_limit_in_samp) if feature_type == 'raw': return audio_length_limit_in_samp elif feature_type in ['stft', 'stft_phase', 'group_delay', 'fbank']: window_length_in_s = audio_feature_dict['window_length_in_s'] window_shift_in_s = audio_feature_dict['window_shift_in_s'] return get_max_length_stft_based(audio_length_limit_in_samp, window_length_in_s, window_shift_in_s, sampling_rate_in_hz) else: raise ValueError('{} is not recognized.'.format(feature_type)) class AudioInputFeature(AudioFeatureMixin, SequenceInputFeature): encoder = 'parallel_cnn' max_sequence_length = None embedding_size = None def __init__(self, feature, encoder_obj=None): super().__init__(feature, encoder_obj=encoder_obj) if not self.embedding_size: raise ValueError( 'embedding_size has to be defined - ' 'check "update_config_with_metadata()"') if not self.max_sequence_length: raise ValueError( 'max_sequence_length has to be defined - ' 'check "update_config_with_metadata()"') def call(self, inputs, training=None, mask=None): assert isinstance(inputs, tf.Tensor) assert inputs.dtype == tf.float32 assert len(inputs.shape) == 3 encoder_output = self.encoder_obj( inputs, training=training, mask=mask ) return encoder_output @classmethod def get_input_dtype(cls): return tf.float32 def get_input_shape(self): return self.max_sequence_length, self.embedding_size @staticmethod def update_config_with_metadata( input_feature, feature_metadata, *args, **kwargs ): input_feature['max_sequence_length'] = feature_metadata['max_length'] input_feature['embedding_size'] = feature_metadata['feature_dim'] input_feature['should_embed'] = False @staticmethod def populate_defaults(input_feature): set_default_values( input_feature, { TIED: None, 'preprocessing': {} } ) encoder_registry = { 'stacked_cnn': StackedCNN, 'parallel_cnn': ParallelCNN, 'stacked_parallel_cnn': StackedParallelCNN, 'rnn': StackedRNN, 'cnnrnn': StackedCNNRNN, 'passthrough': SequencePassthroughEncoder, 'null': SequencePassthroughEncoder, 'none': SequencePassthroughEncoder, 'None': SequencePassthroughEncoder, None: SequencePassthroughEncoder }
{ "content_hash": "c350ad8f94d235c628bbd082865eede8", "timestamp": "", "source": "github", "line_count": 450, "max_line_length": 109, "avg_line_length": 38.41777777777778, "alnum_prop": 0.5513072651550208, "repo_name": "uber/ludwig", "id": "e9dee871ec40e856a628eead0d048748bf3b744e", "size": "17998", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ludwig/features/audio_feature.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "466847" }, { "name": "Dockerfile", "bytes": "635" }, { "name": "HTML", "bytes": "292184" }, { "name": "JavaScript", "bytes": "85725" }, { "name": "Python", "bytes": "1241008" } ], "symlink_target": "" }
import kfp
from kfp import dsl


def print_op(msg):
    """Print a message."""
    return dsl.ContainerOp(
        name='Print',
        image='alpine:3.6',
        command=['echo', msg],
    )


@dsl.pipeline(
    name='pipeline-parallelism',
    description='The pipeline shows how to set the max number of parallel pods in a pipeline.'
)
def pipeline_parallelism():
    op1 = print_op('hey, what are you up to?')
    op2 = print_op('train my model.')
    dsl.get_pipeline_conf().set_parallelism(1)


if __name__ == '__main__':
    kfp.compiler.Compiler().compile(pipeline_parallelism, __file__ + '.yaml')
{ "content_hash": "c261ecd508c9180778cf670f2f9bc678", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 94, "avg_line_length": 24.625, "alnum_prop": 0.6480541455160744, "repo_name": "kubeflow/kfp-tekton-backend", "id": "e2def4585683d54d1883670b581d6b11010d923d", "size": "1191", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "samples/core/pipeline_parallelism/pipeline_parallelism_limits.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "47293" }, { "name": "Go", "bytes": "1269081" }, { "name": "HTML", "bytes": "3584" }, { "name": "JavaScript", "bytes": "24828" }, { "name": "Jupyter Notebook", "bytes": "177616" }, { "name": "Makefile", "bytes": "9694" }, { "name": "PowerShell", "bytes": "3194" }, { "name": "Python", "bytes": "1628570" }, { "name": "Scala", "bytes": "13000" }, { "name": "Shell", "bytes": "180020" }, { "name": "Smarty", "bytes": "7694" }, { "name": "Starlark", "bytes": "76037" }, { "name": "TypeScript", "bytes": "1641150" } ], "symlink_target": "" }
""" Helper script to dump all information for an architecture """ import argparse import html from ppci import api from ppci.arch import encoding arch = api.get_arch('msp430') arch = api.get_arch('x86_64') def mkstr(s): if isinstance(s, str): return s elif isinstance(s, encoding.Operand): return '${}'.format(s._name) else: raise NotImplementedError() filename = 'arch_info.html' with open(filename, 'w') as f: print("""<html> <body> """, file=f) # Create a list: instructions = [] for i in arch.isa.instructions: if not i.syntax: continue syntax = ''.join(mkstr(s) for s in i.syntax.syntax) instructions.append((syntax, i)) print('<h1>Instructions</h1>', file=f) print('<p>{} instructions available</p>'.format(len(instructions)), file=f) print('<table border="1">', file=f) print('<tr><th>syntax</th><th>Class</th></tr>', file=f) for syntax, ins_class in sorted(instructions, key=lambda x: x[0]): print('<tr>'.format(), file=f) print('<td>{}</td>'.format(html.escape(syntax)), file=f) print('<td>{}</td>'.format(html.escape(str(ins_class))), file=f) print('</tr>'.format(), file=f) print('</table>', file=f) print("""</body> </html> """, file=f)
{ "content_hash": "a1fb45d80e4c70a559847253076f16a2", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 79, "avg_line_length": 27.541666666666668, "alnum_prop": 0.5885022692889561, "repo_name": "windelbouwman/ppci-mirror", "id": "0aa487ecb85807e73337431541943c0cfb7f60cf", "size": "1323", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/dump_arch.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Assembly", "bytes": "94" }, { "name": "Brainfuck", "bytes": "5867" }, { "name": "C", "bytes": "229265" }, { "name": "C++", "bytes": "1257" }, { "name": "Coq", "bytes": "98028" }, { "name": "HTML", "bytes": "363" }, { "name": "JavaScript", "bytes": "2165" }, { "name": "LLVM", "bytes": "11206" }, { "name": "Python", "bytes": "2991165" }, { "name": "Shell", "bytes": "960" }, { "name": "Verilog", "bytes": "9363" } ], "symlink_target": "" }
from threading import Thread
import psutil
import time

old_status = None
current_nic_status = None


# convert bytes to kilobytes
def bytes_to_kb(byts):
    return byts / 1024


# loop every second to update records
def update_nics():
    global old_status
    global current_nic_status
    while True:
        # get current net status
        new_stats = psutil.net_io_counters(pernic=True)

        # get 1 second difference in KB
        current_nic_status = list()
        nic_names = list()
        for nic in old_status.keys():
            nic_names.append(nic)
        for i in range(0, len(old_status)):
            diff_sent = round(bytes_to_kb(new_stats[nic_names[i]].bytes_sent - old_status[nic_names[i]].bytes_sent), 3)
            diff_recv = round(bytes_to_kb(new_stats[nic_names[i]].bytes_recv - old_status[nic_names[i]].bytes_recv), 3)
            current_nic_status.append(NIC(nic_names[i], diff_sent, diff_recv))
        old_status = new_stats
        time.sleep(1)


class Network:
    def __init__(self):
        global old_status
        # initial snapshot of the counters
        old_status = psutil.net_io_counters(pernic=True)

        # start background update thread
        thd = Thread(target=update_nics)
        thd.daemon = True
        thd.start()

    # retrieve the latest per-NIC deltas
    def get_nic_status(self):
        global current_nic_status
        return current_nic_status


class NIC:
    def __init__(self, _name, _sent, _recv):
        self.name = _name
        self.sent = _sent
        self.recv = _recv
{ "content_hash": "60c222e945a77221bbf65c930b3d320f", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 119, "avg_line_length": 24.453125, "alnum_prop": 0.6095846645367412, "repo_name": "Atomicbeast101/atomic-monitor", "id": "7b18bc0f0787ff4e0d03a854ec642fcdf49be461", "size": "1565", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "agent/bin/network.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "28310" }, { "name": "HTML", "bytes": "26092" }, { "name": "JavaScript", "bytes": "25994" }, { "name": "PHP", "bytes": "888" }, { "name": "Python", "bytes": "67614" } ], "symlink_target": "" }
"""Tests for text_util.""" from absl.testing import parameterized import tensorflow as tf from tensorflow_model_analysis.experimental.preprocessing_functions import text class TextTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ('EmptyString', [''], [[]]), ('SingleString', ['Test foo Bar'], [['test', 'foo', 'bar']]), ('BatchedString', ['app dog', 'test foo bar'], [['app', 'dog', ''], ['test', 'foo', 'bar']]), ) def testWhitespaceTokenization(self, input_text, expected_output): # TODO(b/194508683) Delete the check when TF1 is deprecated. if tf.__version__ < '2': return actual = text.whitespace_tokenization(input_text).to_tensor() expected = tf.constant(expected_output, dtype=tf.string) self.assertAllEqual(actual, expected) if __name__ == '__main__': tf.test.main()
{ "content_hash": "86ce188de5eb0459d9dba2d35ba03260", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 79, "avg_line_length": 35.34615384615385, "alnum_prop": 0.6224156692056583, "repo_name": "tensorflow/model-analysis", "id": "ae10a64aaa5686b6d4f6c778e4c79ab590a8d456", "size": "1494", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow_model_analysis/experimental/preprocessing_functions/text_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "125312" }, { "name": "JavaScript", "bytes": "1415355" }, { "name": "Python", "bytes": "3261298" }, { "name": "Shell", "bytes": "813" }, { "name": "Starlark", "bytes": "11590" } ], "symlink_target": "" }
from tests import unittest

from mock import Mock

from ....utils import MockHttpResource, setup_test_homeserver

from synapse.types import UserID

from twisted.internet import defer


PATH_PREFIX = "/_matrix/client/v2_alpha"


class V2AlphaRestTestCase(unittest.TestCase):
    # Consumer must define
    #   USER_ID = <some string>
    #   TO_REGISTER = [<list of REST servlets to register>]

    @defer.inlineCallbacks
    def setUp(self):
        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)

        hs = yield setup_test_homeserver(
            datastore=self.make_datastore_mock(),
            http_client=None,
            resource_for_client=self.mock_resource,
            resource_for_federation=self.mock_resource,
        )

        def _get_user_by_token(token=None):
            return {
                "user": UserID.from_string(self.USER_ID),
                "admin": False,
                "device_id": None,
                "token_id": 1,
            }
        hs.get_auth().get_user_by_token = _get_user_by_token

        for r in self.TO_REGISTER:
            r.register_servlets(hs, self.mock_resource)

    def make_datastore_mock(self):
        store = Mock(spec=[
            "insert_client_ip",
        ])
        store.get_app_service_by_token = Mock(return_value=None)
        return store
{ "content_hash": "11c1205e9fc8f7b66ce64ea9175781e6", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 65, "avg_line_length": 27.75, "alnum_prop": 0.6006006006006006, "repo_name": "illicitonion/synapse", "id": "de5a917e6aabbb6065245af6f42ec6b037bf8f8a", "size": "1935", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tests/rest/client/v2_alpha/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "1020" }, { "name": "HTML", "bytes": "1223" }, { "name": "JavaScript", "bytes": "172643" }, { "name": "Perl", "bytes": "31420" }, { "name": "Python", "bytes": "1571632" }, { "name": "Shell", "bytes": "3281" } ], "symlink_target": "" }
"""Setup.py module for the workflow's worker utilities. All the workflow related code is gathered in a package that will be built as a source distribution, staged in the staging area for the workflow being run and then installed in the workers when they start running. This behavior is triggered by specifying the --setup_file command line option when running the workflow for remote execution. """ from distutils.command.build import build as _build import subprocess import setuptools # This class handles the pip install mechanism. class build(_build): # pylint: disable=invalid-name """A build command class that will be invoked during package install. The package built using the current setup.py will be staged and later installed in the worker using `pip install package'. This class will be instantiated during install for this specific scenario and will trigger running the custom commands specified. """ sub_commands = _build.sub_commands + [('CustomCommands', None)] # Some custom command to run during setup. The command is not essential for this # workflow. It is used here as an example. Each command will spawn a child # process. Typically, these commands will include steps to install non-Python # packages. For instance, to install a C++-based library libjpeg62 the following # two commands will have to be added: # # ['apt-get', 'update'], # ['apt-get', '--assume-yes', install', 'libjpeg62'], # # First, note that there is no need to use the sudo command because the setup # script runs with appropriate access. # Second, if apt-get tool is used then the first command needs to be 'apt-get # update' so the tool refreshes itself and initializes links to download # repositories. Without this initial step the other apt-get install commands # will fail with package not found errors. Note also --assume-yes option which # shortcuts the interactive confirmation. # # The output of custom commands (including failures) will be logged in the # worker-startup log. CUSTOM_COMMANDS = [ ] class CustomCommands(setuptools.Command): """A setuptools Command class able to run arbitrary commands.""" def initialize_options(self): pass def finalize_options(self): pass def RunCustomCommand(self, command_list): print('Running command: %s' % command_list) p = subprocess.Popen( command_list, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Can use communicate(input='y\n'.encode()) if the command run requires # some confirmation. stdout_data, _ = p.communicate() print('Command output: %s' % stdout_data) if p.returncode != 0: raise RuntimeError( 'Command %s failed: exit code: %s' % (command_list, p.returncode)) def run(self): for command in CUSTOM_COMMANDS: self.RunCustomCommand(command) # Configure the required packages and scripts to install. # Note that the Python Dataflow containers come with numpy already installed # so this dependency will not trigger anything to be installed unless a version # restriction is specified. REQUIRED_PACKAGES = [ 'timezonefinder', 'pytz' ] setuptools.setup( name='flightsdf', version='0.0.1', description='Data Science on GCP flights analysis pipelines', install_requires=REQUIRED_PACKAGES, packages=setuptools.find_packages(), py_modules=['df07'], cmdclass={ # Command class instantiated and run during pip install scenarios. 'build': build, 'CustomCommands': CustomCommands, } )
{ "content_hash": "43f98956feeaccfc38d3dfc8fbc5af8b", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 84, "avg_line_length": 36.33, "alnum_prop": 0.7162124965593174, "repo_name": "GoogleCloudPlatform/data-science-on-gcp", "id": "55b61239e8b4681245f87e4f0e993a95646426a1", "size": "4418", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "04_streaming/transform/setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "801" }, { "name": "Jupyter Notebook", "bytes": "1503901" }, { "name": "Python", "bytes": "134422" }, { "name": "Shell", "bytes": "18451" } ], "symlink_target": "" }
import MySQLdb as mdb
import argparse
import os
import csv

def createDoc(doc):
    file = open(doc, "w")
    file.close()

def makeID():
    # This is important when running this program from the command line,
    # because it generates a keyword ID.
    file = open("ID.txt", "w")
    first = str(0)
    file.write(first)
    file.close()

def uniTable(cur, table_char, dtb):
    # This just allows for changing between databases with a similar basic
    # structure.
    cur.execute("SHOW TABLES FROM %s LIKE '%%%s%%'" % (dtb, table_char))
    val = cur.fetchall()
    table = val[0][0]
    return table

def uniCol(cur, table, col_char, col_type):
    cur.execute("SHOW COLUMNS FROM %s LIKE '%%%s%%'" % (table, col_char))
    val = cur.fetchall()
    for i in range(len(val)):
        if col_type in val[i][1]:
            return val[i][0]

def writeArtID(cur, dtb):
    # The ngrams are stored as either an .xml.csv or .txt.csv preceded by the
    # DOI of the article. DOIs in MySQL are formatted differently, so this
    # method is used to properly format them.
    table = uniTable(cur, 'oc', dtb)
    article = uniCol(cur, table, 'doi', 'varchar')
    cur.execute("SELECT %s FROM %s" % (article, table))
    rows = cur.fetchall()
    articles = []
    for i in range(len(rows)):
        artID = tup2str(rows[i])
        if '10.1146/' in artID:
            artID = artID.replace('10.1146/', '')
        else:
            artID = artID.replace('http://www.annualreviews.org/doi/abs/', '')
        articles.append(artID)
    return articles

def nGramSearch(cur, keyword, dtb, doc):
    # The ngram search is long and tedious. It takes the keyword that is
    # given and the document that addToDoc() makes. Then it takes the DOI
    # given by the writeArtID() method and looks for the csv that correlates
    # to that article. When it finds the article, it searches for the keyword
    # in the file. If it is found, it checks to make sure the doc ID
    # correlating with the DOI is not already stored, and, if it isn't,
    # stores the docID. It has a very long run time, so it is an optional
    # method.
    table = uniTable(cur, 'oc', dtb)
    docID = uniCol(cur, table, 'id', 'int')
    articleID = uniCol(cur, table, 'doi', 'varchar')
    articles = writeArtID(cur, dtb)
    file = open(doc, 'r')
    docList = file.readlines()
    file.close()
    artList = []
    errors = open("error.txt", "w")
    for i in range(len(articles)):
        if os.path.isfile('/mnt/AnnualReviews/ngrams_flash0/%s.txt.csv' % articles[i]):
            f = open('/mnt/AnnualReviews/ngrams_flash0/%s.txt.csv' % articles[i], 'rb')
        elif os.path.isfile('/mnt/AnnualReviews/ngrams_flash0/%s.xml.csv' % articles[i]):
            f = open('/mnt/AnnualReviews/ngrams_flash0/%s.xml.csv' % articles[i], 'rb')
        elif os.path.isfile('/mnt/AnnualReviews/ngrams_flash1/%s.txt.csv' % articles[i]):
            f = open('/mnt/AnnualReviews/ngrams_flash1/%s.txt.csv' % articles[i], 'rb')
        elif os.path.isfile('/mnt/AnnualReviews/ngrams_flash1/%s.xml.csv' % articles[i]):
            f = open('/mnt/AnnualReviews/ngrams_flash1/%s.xml.csv' % articles[i], 'rb')
        elif os.path.isfile('/mnt/AnnualReviews/ngrams_flash2/%s.txt.csv' % articles[i]):
            f = open('/mnt/AnnualReviews/ngrams_flash2/%s.txt.csv' % articles[i], 'rb')
        else:
            try:
                f = open('/mnt/AnnualReviews/ngrams_flash2/%s.xml.csv' % articles[i], 'rb')
            except IOError:
                # Record the articles whose ngram files could not be found
                # and move on to the next article.
                print(articles[i])
                errors.write("%s\n" % articles[i])
                continue
        reader = csv.reader(f)
        for row in reader:
            if keyword in row:
                article = '10.1146/' + articles[i]
                cur.execute("SELECT %s FROM %s WHERE %s = '%s'" % (docID, table, articleID, article))
                rows = cur.fetchall()
                # Only store the doc ID if it is not already in the document.
                if rows and not any(tup2str(rows[0]) in line for line in docList):
                    artList.append(tup2int(rows[0]))
        f.close()
    errors.close()
    return artList

def tup2int(tup):
    return int('.'.join(str(x) for x in tup))

def tup2str(tup):
    return str(','.join(str(x) for x in tup))

def addToDoc(cur, ID, dtb, doc, keyword, args=None, ngram=None):
    # This method, while somewhat long, is actually pretty simple. All it
    # does is accept an ID number for the keyword, the doc name of where the
    # docID should be saved, and the keyword. If you want ngrams, you can
    # specify that too, through optional arguments at the command line or
    # when you call writeOne() or writeTwo() from another program. Without
    # the ngrams, it just searches the titles and abstracts in the MySQL
    # database for the keyword. If found, it records the docID.
    table = uniTable(cur, 'oc', dtb)
    docId = uniCol(cur, table, 'id', 'int')
    title = uniCol(cur, table, 'itl', 'text')
    abstract = uniCol(cur, table, 'act', 'text')
    file = open(doc, "r")
    check = "Keyword: %s\n" % keyword
    checkID = "KeywordID: %d\n" % ID
    for line in file:
        if checkID == line:
            print("That is already a documented keyword")
            return
        if check == line:
            print("That is already a documented keyword")
            return
    file.close()
    file = open(doc, "a")
    cur.execute("SELECT %s FROM %s WHERE %s LIKE '%%%s%%' OR %s LIKE '%%%s%%'" % (docId, table, title, keyword, abstract, keyword))
    rows = cur.fetchall()
    for row in rows:
        if args is None:
            print("INSERTED: KEYWORDID: %d, KEYWORD: '%s', DOCID: %d" % (ID, keyword, tup2int(row)))
            file.write("KeywordID: %d\nKeyword: %s\nDocID: %d\n \n" % (ID, keyword, tup2int(row)))
        if args == "docID":
            file.write("%d\n" % tup2int(row))
        if args == "Keyword":
            file.write("%s\n" % keyword)
        if args == "KeywordID":
            file.write("%d\n" % ID)
    if ngram is not None:
        artList = nGramSearch(cur, keyword, dtb, doc)
        for i in range(len(artList)):
            if args is None:
                print("INSERTED: KEYWORDID: %d, KEYWORD: '%s', DOCID: %d" % (ID, keyword, artList[i]))
                file.write("KeywordID: %d\nKeyword: %s\nDocID: %d\n \n" % (ID, keyword, artList[i]))
            if args == "docID":
                file.write("%d\n" % artList[i])
    file.close()
    ID += 1
    Id = str(ID)
    if args is None or args == "KeywordID":
        file = open("ID.txt", "a")
        file.write("%s\n" % Id)
        file.close()

def makeUnion(doc, doc2):
    # makeUnion is very simple. It takes two documents which contain the
    # docIDs for their separate keyword matches and sees if there are any
    # docIDs that are in both.
    store = []
    Idnum = [line.strip() for line in open(doc)]
    Idnum2 = [line.strip() for line in open(doc2)]
    for i in range(len(Idnum)):
        for x in range(len(Idnum2)):
            if Idnum[i] == Idnum2[x]:
                store.append(Idnum[i])
    if len(store) > 0:
        file = open("union.txt", "w")
        for i in range(len(store)):
            file.write("%s\n" % store[i])
        file.close()

def writeOne(keywd, dtb, arg, ngrams=None):
    # writeOne and writeTwo should only be used by other programs when they
    # call this one. They are not used at the command line.
    dbs = mdb.connect(user="root", passwd="root", db=dtb)
    cur = dbs.cursor()
    ID = 0
    doc = "%s.txt" % keywd
    createDoc(doc)
    if ngrams is not None:
        addToDoc(cur, ID, dtb, doc, keywd, args=arg, ngram=ngrams)
    else:
        addToDoc(cur, ID, dtb, doc, keywd, args=arg)
    cur.close()
    dbs.close()

def writeTwo(keywd, keywd2, dtb, arg, ngrams=None):
    dbs = mdb.connect(user="root", passwd="root", db=dtb)
    cur = dbs.cursor()
    ID = 0
    doc = "%s.txt" % keywd
    doc2 = "%s.txt" % keywd2
    createDoc(doc)
    createDoc(doc2)
    if ngrams is not None:
        addToDoc(cur, ID, dtb, doc, keywd, args=arg, ngram=ngrams)
        addToDoc(cur, ID, dtb, doc2, keywd2, args=arg, ngram=ngrams)
    else:
        addToDoc(cur, ID, dtb, doc, keywd, args=arg)
        addToDoc(cur, ID, dtb, doc2, keywd2, args=arg)
    cur.close()
    dbs.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('keywrd')
    parser.add_argument('database')
    parser.add_argument('--ngram')
    args = parser.parse_args()
    dbs = mdb.connect(user="root", passwd="root", db=args.database)
    cur = dbs.cursor()
    doc = "%s.txt" % args.keywrd
    if os.path.isfile(doc):
        file = open("ID.txt", "r")
        lineList = file.readlines()
        file.close()
        ID = int(lineList[-1])
        if args.ngram:
            addToDoc(cur, ID, args.database, doc, args.keywrd, ngram=True)
        else:
            addToDoc(cur, ID, args.database, doc, args.keywrd)
        cur.close()
        dbs.close()
    else:
        createDoc(doc)
        makeID()
        file = open("ID.txt", "r")
        ID = int(file.read())
        file.close()
        if args.ngram:
            addToDoc(cur, ID, args.database, doc, args.keywrd, ngram=True)
        else:
            addToDoc(cur, ID, args.database, doc, args.keywrd)
        cur.close()
        dbs.close()
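# --- Illustrative usage (a sketch, not from the original repo). It assumes
# a local MySQL database named 'ardb' reachable with the root/root
# credentials hard-coded above; the keywords are placeholders.
import keywordadd

# Write matching doc IDs (one per line) to climate.txt and carbon.txt,
# then intersect the two result files into union.txt.
keywordadd.writeTwo('climate', 'carbon', 'ardb', 'docID')
keywordadd.makeUnion('climate.txt', 'carbon.txt')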
{ "content_hash": "36719f8fb2b28e2818480b621b683ccf", "timestamp": "", "source": "github", "line_count": 213, "max_line_length": 641, "avg_line_length": 41.57276995305164, "alnum_prop": 0.6145680406549971, "repo_name": "RDCEP/BWLT", "id": "8fe44baa2e3eedb830c29da7cb1b36b6b03f2eaa", "size": "8855", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "keywordadd.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "22899" }, { "name": "Shell", "bytes": "4381" }, { "name": "VimL", "bytes": "69" } ], "symlink_target": "" }
from app import cfg
from app import util


def p_img_info_handler(data):
    cfg.logger.debug('data: %s', data)

    if not data.get('the_id', ''):
        return {"success": False}

    result = util.db_update('bee_img', {'the_id': data.get('the_id', '')}, data, upsert=False)
    if not result:
        return {"success": False}

    return {"success": True}
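# --- Illustrative unit-style check (a sketch; it assumes the app package
# is importable and stubs out its db and logging dependencies).
from unittest import mock

def _demo():
    from app.http_handlers import p_img_info_handler as handler_module
    with mock.patch.object(handler_module, 'util') as m_util, \
            mock.patch.object(handler_module, 'cfg'):
        m_util.db_update.return_value = True
        assert handler_module.p_img_info_handler({'the_id': 'abc123'}) == {"success": True}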
{ "content_hash": "a05d8a3d70f32134e9140b081c57b322", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 94, "avg_line_length": 23.55, "alnum_prop": 0.6475583864118896, "repo_name": "g0v/LittleBeeGeo", "id": "661932f47e2c17d21679626ad4d3e47be47e569a", "size": "496", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "LittleBeeGeo_backend/app/http_handlers/p_img_info_handler.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "33244" }, { "name": "LiveScript", "bytes": "123586" }, { "name": "Python", "bytes": "107715" }, { "name": "Shell", "bytes": "16354" } ], "symlink_target": "" }
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase


class TestText(MpfMcTestCase):
    def get_machine_path(self):
        return 'tests/machine_files/text'

    def get_config_file(self):
        return 'test_text.yaml'

    def get_widget(self, index=0):
        return self.mc.targets['default'].current_slide.widgets[index].widget

    def test_static_text(self):
        # Very basic test
        self.mc.events.post('static_text')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'TEST')

    def test_text_from_event_param1(self):
        # widget text is only from event param
        self.mc.events.post('text_from_event_param1', param1='HELLO')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'HELLO')

        # now make sure if we post the event again, the text updates
        self.mc.events.post('text_from_event_param1', param1='THIS')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'THIS')

        self.mc.events.post('text_from_event_param1', param1='IS')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'IS')

        self.mc.events.post('text_from_event_param1', param1='A')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'A')

        self.mc.events.post('text_from_event_param1', param1='NEW')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'NEW')

        self.mc.events.post('text_from_event_param1', param1='EVENT')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'EVENT')

    def test_text_from_event_param2(self):
        # widget text puts static text before param text
        self.mc.events.post('text_from_event_param2', param1='HELLO')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'HI HELLO')

    def test_text_from_event_param3(self):
        # widget text puts static text before and after param text
        self.mc.events.post('text_from_event_param3', param1='AND')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'MIX AND MATCH')

    def test_text_from_event_param4(self):
        # static and event text with no space between
        self.mc.events.post('text_from_event_param4', param1='SPACE')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'NOSPACE')

    def test_text_from_event_param5(self):
        # test event text that comes in as non-string
        self.mc.events.post('text_from_event_param5', param1=1)
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'NUMBER 1')

    def test_text_from_event_param6(self):
        # placeholder for event text for a param that doesn't exist
        self.mc.events.post('text_from_event_param6')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '(param1)')

    def test_text_from_event_param7(self):
        # test percent sign hard coded
        self.mc.events.post('text_from_event_param7')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '1)')

    def test_text_from_event_param8(self):
        # test parenthesis next to placeholder text
        self.mc.events.post('text_from_event_param8', param1=100)
        self.advance_time()
        self.assertEqual(self.get_widget().text, '(100)')

    def test_player_var1(self):
        # straight var, no player specified
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)
        self.advance_time()
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertTrue(self.mc.player)

        self.mc.player.test_var = 1
        self.mc.events.post('text_with_player_var1')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '1')
        old_width = self.get_widget().width

        # update var, should update widget
        for x in range(99):
            self.mc.player.test_var += 1
            self.advance_time(.01)

        self.advance_time()
        self.assertEqual(self.get_widget().text, '100')
        self.assertGreater(self.get_widget().width, old_width)

    def test_player_var2(self):
        # 'player' specified
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)
        self.advance_time()
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertTrue(self.mc.player)

        self.mc.player.test_var = 1
        self.mc.events.post('text_with_player_var2')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '1')

    def test_player_var3(self):
        # 'player1' specified
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)
        self.advance_time()
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertTrue(self.mc.player)

        self.mc.player.test_var = 1
        self.mc.events.post('text_with_player_var3')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '1')

    def test_player_var4(self):
        # 'player2' specified with no player 2. Should be blank.
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)
        self.advance_time()
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertTrue(self.mc.player)

        self.mc.player.test_var = 1
        self.mc.events.post('text_with_player_var4')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '')

        # Add player 2 & set the value. Widget should update
        self.mc.add_player(2)
        self.mc.player_list[1].test_var = 'Player 2 test variable'
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 2 test variable')

    def test_position_rounding(self):
        # straight var, no player specified
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)
        self.advance_time()
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertTrue(self.mc.player)

        # set var, should update widget with even pixel width and not offset
        self.mc.player.test_var = 'its even'
        self.mc.events.post('text_with_player_var1')
        self.advance_time()
        self.get_widget()._round_anchor_styles = ('left', None)
        bounding_box = self.get_widget().canvas.children[-1]
        self.assertEqual(self.get_widget().text, 'its even')
        self.assertIn(bounding_box.size, ((346, 118), (343.0, 118.0)))
        self.assertEqual(bounding_box.pos, (200, 150))

        # update var, should update widget with an odd pixel width and offset DOWN
        self.mc.player.test_var = 'odd'
        self.advance_time()
        bounding_box = self.get_widget().canvas.children[-1]
        self.assertEqual(self.get_widget().text, 'odd')
        self.assertIn(bounding_box.size, ((171, 118), (169.0, 118.0)))
        self.assertEqual(bounding_box.pos, (199.5, 150))

        # update var, should update widget with an odd pixel width and offset UP
        self.get_widget()._round_anchor_styles = ('right', None)
        self.mc.player.test_var = 'also odd'
        self.advance_time()
        bounding_box = self.get_widget().canvas.children[-1]
        self.assertEqual(self.get_widget().text, 'also odd')
        self.assertIn(bounding_box.size, ((384, 118), (381.0, 118.0)))
        self.assertIn(bounding_box.pos, ((200.0, 150), (200.5, 150.0)))

        # update var, should update widget with an even pixel width and not offset
        self.mc.player.test_var = 'no round'
        self.advance_time()
        bounding_box = self.get_widget().canvas.children[-1]
        self.assertEqual(self.get_widget().text, 'no round')
        self.assertIn(bounding_box.size, ((396, 118), (394.0, 118.0)))
        self.assertEqual(bounding_box.pos, (200, 150))

    def test_current_player(self):
        # verifies that current player text updates when the current player changes
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)  # Player 1
        self.advance_time()
        self.mc.add_player(2)  # Player 2
        self.advance_time()
        self.mc.add_player(3)  # Player 3
        self.advance_time()

        # Test text: (test_var)
        # Player 1
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertEqual(0, self.mc.player.index)
        self.assertEqual(1, self.mc.player.number)
        self.mc.player.test_var = 'Player 1 test var'
        self.mc.events.post('text_with_player_var1')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 1 test var')

        # Player 2
        self.mc.player_start_turn(2)
        self.advance_time()
        self.assertEqual(1, self.mc.player.index)
        self.assertEqual(2, self.mc.player.number)
        self.mc.player.test_var = 'Player 2 test var'
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 2 test var')

        # Player 3
        self.mc.player_start_turn(3)
        self.advance_time()
        self.assertEqual(2, self.mc.player.index)
        self.assertEqual(3, self.mc.player.number)
        self.mc.player.test_var = 'Player 3 test var'
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 3 test var')

        # Back to player 1, make sure it's still there
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 1 test var')

        # Test text: (player|test_var)
        self.mc.events.post('text_with_player_var2')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 1 test var')

        self.mc.player_start_turn(2)
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 2 test var')

        self.mc.player_start_turn(3)
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Player 3 test var')

    def test_mix_player_var_and_event_param(self):
        self.mc.game_start()
        self.advance_time()
        self.mc.add_player(1)
        self.advance_time()
        self.mc.player_start_turn(1)
        self.advance_time()
        self.assertTrue(self.mc.player)

        self.mc.player.player_var = 'PLAYER VAR'
        self.mc.events.post('text_with_player_var_and_event', test_param="EVENT PARAM")
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'PLAYER VAR EVENT PARAM')

        self.mc.player.player_var = 'NEW PLAYER VAR'
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'NEW PLAYER VAR EVENT PARAM')

        self.mc.events.post('text_with_player_var_and_event', test_param="NEW EVENT PARAM")
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'NEW PLAYER VAR NEW EVENT PARAM')

    def test_number_grouping(self):
        self.mc.events.post('number_grouping')
        self.advance_time()

        # should be 00 even though text is 0
        self.assertEqual(self.get_widget().text, '00')
        self.advance_time()

        self.get_widget().update_text('2000000')
        self.assertEqual(self.get_widget().text, '2,000,000')
        self.advance_time()

    def test_text_casing(self):
        self.mc.events.post('text_nocase')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'sAmPlE tExT caSiNg')

        self.mc.events.post('text_lower')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'sample text casing')

        self.mc.events.post('text_upper')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'SAMPLE TEXT CASING')

        self.mc.events.post('text_title')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Sample Text Casing')

        self.mc.events.post('text_capitalize')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'Sample text casing')

    def test_text_string1(self):
        # simple text string in machine config
        self.mc.events.post('text_string1')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'HELLO')

    def test_text_string2(self):
        # two text strings in machine config
        self.mc.events.post('text_string2')
        self.advance_time()
        self.assertEqual(self.get_widget().text, 'HELLO PLAYER')

    def test_text_string3(self):
        # text string not found
        self.mc.events.post('text_string3')
        self.advance_time()
        self.assertEqual(self.get_widget().text, '$money')

    def test_text_string4(self):
        # text string found with extra dollar sign in text
        self.mc.events.post('text_string4')
        self.advance_time()
self.assertEqual(self.get_widget().text, '$100') def test_custom_fonts(self): self.mc.events.post('mpfmc_font') self.advance_time() self.assertEqual('pixelmix', self.get_widget().font_name) self.mc.events.post('machine_font') self.advance_time() self.assertEqual('big_noodle_titling', self.get_widget().font_name) def test_baseline(self): self.mc.events.post('baseline') self.advance_time() # Baseline anchored widgets should all have the same local coordinates self.assertEqual(self.get_widget(0).pos[1], 100) self.assertEqual(self.get_widget(1).pos[1], 100) self.assertEqual(self.get_widget(2).pos[1], 100) self.assertEqual(self.get_widget(3).pos[1], 100) # Now convert the widget coordinates to parent coordinates for comparison widget0 = self.get_widget(0).parent.to_parent(0, self.get_widget(0).pos[1]) widget1 = self.get_widget(1).parent.to_parent(0, self.get_widget(1).pos[1]) widget2 = self.get_widget(2).parent.to_parent(0, self.get_widget(2).pos[1]) widget3 = self.get_widget(3).parent.to_parent(0, self.get_widget(3).pos[1]) # Baseline anchored widgets should be 4px lower self.assertEqual(widget0[1], 100) self.assertEqual(widget1[1], 96) self.assertEqual(widget2[1], 100) self.assertEqual(widget3[1], 96) def test_line_break(self): """Tests line break in text (multiple lines)""" self.mc.events.post('text_line_break') self.advance_time() self.assertGreater(self.get_widget().height, 40) def test_no_multiline(self): """Tests poorly formatted YAML line break in text""" self.mc.events.post('text_bad_line_break') self.advance_time() self.assertLess(self.get_widget().height, 30)
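# --- Illustrative runner (a sketch, not part of the original test module).
# Assuming mpf-mc and the machine files referenced by get_machine_path()/
# get_config_file() are installed, the cases above can be run with the
# standard unittest loader:
import unittest

if __name__ == '__main__':
    suite = unittest.defaultTestLoader.loadTestsFromName(
        'mpfmc.tests.test_Text.TestText')
    unittest.TextTestRunner(verbosity=2).run(suite)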
{ "content_hash": "a34fcf86cf01de032f718cebbe415246", "timestamp": "", "source": "github", "line_count": 433, "max_line_length": 83, "avg_line_length": 34.33256351039261, "alnum_prop": 0.6138840306740213, "repo_name": "missionpinball/mpf_mc", "id": "0d9e47a6bce3b5f14a7154b6ef90edaf6bdbab70", "size": "14866", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "mpfmc/tests/test_Text.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import sys
import datetime
from time import strptime, strftime

def _getWeekDetails(_weekNo, _Year, _weekStart):
    rslt = []
    janOne = strptime('%s-01-01' % _Year, '%Y-%m-%d')
    dayOfFirstWeek = ((7 - int(strftime("%u", janOne)) + int(_weekStart)) % 7)
    if dayOfFirstWeek == 0:
        dayOfFirstWeek = 7
    dateOfFirstWeek = strptime('%s-01-%s' % (_Year, dayOfFirstWeek), '%Y-%m-%d')
    dayOne = datetime.datetime(
        dateOfFirstWeek.tm_year,
        dateOfFirstWeek.tm_mon,
        dateOfFirstWeek.tm_mday
    )
    daysToGo = 7 * (int(_weekNo) - 1)
    lastDay = daysToGo + 6
    dayX = dayOne + datetime.timedelta(days=daysToGo)
    dayY = dayOne + datetime.timedelta(days=lastDay)
    resultDateX = strptime('%s-%s-%s' % (dayX.year, dayX.month, dayX.day), '%Y-%m-%d')
    resultDateY = strptime('%s-%s-%s' % (dayY.year, dayY.month, dayY.day), '%Y-%m-%d')
    rslt.append(resultDateX)
    rslt.append(resultDateY)
    return rslt

if __name__ == '__main__':
    passedArgs = sys.argv
    if len(passedArgs) >= 3:
        # Default the start of the week to Monday (Sunday = 1, Monday = 2, and so on).
        try:
            startOfWeek = passedArgs[3]
        except IndexError:
            startOfWeek = 2
        WeekData = _getWeekDetails(passedArgs[1], passedArgs[2], startOfWeek)
        print("Monday of Week %s: %s\n" % (passedArgs[1], strftime("%Y-%m-%d", WeekData[0])))
        print("Sunday of Week %s: %s\n" % (passedArgs[1], strftime("%Y-%m-%d", WeekData[1])))
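# --- Illustrative use as a library (a sketch; the week/year values are
# arbitrary). _getWeekDetails returns a [start, end] pair of struct_time
# values for the requested week.
from time import strftime

start, end = _getWeekDetails(10, 2015, 2)  # week 10 of 2015, Monday start
print("Week runs %s to %s" % (strftime("%Y-%m-%d", start),
                              strftime("%Y-%m-%d", end)))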
{ "content_hash": "3003138afc012bad79f2b8b88298ba8a", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 106, "avg_line_length": 41.55555555555556, "alnum_prop": 0.6096256684491979, "repo_name": "jonlatorre/gestioneide", "id": "40fc9e98dabe30f3bf162b7312c1a3253aec32e3", "size": "1496", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "clases/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "16184" }, { "name": "Python", "bytes": "26822" } ], "symlink_target": "" }
import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='networking_nec') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
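# --- Illustrative usage (a sketch, not part of the original module; it
# assumes oslo.log is available, as is typical alongside oslo.i18n).
from oslo_log import log as logging

from networking_nec._i18n import _, _LI, _LW

LOG = logging.getLogger(__name__)

def attach_port(port_id):
    # Log messages use the level-specific translators; exception text uses
    # the primary "_" translator.
    LOG.info(_LI('Attaching port %s'), port_id)
    if port_id is None:
        LOG.warning(_LW('No port given; skipping attach'))
        raise ValueError(_('port_id must not be None'))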
{ "content_hash": "a01135bcda23801b766879596e25398e", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 69, "avg_line_length": 30.1875, "alnum_prop": 0.7391304347826086, "repo_name": "openstack/networking-nec", "id": "7047ce1d9e2a5f2de194c7fabbda648da09920f2", "size": "1081", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "networking_nec/_i18n.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "431" }, { "name": "Python", "bytes": "387801" }, { "name": "Shell", "bytes": "8526" } ], "symlink_target": "" }
import boto3 from botocore.client import ClientError import pytest import sure # noqa # pylint: disable=unused-import from moto import mock_s3 @mock_s3 def test_create_bucket_with_ownership(): bucket = "bucket-with-owner" ownership = "BucketOwnerPreferred" client = boto3.client("s3") client.create_bucket(Bucket=bucket, ObjectOwnership=ownership) response = client.get_bucket_ownership_controls(Bucket=bucket) response["OwnershipControls"]["Rules"][0]["ObjectOwnership"].should.equal(ownership) @mock_s3 def test_put_ownership_to_bucket(): bucket = "bucket-updated-with-owner" ownership = "ObjectWriter" client = boto3.client("s3") client.create_bucket(Bucket=bucket) client.put_bucket_ownership_controls( Bucket=bucket, OwnershipControls={"Rules": [{"ObjectOwnership": ownership}]} ) response = client.get_bucket_ownership_controls(Bucket=bucket) response["OwnershipControls"]["Rules"][0]["ObjectOwnership"].should.equal(ownership) @mock_s3 def test_delete_ownership_from_bucket(): bucket = "bucket-with-owner-removed" ownership = "BucketOwnerEnforced" client = boto3.client("s3") client.create_bucket(Bucket=bucket, ObjectOwnership=ownership) client.delete_bucket_ownership_controls(Bucket=bucket) with pytest.raises(ClientError) as ex: client.get_bucket_ownership_controls(Bucket=bucket) ex.value.response["Error"]["Code"].should.equal("OwnershipControlsNotFoundError") ex.value.response["Error"]["Message"].should.equal( "The bucket ownership controls were not found" )
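# --- Illustrative standalone use of the mocked API (a sketch; the bucket
# name is arbitrary). The same calls the tests above exercise can be driven
# directly inside a mock_s3 context, reusing this module's imports.
def demo_ownership():
    with mock_s3():
        client = boto3.client("s3", region_name="us-east-1")
        client.create_bucket(Bucket="demo-bucket",
                             ObjectOwnership="BucketOwnerEnforced")
        controls = client.get_bucket_ownership_controls(Bucket="demo-bucket")
        return controls["OwnershipControls"]["Rules"][0]["ObjectOwnership"]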
{ "content_hash": "d5370e456d822e3c5c49aeb3afb7fa59", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 88, "avg_line_length": 32.1, "alnum_prop": 0.7258566978193146, "repo_name": "spulec/moto", "id": "bc68eb892bbdbe2e4498b463575aefda638d5e3a", "size": "1605", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_s3/test_s3_ownership.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "255" }, { "name": "HTML", "bytes": "5983" }, { "name": "Java", "bytes": "1688" }, { "name": "JavaScript", "bytes": "1424" }, { "name": "Jinja", "bytes": "2502" }, { "name": "Makefile", "bytes": "2284" }, { "name": "Python", "bytes": "14737868" }, { "name": "Ruby", "bytes": "188" }, { "name": "Scala", "bytes": "782" }, { "name": "Shell", "bytes": "5515" } ], "symlink_target": "" }
import sublime
import sublime_plugin
import os
import json
from datetime import datetime
from .baseutil import SysIo
from .setting import SfBasicConfig
from .uiutil import SublConsole

##########################################################################################
# Project Config
##########################################################################################
class ProjectConfigCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        self.sf_basic_config = SfBasicConfig()
        self.sublconsole = SublConsole(self.sf_basic_config)
        self.sublconsole.showlog('open config: ' + self.sf_basic_config.get_project_config_path())
        self.sublconsole.open_file(self.sf_basic_config.get_project_config_path())


class NewXyProjectCommand(sublime_plugin.WindowCommand):
    def run(self, retrieve_type=None):
        self.sf_basic_config = SfBasicConfig()
        self.sublconsole = SublConsole(self.sf_basic_config)
        home = self.sf_basic_config.get_user_home_dir()
        tstr = datetime.now().strftime('YourProjectName_%Y%m%d_%H%M%S')
        self.save_dir = os.path.join(home, tstr)
        self.sublconsole.debug(self.window.extract_variables())
        self.window.show_input_panel("Please Input your save path: ", self.save_dir, self.on_input, None, None)

    def on_input(self, args):
        self.sublconsole.debug(args)
        if not os.path.exists(args):
            os.makedirs(args)
        project_name = os.path.basename(args)
        sublime_settings_path = os.path.join(args, project_name + ".sublime-project")
        if not os.path.exists(sublime_settings_path):
            self._mk_project_file(args, sublime_settings_path)
        self.sublconsole.open_project(sublime_settings_path)
        self._open_config(args)

    def _mk_project_file(self, project_path, file_path):
        sysio = SysIo()
        self.sublconsole.debug("make project file")
        sublime_settings = {"folders": [{
            "file_exclude_patterns": ["*.*-meta.xml"],
            "folder_exclude_patterns": [
                self.sf_basic_config.get_xyfolder() + "/.tmp",
                self.sf_basic_config.get_xyfolder() + "/MetadataBackupTools/codepkg"
            ],
            "path": project_path
        }]}
        sysio.save_file(file_path, json.dumps(sublime_settings, indent=4))

    def _open_config(self, project_dir):
        self.sf_basic_config = SfBasicConfig(project_dir=project_dir)
        self.sublconsole.showlog('open config: ' + self.sf_basic_config.get_project_config_path())
        # # TODO
        # self.sublconsole.open_file(self.sf_basic_config.get_project_config_path())
        # sublime.active_window().open_file(os.path.join(project_dir, ".xyconfig", "xyconfig.json"))
        # sublime.active_window().open_file(self.sf_basic_config.get_project_config_path())


# Handles compiling to the server on save.
class SaveListener(sublime_plugin.EventListener):
    def on_post_save_async(self, view):
        sf_basic_config = SfBasicConfig()
        settings = sf_basic_config.get_setting()
        username = settings["username"]
        if username != 'input your username' and username != '':
            if sf_basic_config.get_auto_save_to_server():
                sf_basic_config.window.run_command("update_metadata")


class ProjectConfigWizardCommand(sublime_plugin.WindowCommand):
    def run(self):
        self.sf_basic_config = SfBasicConfig()
        self.sublconsole = SublConsole(self.sf_basic_config)
        self.sublconsole.showlog('start to config project')
        self._init_input_conf()
        self.on_input(None)

    def on_input(self, args):
        self.sublconsole.debug(args)
        if self.input_index > 0:
            pre_conf = self.input_conf[self.input_index - 1]
            ui_type = pre_conf["type"]
            if ui_type == "input":
                pre_conf["value"] = args
            elif ui_type == "select":
                if 0 <= args < len(pre_conf["option-v"]):
                    pre_conf["value"] = pre_conf["option-v"][args]
        if self.input_index < len(self.input_conf):
            current_conf = self.input_conf[self.input_index]
            if current_conf["type"] == "input":
                caption = "Please Input your %s: " % current_conf["key"]
                self.window.show_input_panel(caption, current_conf["value"], self.on_input, None, None)
            elif current_conf["type"] == "select":
                show_opts = current_conf["option"]
                self.window.show_quick_panel(show_opts, self.on_input, sublime.MONOSPACE_FONT)
            self.input_index = self.input_index + 1
        else:
            self._save_conf()

    def _init_input_conf(self):
        settings = self.sf_basic_config.get_setting()
        self.input_index = 0
        self.input_conf = (
            {"key": "is_sandbox",
             "value": settings["is_sandbox"],
             "type": "select",
             "option": ["Is Sandbox ? True", "Is Sandbox ? False"],
             "option-v": [True, False]
             },
            {"key": "username",
             "value": settings["username"],
             "type": "input"
             },
            {"key": "password",
             "value": settings["password"],
             "type": "input"
             },
            {"key": "security_token",
             "value": settings["security_token"],
             "type": "input"
             },
            {"key": "api_version",
             "value": settings["api_version"],
             "type": "select",
             "option": ["45.0", "44.0", "43.0", "42.0", "41.0", "40.0", "39.0", "38.0", "37.0", "36.0"],
             "option-v": [45.0, 44.0, 43.0, 42.0, 41.0, 40.0, 39.0, 38.0, 37.0, 36.0]
             },
            {"key": "auto_save_to_server",
             "value": self.sf_basic_config.get_auto_save_to_server(),
             "type": "select",
             "option": ["Auto Save to Server ? No.", "Auto Save to Server ? Yes."],
             "option-v": [False, True]
             },
            {"key": "default_browser",
             "value": settings["default_browser"],
             "type": "select",
             "option": self.sf_basic_config.get_browser_setting2(),
             "option-v": [item[1] for item in self.sf_basic_config.get_browser_setting2()]
             }
        )

    def _save_conf(self):
        project_config = {}
        for a_conf in self.input_conf:
            project_config[a_conf["key"]] = a_conf["value"]
        self.sublconsole.debug(project_config)
        self.sf_basic_config.update_project_config(project_config)
        self.sublconsole.showlog('config project done!')


class ProxyConfigWizardCommand(sublime_plugin.WindowCommand):
    def run(self):
        self.sf_basic_config = SfBasicConfig()
        self.sublconsole = SublConsole(self.sf_basic_config)
        self.sublconsole.showlog('start to config proxy')
        self._init_input_conf()
        self.on_input(None)

    def on_input(self, args):
        self.sublconsole.debug(args)
        if self.input_index > 0:
            pre_conf = self.input_conf[self.input_index - 1]
            ui_type = pre_conf["type"]
            if ui_type == "input":
                pre_conf["value"] = args
            elif ui_type == "select":
                if 0 <= args < len(pre_conf["option-v"]):
                    pre_conf["value"] = pre_conf["option-v"][args]
        if self.input_index < len(self.input_conf):
            current_conf = self.input_conf[self.input_index]
            if current_conf["type"] == "input":
                caption = "Please Input your %s: " % current_conf["key"]
                self.window.show_input_panel(caption, current_conf["value"], self.on_input, None, None)
            elif current_conf["type"] == "select":
                show_opts = current_conf["option"]
                self.window.show_quick_panel(show_opts, self.on_input, sublime.MONOSPACE_FONT)
            self.input_index = self.input_index + 1
        else:
            self._save_conf()

    def _init_input_conf(self):
        proxy_config = self.sf_basic_config.get_proxy()
        self.input_index = 0
        self.input_conf = (
            {"key": "use_proxy",
             "value": proxy_config["use_proxy"],
             "type": "select",
             "option": ["Use Proxy.", "Don't use Proxy."],
             "option-v": [True, False]
             },
            {"key": "proxyhost",
             "value": proxy_config["proxyhost"],
             "type": "input"
             },
            {"key": "proxyport",
             "value": proxy_config["proxyport"],
             "type": "input"
             },
            {"key": "proxypassword",
             "value": proxy_config["proxypassword"],
             "type": "input"
             },
            {"key": "proxyuser",
             "value": proxy_config["proxyuser"],
             "type": "input"
             }
        )

    def _save_conf(self):
        proxy_config = {}
        for a_conf in self.input_conf:
            proxy_config[a_conf["key"]] = a_conf["value"]
        project_config = {}
        project_config["proxy"] = proxy_config
        self.sf_basic_config.update_project_config(project_config)
        self.sublconsole.showlog('config proxy done!')
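# --- Illustrative invocation (a sketch; Sublime Text derives command names
# from the class names above, e.g. ProjectConfigWizardCommand becomes
# "project_config_wizard"). From another plugin, or the console:
#
#     sublime.active_window().run_command("project_config_wizard")
#     sublime.active_window().run_command("proxy_config_wizard")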
{ "content_hash": "6c0b679703f2b6231ccaec8d85826503", "timestamp": "", "source": "github", "line_count": 226, "max_line_length": 105, "avg_line_length": 41.15929203539823, "alnum_prop": 0.5452590840679423, "repo_name": "exiahuang/SalesforceXyTools", "id": "b217861724f3ec5b62278292f89be423a2398807", "size": "9302", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main_project.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Apex", "bytes": "9303" }, { "name": "Batchfile", "bytes": "352" }, { "name": "Python", "bytes": "2213820" } ], "symlink_target": "" }
from unittest import TestCase from warnings import catch_warnings, simplefilter as simple_filter from iota import Address, AddressChecksum, AsciiTrytesCodec, Hash, Tag, \ TryteString, TrytesDecodeError class TryteStringTestCase(TestCase): def test_ascii_bytes(self): """ Getting an ASCII representation of a TryteString, as bytes. """ self.assertEqual( bytes(TryteString(b'HELLOIOTA')), b'HELLOIOTA', ) def test_ascii_str(self): """ Getting an ASCII representation of a TryteString, as a unicode string. """ self.assertEqual( str(TryteString(b'HELLOIOTA')), 'HELLOIOTA', ) def test_comparison(self): """ Comparing TryteStrings for equality. """ trytes1 = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') trytes2 = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') trytes3 = TryteString( b'CCPCBDVC9DTCEAKDXC9D9DEARCWCPCBDVCTCEAHDWCTCEAKDCDFD9DSCSA', ) self.assertTrue(trytes1 == trytes2) self.assertFalse(trytes1 != trytes2) self.assertFalse(trytes1 == trytes3) self.assertTrue(trytes1 != trytes3) self.assertTrue(trytes1 is trytes1) self.assertFalse(trytes1 is not trytes1) self.assertFalse(trytes1 is trytes2) self.assertTrue(trytes1 is not trytes2) self.assertFalse(trytes1 is trytes3) self.assertTrue(trytes1 is not trytes3) # Comparing against strings is also allowed. self.assertTrue(trytes1 == b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertFalse(trytes1 != b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertFalse(trytes3 == b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertTrue(trytes3 != b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertTrue(trytes1 == 'RBTC9D9DCDQAEASBYBCCKBFA') self.assertFalse(trytes1 != 'RBTC9D9DCDQAEASBYBCCKBFA') self.assertFalse(trytes3 == 'RBTC9D9DCDQAEASBYBCCKBFA') self.assertTrue(trytes3 != 'RBTC9D9DCDQAEASBYBCCKBFA') # Ditto for bytearrays. self.assertTrue(trytes1 == bytearray(b'RBTC9D9DCDQAEASBYBCCKBFA')) self.assertFalse(trytes1 != bytearray(b'RBTC9D9DCDQAEASBYBCCKBFA')) self.assertFalse(trytes3 == bytearray(b'RBTC9D9DCDQAEASBYBCCKBFA')) self.assertTrue(trytes3 != bytearray(b'RBTC9D9DCDQAEASBYBCCKBFA')) def test_comparison_error_wrong_type(self): """ Attempting to compare a TryteString with something that is not a TrytesCompatible. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') with self.assertRaises(TypeError): # TryteString is not a numeric type, so comparing against a # numeric value doesn't make any sense. trytes == 42 # Identity comparison still works though. self.assertFalse(trytes is 'RBTC9D9DCDQAEASBYBCCKBFA') self.assertTrue(trytes is not 'RBTC9D9DCDQAEASBYBCCKBFA') def test_bool_cast(self): """ Casting a TryteString as a boolean. """ # Empty TryteString evaluates to False. self.assertIs(bool(TryteString(b'')), False) # TryteString that is nothing but padding also evaluates to False. self.assertIs(bool(TryteString(b'9')), False) self.assertIs(bool(TryteString(b'', pad=1024)), False) # A single non-padding tryte evaluates to True. self.assertIs(bool(TryteString(b'A')), True) self.assertIs(bool(TryteString(b'9'*1024 + b'Z')), True) def test_container(self): """ Checking whether a TryteString contains a sequence. 
""" trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertTrue(trytes in trytes) self.assertTrue(TryteString(b'RBTC9D') in trytes) self.assertTrue(TryteString(b'DQAEAS') in trytes) self.assertTrue(TryteString(b'CCKBFA') in trytes) self.assertFalse(TryteString(b'9RBTC9D9DCDQAEASBYBCCKBFA') in trytes) self.assertFalse(TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA9') in trytes) self.assertFalse(TryteString(b'RBTC9D9DCDQA9EASBYBCCKBFA') in trytes) self.assertFalse(TryteString(b'X') in trytes) # Any TrytesCompatible value will work here. self.assertTrue(b'EASBY' in trytes) self.assertTrue('EASBY' in trytes) self.assertFalse(b'QQQ' in trytes) self.assertFalse('QQQ' in trytes) self.assertTrue(bytearray(b'CCKBF') in trytes) self.assertFalse(bytearray(b'ZZZ') in trytes) def test_container_error_wrong_type(self): """ Checking whether a TryteString contains a sequence with an incompatible type. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') with self.assertRaises(TypeError): # TryteString is not a numeric type, so this makes about as much # sense as ``16 in b'Hello, world!'``. 16 in trytes with self.assertRaises(TypeError): # This is too ambiguous. Is this a list of trit values that can # appar anywhere in the tryte sequence, or does it have to match # a tryte exactly? [0, 1, 1, 0, -1, 0] in trytes with self.assertRaises(TypeError): # This makes more sense than the previous example, but for # consistency, we will not allow checking for trytes inside # of a TryteString. [[0, 0, 0], [1, 1, 0]] in trytes with self.assertRaises(TypeError): # Did I miss something? When did we get to DisneyLand? None in trytes def test_concatenation(self): """ Concatenating TryteStrings with TrytesCompatibles. """ trytes1 = TryteString(b'RBTC9D9DCDQA') trytes2 = TryteString(b'EASBYBCCKBFA') concat = trytes1 + trytes2 self.assertIsInstance(concat, TryteString) self.assertEqual(bytes(concat), b'RBTC9D9DCDQAEASBYBCCKBFA') # You can also concatenate a TryteString with any TrytesCompatible. self.assertEqual( bytes(trytes1 + b'EASBYBCCKBFA'), b'RBTC9D9DCDQAEASBYBCCKBFA', ) self.assertEqual( bytes(trytes1 + 'EASBYBCCKBFA'), b'RBTC9D9DCDQAEASBYBCCKBFA', ) self.assertEqual( bytes(trytes1 + bytearray(b'EASBYBCCKBFA')), b'RBTC9D9DCDQAEASBYBCCKBFA', ) def test_concatenation_error_wrong_type(self): """ Attempting to concatenate a TryteString with something that is not a TrytesCompatible. """ trytes = TryteString(b'RBTC9D9DCDQA') with self.assertRaises(TypeError): # TryteString is not a numeric type, so adding a numeric value # doesn't make any sense. trytes += 42 with self.assertRaises(TypeError): # What is this I don't even.. trytes += None def test_slice_accessor(self): """ Taking slices of a TryteString. """ ts = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertEqual(ts[4], TryteString(b'9')) self.assertEqual(ts[:4], TryteString(b'RBTC')) self.assertEqual(ts[:-4], TryteString(b'RBTC9D9DCDQAEASBYBCC')) self.assertEqual(ts[4:], TryteString(b'9D9DCDQAEASBYBCCKBFA')) self.assertEqual(ts[-4:], TryteString(b'KBFA')) self.assertEqual(ts[4:-4:4], TryteString(b'9CEY')) with self.assertRaises(IndexError): ts[42] # To match the behavior of built-in types, TryteString will allow # you to access a slice that occurs after the end of the sequence. # There's nothing in it, of course, but you can access it. self.assertEqual(ts[42:43], TryteString(b'')) def test_slice_mutator(self): """ Modifying slices of a TryteString. 
""" ts = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') ts[4] = TryteString(b'A') self.assertEqual(ts, TryteString(b'RBTCAD9DCDQAEASBYBCCKBFA')) ts[:4] = TryteString(b'BCDE') self.assertEqual(ts, TryteString(b'BCDEAD9DCDQAEASBYBCCKBFA')) # The lengths do not have to be the same... ts[:-4] = TryteString(b'EFGHIJ') self.assertEqual(ts, TryteString(b'EFGHIJKBFA')) # ... unless you are trying to set a single tryte. with self.assertRaises(ValueError): ts[4] = TryteString(b'99') # Any TrytesCompatible value will work. ts[3:-3] = b'FOOBAR' self.assertEqual(ts, TryteString(b'EFGFOOBARBFA')) # I have no idea why you would ever need to do this, but I'm not # going to judge, either. ts[2:-2:2] = b'IOTA' self.assertEqual(ts, TryteString(b'EFIFOOTAABFA')) with self.assertRaises(IndexError): ts[42] = b'9' # To match the behavior of built-in types, TryteString will allow # you to modify a slice that occurs after the end of the sequence. ts[42:43] = TryteString(b'9') self.assertEqual(ts, TryteString(b'EFIFOOTAABFA9')) def test_iter_chunks(self): """ Iterating over a TryteString in constant-size chunks. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertListEqual( list(trytes.iter_chunks(9)), [ TryteString(b'RBTC9D9DC'), TryteString(b'DQAEASBYB'), # The final chunk is padded as necessary. TryteString(b'CCKBFA999'), ], ) def test_init_from_unicode_string(self): """ Initializing a TryteString from a unicode string. """ trytes1 = TryteString('RBTC9D9DCDQAEASBYBCCKBFA') trytes2 = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertEqual(trytes1, trytes2) def test_init_from_unicode_string_error_not_ascii(self): """ Attempting to initialize a TryteString from a unicode string that contains non-ASCII characters. """ with self.assertRaises(UnicodeEncodeError): TryteString('¡Hola, IOTA!') def test_init_from_tryte_string(self): """ Initializing a TryteString from another TryteString. """ trytes1 = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') trytes2 = TryteString(trytes1) self.assertFalse(trytes1 is trytes2) self.assertTrue(trytes1 == trytes2) def test_init_from_tryte_string_error_wrong_subclass(self): """ Initializing a TryteString from a conflicting subclass instance. This restriction does not apply when initializing a TryteString instance; only subclasses. """ tag = Tag(b'RBTC9D9DCDQAEASBYBCCKBFA') with self.assertRaises(TypeError): # When initializing a subclassed TryteString, you have to use the # same type (or a generic TryteString). Address(tag) # If you are 110% confident that you know what you are doing, you # can force the conversion by casting as a generic TryteString # first. addy = Address(TryteString(tag)) self.assertEqual( bytes(addy), b'RBTC9D9DCDQAEASBYBCCKBFA9999999999999999' b'99999999999999999999999999999999999999999', ) def test_init_padding(self): """ Apply padding to ensure a TryteString has a minimum length. """ trytes = TryteString( trytes = b'ZJVYUGTDRPDYFGFXMKOTV9ZWSGFK9CFPXTITQL' b'QNLPPG9YNAARMKNKYQO9GSCSBIOTGMLJUFLZWSY', pad = 81, ) self.assertEqual( bytes(trytes), # Note the additional Tryte([-1, -1, -1]) values appended to the # end of the sequence (represented in ASCII as '9'). b'ZJVYUGTDRPDYFGFXMKOTV9ZWSGFK9CFPXTITQLQN' b'LPPG9YNAARMKNKYQO9GSCSBIOTGMLJUFLZWSY9999' ) def test_init_from_tryte_string_with_padding(self): """ Initializing a TryteString from another TryteString, and padding the new one to a specific length. 
""" trytes1 = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') trytes2 = TryteString(trytes1, pad=27) self.assertFalse(trytes1 is trytes2) self.assertFalse(trytes1 == trytes2) self.assertEqual(bytes(trytes2), b'RBTC9D9DCDQAEASBYBCCKBFA999') def test_init_error_invalid_characters(self): """ Attempting to reset a TryteString with a value that contains invalid characters. """ with self.assertRaises(ValueError): TryteString(b'not valid') def test_init_error_int(self): """ Attempting to reset a TryteString from an int. """ with self.assertRaises(TypeError): TryteString(42) def test_length(self): """ Just like byte strings, TryteStrings have length. """ self.assertEqual(len(TryteString(b'RBTC')), 4) self.assertEqual(len(TryteString(b'RBTC', pad=81)), 81) def test_iterator(self): """ Just like byte strings, you can iterate over TryteStrings. """ self.assertListEqual( list(TryteString(b'RBTC')), [b'R', b'B', b'T', b'C'], ) self.assertListEqual( list(TryteString(b'RBTC', pad=6)), [b'R', b'B', b'T', b'C', b'9', b'9'], ) def test_encode(self): """ Converting a sequence of trytes into a sequence of bytes. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') self.assertEqual(trytes.encode(), b'Hello, IOTA!') def test_encode_partial_sequence_errors_strict(self): """ Attempting to convert an odd number of trytes into bytes using the `encode` method with errors='strict'. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA9') with self.assertRaises(TrytesDecodeError): trytes.encode(errors='strict') def test_encode_partial_sequence_errors_ignore(self): """ Attempting to convert an odd number of trytes into bytes using the `encode` method with errors='ignore'. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA9') self.assertEqual( trytes.encode(errors='ignore'), # The extra tryte is ignored. b'Hello, IOTA!', ) def test_encode_partial_sequence_errors_replace(self): """ Attempting to convert an odd number of trytes into bytes using the `encode` method with errors='replace'. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA9') self.assertEqual( trytes.encode(errors='replace'), # The extra tryte is replaced with '?'. b'Hello, IOTA!?', ) def test_encode_non_ascii_errors_strict(self): """ Converting a sequence of trytes into bytes using the `encode` method yields non-ASCII characters, and errors='strict'. """ trytes = TryteString(b'ZJVYUGTDRPDYFGFXMK') with self.assertRaises(TrytesDecodeError): trytes.encode(errors='strict') def test_encode_non_ascii_errors_ignore(self): """ Converting a sequence of trytes into bytes using the `encode` method yields non-ASCII characters, and errors='ignore'. """ trytes = TryteString(b'ZJVYUGTDRPDYFGFXMK') self.assertEqual( trytes.encode(errors='ignore'), b'\xd2\x80\xc3', ) def test_encode_non_ascii_errors_replace(self): """ Converting a sequence of trytes into bytes using the `encode` method yields non-ASCII characters, and errors='replace'. """ trytes = TryteString(b'ZJVYUGTDRPDYFGFXMK') self.assertEqual( trytes.encode(errors='replace'), b'??\xd2\x80??\xc3??', ) def test_as_bytes_deprecated(self): """ :py:meth:`TryteString.as_bytes` is deprecated in favor of :py:meth:`TryteString.encode`. """ trytes = TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA') with catch_warnings(record=True) as caught_warnings: simple_filter('always', category=DeprecationWarning) encoded = trytes.as_bytes() self.assertEqual( [w.category for w in caught_warnings], [DeprecationWarning], ) self.assertEqual(encoded, b'Hello, IOTA!') def test_decode(self): """ Converting a sequence of trytes into a Unicode string. 
""" trytes = TryteString(b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD') self.assertEqual(trytes.decode(), '你好,世界!') def test_decode_strip(self): """ Strip trailing padding from a TryteString before converting. """ # Note odd number of trytes! trytes = TryteString(b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD9999999999999') self.assertEqual(trytes.decode(), '你好,世界!') def test_decode_no_strip(self): """ Prevent stripping trailing padding when converting to string. """ trytes = TryteString(b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD999999999999') self.assertEqual( trytes.decode(strip_padding=False), '你好,世界!\x00\x00\x00\x00\x00\x00', ) def test_decode_not_utf8_errors_strict(self): """ The tryte sequence does not represent a valid UTF-8 sequence, and errors='strict'. """ # Chop off a couple of trytes to break up a multi-byte sequence. trytes = TryteString.from_unicode('你好,世界!')[:-2] # Note the exception type. The trytes were decoded to bytes # successfully; the exception occurred while trying to decode the # bytes into Unicode code points. with self.assertRaises(UnicodeDecodeError): trytes.decode('strict') def test_decode_not_utf8_errors_ignore(self): """ The tryte sequence does not represent a valid UTF-8 sequence, and errors='ignore'. """ # Chop off a couple of trytes to break up a multi-byte sequence. trytes = TryteString.from_unicode('你好,世界!')[:-2] self.assertEqual( trytes.decode('ignore'), '你好,世界', ) def test_decode_not_utf8_errors_replace(self): """ The tryte sequence does not represent a valid UTF-8 sequence, and errors='replace'. """ # Chop off a couple of trytes to break up a multi-byte sequence. trytes = TryteString.from_unicode('你好,世界!')[:-2] self.assertEqual( trytes.decode('replace'), # Note that the replacement character is the Unicode replacement # character, not '?'. '你好,世界�', ) def test_as_string_deprecated(self): """ :py:meth:`TryteString.as_string` is deprecated in favor of :py:meth:`TryteString.decode`. """ trytes = TryteString(b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD') with catch_warnings(record=True) as caught_warnings: simple_filter('always', category=DeprecationWarning) decoded = trytes.as_string() self.assertEqual( [w.category for w in caught_warnings], [DeprecationWarning], ) self.assertEqual(decoded, '你好,世界!') def test_as_trytes_single_tryte(self): """ Converting a single-tryte TryteString into a sequence of tryte values. """ # Fortunately, there's only 27 possible tryte configurations, so # it's not too painful to test them all. self.assertDictEqual( { chr(c): TryteString(chr(c).encode('ascii')).as_trytes() for c in AsciiTrytesCodec.alphabet.values() }, { '9': [[ 0, 0, 0]], # 0 'A': [[ 1, 0, 0]], # 1 'B': [[-1, 1, 0]], # 2 'C': [[ 0, 1, 0]], # 3 'D': [[ 1, 1, 0]], # 4 'E': [[-1, -1, 1]], # 5 'F': [[ 0, -1, 1]], # 6 'G': [[ 1, -1, 1]], # 7 'H': [[-1, 0, 1]], # 8 'I': [[ 0, 0, 1]], # 9 'J': [[ 1, 0, 1]], # 10 'K': [[-1, 1, 1]], # 11 'L': [[ 0, 1, 1]], # 12 'M': [[ 1, 1, 1]], # 13 'N': [[-1, -1, -1]], # -13 (overflow) 'O': [[ 0, -1, -1]], # -12 'P': [[ 1, -1, -1]], # -11 'Q': [[-1, 0, -1]], # -10 'R': [[ 0, 0, -1]], # -9 'S': [[ 1, 0, -1]], # -8 'T': [[-1, 1, -1]], # -7 'U': [[ 0, 1, -1]], # -6 'V': [[ 1, 1, -1]], # -5 'W': [[-1, -1, 0]], # -4 'X': [[ 0, -1, 0]], # -3 'Y': [[ 1, -1, 0]], # -2 'Z': [[-1, 0, 0]], # -1 }, ) def test_as_trytes_mulitple_trytes(self): """ Converting a multiple-tryte TryteString into a sequence of tryte values. 
""" self.assertListEqual( TryteString(b'ZJVYUGTDRPDYFGFXMK').as_trytes(), [ [-1, 0, 0], [ 1, 0, 1], [ 1, 1, -1], [ 1, -1, 0], [ 0, 1, -1], [ 1, -1, 1], [-1, 1, -1], [ 1, 1, 0], [ 0, 0, -1], [ 1, -1, -1], [ 1, 1, 0], [ 1, -1, 0], [ 0, -1, 1], [ 1, -1, 1], [ 0, -1, 1], [ 0, -1, 0], [ 1, 1, 1], [-1, 1, 1], ], ) def test_as_trits_single_tryte(self): """ Converting a single-tryte TryteString into a sequence of trit values. """ # Fortunately, there's only 27 possible tryte configurations, so # it's not too painful to test them all. self.assertDictEqual( { chr(c): TryteString(chr(c).encode('ascii')).as_trits() for c in AsciiTrytesCodec.alphabet.values() }, { '9': [ 0, 0, 0], # 0 'A': [ 1, 0, 0], # 1 'B': [-1, 1, 0], # 2 'C': [ 0, 1, 0], # 3 'D': [ 1, 1, 0], # 4 'E': [-1, -1, 1], # 5 'F': [ 0, -1, 1], # 6 'G': [ 1, -1, 1], # 7 'H': [-1, 0, 1], # 8 'I': [ 0, 0, 1], # 9 'J': [ 1, 0, 1], # 10 'K': [-1, 1, 1], # 11 'L': [ 0, 1, 1], # 12 'M': [ 1, 1, 1], # 13 'N': [-1, -1, -1], # -13 (overflow) 'O': [ 0, -1, -1], # -12 'P': [ 1, -1, -1], # -11 'Q': [-1, 0, -1], # -10 'R': [ 0, 0, -1], # -9 'S': [ 1, 0, -1], # -8 'T': [-1, 1, -1], # -7 'U': [ 0, 1, -1], # -6 'V': [ 1, 1, -1], # -5 'W': [-1, -1, 0], # -4 'X': [ 0, -1, 0], # -3 'Y': [ 1, -1, 0], # -2 'Z': [-1, 0, 0], # -1 }, ) def test_as_trits_multiple_trytes(self): """ Converting a multiple-tryte TryteString into a sequence of trit values. """ self.assertListEqual( TryteString(b'ZJVYUGTDRPDYFGFXMK').as_trits(), [ -1, 0, 0, 1, 0, 1, 1, 1, -1, 1, -1, 0, 0, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 0, 0, 0, -1, 1, -1, -1, 1, 1, 0, 1, -1, 0, 0, -1, 1, 1, -1, 1, 0, -1, 1, 0, -1, 0, 1, 1, 1, -1, 1, 1, ], ) def test_random(self): """ Generating a random sequence of trytes. """ trytes = TryteString.random(Hash.LEN) # It is (hopefully!) impossible to predict what the actual trytes # will be, but at least we can verify that the correct number were # generated. self.assertEqual(len(trytes), Hash.LEN) def test_random_no_length(self): """ Trying to create a random TryteString without specifying length. """ with self.assertRaises(TypeError): trytes = TryteString.random() def test_random_wrong_length(self): """ Generating random Trytestring with negative length. """ with self.assertRaises(TypeError): trytes = TryteString.random(length=-5) def test_from_bytes(self): """ Converting a sequence of bytes into a TryteString. """ self.assertEqual( bytes(TryteString.from_bytes(b'Hello, IOTA!')), b'RBTC9D9DCDQAEASBYBCCKBFA', ) def test_from_unicode(self): """ Converting a Unicode string into a TryteString. """ self.assertEqual( bytes(TryteString.from_unicode('你好,世界!')), b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD', ) def test_from_string_deprecated(self): """ :py:meth:`TryteString.from_string` is deprecated in favor of :py:meth:`TryteString.from_unicode`. https://github.com/iotaledger/iota.py/issues/90 """ with catch_warnings(record=True) as caught_warnings: simple_filter('always', category=DeprecationWarning) trytes = TryteString.from_string('你好,世界!') self.assertEqual( [w.category for w in caught_warnings], [DeprecationWarning], ) self.assertEqual( bytes(trytes), b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD', ) def test_from_trytes(self): """ Converting a sequence of tryte values into a TryteString. 
""" trytes = [ [0, 0, -1], [-1, 1, 0], [-1, 1, -1], [0, 1, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 1, 0], [-1, 0, -1], [1, 0, 0], [-1, -1, 1], [1, 0, 0], [1, 0, -1], [-1, 1, 0], [1, -1, 0], [-1, 1, 0], [0, 1, 0], [0, 1, 0], [-1, 1, 1], [-1, 1, 0], [0, -1, 1], [1, 0, 0], ] self.assertEqual( bytes(TryteString.from_trytes(trytes)), b'RBTC9D9DCDQAEASBYBCCKBFA', ) def test_from_trits(self): """ Converting a sequence of trit values into a TryteString. """ trits = [ 0, 0, -1, -1, 1, 0, -1, 1, -1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, -1, 0, -1, 1, 0, 0, -1, -1, 1, 1, 0, 0, 1, 0, -1, -1, 1, 0, 1, -1, 0, -1, 1, 0, 0, 1, 0, 0, 1, 0, -1, 1, 1, -1, 1, 0, 0, -1, 1, 1, 0, 0, ] self.assertEqual( bytes(TryteString.from_trits(trits)), b'RBTC9D9DCDQAEASBYBCCKBFA', ) def test_from_trits_wrong_length_padded(self): """ Automatically padding a sequence of trit values with length not divisible by 3 so that it can be converted into a TryteString. """ trits = [ 0, 0, -1, -1, 1, 0, -1, 1, -1, 0, 1, # 0, <- Oops, did you lose something? ] self.assertEqual( bytes(TryteString.from_trits(trits)), b'RBTC', ) class HashTestCase(TestCase): def test_random(self): """ Generating a random Hash. """ rand = Hash.random() self.assertEqual(len(rand), Hash.LEN) class AddressTestCase(TestCase): def test_init_automatic_pad(self): """ Addresses are automatically padded to 81 trytes. """ addy = Address( b'JVMTDGDPDFYHMZPMWEKKANBQSLSDTIIHAYQUMZOK' b'HXXXGJHJDQPOMDOMNRDKYCZRUFZROZDADTHZC' ) self.assertEqual( bytes(addy), # Note the extra 9's added to the end. b'JVMTDGDPDFYHMZPMWEKKANBQSLSDTIIHAYQUMZOK' b'HXXXGJHJDQPOMDOMNRDKYCZRUFZROZDADTHZC9999', ) # This attribute will make more sense once we start working with # address checksums. self.assertEqual( bytes(addy.address), b'JVMTDGDPDFYHMZPMWEKKANBQSLSDTIIHAYQUMZOK' b'HXXXGJHJDQPOMDOMNRDKYCZRUFZROZDADTHZC9999', ) # Checksum is not generated automatically. self.assertIsNone(addy.checksum) def test_init_error_too_long(self): """ Attempting to create an address longer than 81 trytes. """ with self.assertRaises(ValueError): Address( # Extra padding at the end is not ignored. # If it's an address (without checksum), then it must be 81 # trytes exactly. b'JVMTDGDPDFYHMZPMWEKKANBQSLSDTIIHAYQUMZOK' b'HXXXGJHJDQPOMDOMNRDKYCZRUFZROZDADTHZC99999' ) def test_init_with_checksum(self): """ Creating an address with checksum already attached. """ addy = Address( b'RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE' b'9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVAFOXM9MUBX' ) self.assertEqual( bytes(addy), b'RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE' b'9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVAFOXM9MUBX', ) self.assertEqual( bytes(addy.address), b'RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE' b'9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVA', ) self.assertEqual( bytes(addy.checksum), b'FOXM9MUBX', ) def test_init_error_checksum_too_long(self): """ Attempting to create an address longer than 90 trytes. """ with self.assertRaises(ValueError): Address( # Extra padding at the end is not ignored. # If it's a checksummed address, then it must be 90 trytes # exactly. b'RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE' b'9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVAFOXM9MUBX9' ) def test_checksum_valid(self): """ An address is created with a valid checksum. 
""" addy = Address( b'RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE' b'9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVAITCOXAQSD', ) self.assertTrue(addy.is_checksum_valid()) self.assertEqual( bytes(addy.with_valid_checksum()), b'RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE' b'9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVAITCOXAQSD', ) def test_checksum_invalid(self): """ An address is created with an invalid checksum. """ trytes = ( b'IGKUOZGEFNSVJXETLIBKRSUZAWMYSVDPMHGQPCETEFNZP' b'XSJLZMBLAWDRLUBWPIPKFNEPADIWMXMYYRKQ' ) addy = Address( trytes + b'XYYNAFRMB' # <- Last tryte s/b 'A'. ) self.assertFalse(addy.is_checksum_valid()) self.assertEqual( bytes(addy.with_valid_checksum()), b'IGKUOZGEFNSVJXETLIBKRSUZAWMYSVDPMHGQPCETEFNZP' b'XSJLZMBLAWDRLUBWPIPKFNEPADIWMXMYYRKQXYYNAFRMA', ) def test_checksum_null(self): """ An address is created without a checksum. """ trytes = ( b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMV' ) addy = Address(trytes) self.assertFalse(addy.is_checksum_valid()) self.assertEqual( bytes(addy.with_valid_checksum()), b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMVJJJGBARPB', ) def test_with_checksum_attributes(self): """ :py:meth:`Address.with_valid_checksum` also copies attributes such as key index and balance. """ addy =\ Address( trytes = b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMV', key_index = 42, balance = 86, ) checked = addy.with_valid_checksum() self.assertEqual(checked.key_index, 42) self.assertEqual(checked.balance, 86) def test_add_checksum(self): """ Checksum is added to an address without it. """ addy = Address( trytes = b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMV' ) addy.add_checksum() self.assertTrue(addy.is_checksum_valid()) self.assertTrue(len(addy) == Address.LEN + AddressChecksum.LEN) def test_add_checksum_second_time(self): """ Checksum is added to an address that already has. """ addy = Address( trytes = b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMVJJJGBARPB' ) addy.add_checksum() self.assertTrue(addy.is_checksum_valid()) self.assertTrue(len(addy) == Address.LEN + AddressChecksum.LEN) self.assertEqual( addy, Address( trytes = b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMVJJJGBARPB' ) ) def test_remove_checksum(self): """ Checksum is removed from an address. """ addy = Address( trytes = b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMVJJJGBARPB' ) self.assertTrue(addy.is_checksum_valid()) self.assertTrue(len(addy) == Address.LEN + AddressChecksum.LEN) addy.remove_checksum() self.assertFalse(addy.is_checksum_valid()) self.assertTrue(len(addy) == Address.LEN) def test_remove_checksum_second_time(self): """ `remove_checksum` is called on an Address that does not have a checksum. """ addy = Address( trytes = b'ZKIUDZXQYQAWSHPKSAATJXPAQZPGYCDCQDRSMWWCGQJNI' b'PCOORMDRNREDUDKBMUYENYTFVUNEWDBAKXMV' ) self.assertFalse(addy.is_checksum_valid()) self.assertTrue(len(addy) == Address.LEN) addy.remove_checksum() self.assertFalse(addy.is_checksum_valid()) self.assertTrue(len(addy) == Address.LEN) def test_random(self): """ Creating a random Address object. """ addy = Address.random() self.assertEqual(len(addy), Address.LEN) class AddressChecksumTestCase(TestCase): def test_init_happy_path(self): """ Creating a valid address checksum. 
""" self.assertEqual(bytes(AddressChecksum(b'FOXM9MUBX')), b'FOXM9MUBX') def test_init_error_too_short(self): """ Attempting to create an address checksum shorter than 9 trytes. """ with self.assertRaises(ValueError): AddressChecksum(b'FOXM9MUB') def test_init_error_too_long(self): """ Attempting to create an address checksum longer than 9 trytes. """ with self.assertRaises(ValueError): # Extra padding characters are not ignored. # If it's an address checksum, it must be 9 trytes exactly. AddressChecksum(b'FOXM9MUBX9') def test_random(self): """ Creating a random AddressChecksum object. """ checksum = AddressChecksum.random() self.assertEqual(len(checksum), AddressChecksum.LEN) class TagTestCase(TestCase): def test_init_automatic_pad(self): """ Tags are automatically padded to 27 trytes. """ tag = Tag(b'COLOREDCOINS') self.assertEqual(bytes(tag), b'COLOREDCOINS999999999999999') def test_init_error_too_long(self): """ Attempting to create a tag longer than 27 trytes. """ with self.assertRaises(ValueError): # 28 chars = no va. Tag(b'COLOREDCOINS9999999999999999') def test_random(self): """ Creating a random Tag object. """ tag = Tag.random() self.assertEqual(len(tag), Tag.LEN)
{ "content_hash": "e9f2818a5a64e706ed671c9c1cf12cf8", "timestamp": "", "source": "github", "line_count": 1193, "max_line_length": 78, "avg_line_length": 27.893545683151718, "alnum_prop": 0.618595426270397, "repo_name": "iotaledger/iota.lib.py", "id": "6230024244b5aae558bd34e0be7556975cf5b46a", "size": "33408", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/types_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1053825" } ], "symlink_target": "" }
from .Base import *
from .helper import hasHackChars

from ckstyle.browsers.BinaryRule import ALL, STD
from ckstyle.browsers.Detector import doRuleSetDetect

class FEDCombineSameRuleSets(StyleSheetChecker):
    '''{
        "summary":"Combine two identical rule sets",
        "desc":"If two rule sets are exactly the same, they can be combined.<br> Note that combining may cause functional problems in corner cases; if you run into one, please report it.<br> For example:<br> <code>.a {width:100px}</code><br> <code>.b {width:100px}</code><br> <code>==></code><br> <code>.a, .b {width:100px}</code><br> <br> <strong>This rule is not executed in safe mode</strong><br> "
    }'''

    def __init__(self):
        self.id = 'combine-same-rulesets'
        self.errorMsg_empty = '"%s" contains the same rules in "${file}"'
        self.errorMsg = ''
        self.errorLevel = ERROR_LEVEL.WARNING

    # can be checked correctly only after reorder/fix/compress, so do not check
    def check(self, styleSheet, config):
        ruleSets = styleSheet.getRuleSets()
        mapping = self._gen_hash(ruleSets, ALL)
        length = len(mapping)
        errors = {}
        for i in range(length):
            for j in range(i + 1, length):
                if mapping[i][1] == mapping[j][1]:
                    cssText = mapping[i][1]
                    if cssText not in errors:
                        errors[cssText] = []
                        errors[cssText].append(mapping[i][0])
                        errors[cssText].append(mapping[j][0])
                    elif mapping[j][0] not in errors[cssText]:
                        errors[cssText].append(mapping[j][0])
                    #errors.append(self.errorMsg_empty % (mapping[i][0], mapping[j][0]))
        if len(errors) == 0:
            return True
        msgs = [(self.errorMsg_empty % ', '.join(x)) for x in errors.values()]
        return msgs

    def fix(self, styleSheet, config):
        browser = config._curBrowser if config._curBrowser is not None else ALL
        ruleSets = styleSheet.getRuleSets()
        mapping = self._gen_hash(ruleSets, browser)
        length = len(mapping)
        splitedSelectors = []
        for i in range(length):
            splitedSelectors.append([x.strip() for x in mapping[i][0].split(',') if x.strip() != ''])
        for i in range(length):
            if mapping[i][0] == 'extra':
                continue
            selectorHistory = []
            for j in range(i + 1, length):
                if mapping[i][1] != mapping[j][1]:
                    selectorHistory.extend(splitedSelectors[j])
                    continue

                # Combining follows two rules:
                # 1. Both rule sets must be compatible with the browser
                #    currently being targeted, i.e.
                #    browserI & browser != 0 and browserJ & browser != 0
                # 2. Their browser compatibility must be exactly the same,
                #    i.e. browserI ^ browserJ == 0
                # The second rule matters because, after combining, a selector
                # that is incompatible with some browser can invalidate the
                # whole combined rule set there, breaking selectors that used
                # to work.
                browserI = doRuleSetDetect(mapping[i][0])
                browserJ = doRuleSetDetect(mapping[j][0])
                if not (browserI & browser != 0 and browserJ & browser != 0 and browserI ^ browserJ == 0):
                    continue

                # background-position is dangerous: the position must be set
                # after background-image, so do not combine across it.
                if mapping[j][1].find('background-position') != -1:
                    selectorHistory.extend(splitedSelectors[j])
                    continue

                hasFlag = False
                # ".a {width:0} .a,.b{width:1} .b{width:0}" must not be
                # combined into ".a,.b{width:0} .a,.b{width:1}".
                # An even worse case remains:
                #     .c {width:1} .d{width:0} .b{width:1} .a{width:0}
                #     class="a c" => width 0
                #     class="b d" => width 1
                # Once combined into .b,.c{width:1} .d,.a{width:0} (combining
                # forward or backward gives the same result):
                #     class="a c" => width 0
                #     class="b d" => width 0 (should be 1)
                # This cannot be solved here: without analyzing the DOM there
                # is no way to tell whether two selectors match the same
                # element. That is why safe mode (--safeMode) exists.
                for x in splitedSelectors[j]:
                    if x in selectorHistory:
                        hasFlag = True
                        break
                if hasFlag:
                    selectorHistory.extend(splitedSelectors[j])
                    continue

                # make it different
                mapping[j][1] = str(i) + str(j)
                mapping[j][0] = 'extra'

                # extend target selector
                target = styleSheet.getRuleSets()[i]
                src = styleSheet.getRuleSets()[j]
                target.extendSelector(src)

                # remove rule set
                styleSheet.removeRuleSetByIndex(j)
                selectorHistory.extend(splitedSelectors[j])

        # remember to clean after removing rule sets
        styleSheet.clean()

    def _gen_hash(self, ruleSets, browser):
        mapping = []
        counter = 0
        for r in ruleSets:
            if r.extra:  # or doRuleSetDetect(r.selector) != STD
                # make it impossible to equal
                mapping.append(['extra', "do_not_combine_" + str(counter)])
                counter = counter + 1
                continue
            mapping.append([r.selector, r.compressRules(browser)])
        return mapping
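
# The detection half of this plugin boils down to grouping rule sets by
# their compressed declaration text; a stand-alone sketch of that idea
# (names are illustrative, not part of the plugin API):
def _find_identical_rulesets_sketch(mapping):
    # mapping: list of [selector, cssText] pairs, as built by _gen_hash()
    groups = {}
    for selector, cssText in mapping:
        groups.setdefault(cssText, []).append(selector)
    return [sels for sels in groups.values() if len(sels) > 1]

# _find_identical_rulesets_sketch([['.a', 'width:100px'], ['.b', 'width:100px']])
# => [['.a', '.b']]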
{ "content_hash": "2c9b26dbe85c31b47869adb1f5692f79", "timestamp": "", "source": "github", "line_count": 130, "max_line_length": 106, "avg_line_length": 40.261538461538464, "alnum_prop": 0.5147115017195262, "repo_name": "wangjeaf/CSSCheckStyle", "id": "0a8e0549f5052b107e65f4eb73af649bd2b9de65", "size": "5812", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ckstyle/plugins/FEDCombineSameRuleSets.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "373226" }, { "name": "Shell", "bytes": "928" }, { "name": "VimL", "bytes": "1871" } ], "symlink_target": "" }
__all__ = ['WebHandler', 'cases', 'request_filter']

import logging
import functools
from copy import copy
from webob import Response

logger = logging.getLogger(__name__)


def is_chainable(handler):
    while isinstance(handler, WebHandler):
        if not hasattr(handler, '_next_handler'):
            return True
        handler = handler._next_handler
    return False


def respond(response):
    def response_wrapper(env, data):
        return response
    return response_wrapper


def prepare_handler(handler):
    if isinstance(handler, Response):
        return respond(handler)
    elif isinstance(handler, type) and issubclass(handler, Response):
        return respond(handler())
    return handler


class WebHandler(object):
    '''Base class for all request handlers.'''

    def __or__(self, next_handler):
        '''
        Supports chaining a handler after itself::

            WebHandlerSubclass() | another_handler
        '''
        # XXX in some cases the copy count can be big.
        # For example, chaining something after a huge cases(..) handler
        # causes a copy of each single nested handler.
        # Sure, it is a bad idea to chain anything after a big cases(..)
        # anyway.
        h = self.copy()
        next_handler = prepare_handler(next_handler)
        if hasattr(self, '_next_handler'):
            h._next_handler = h._next_handler | next_handler
        else:
            h._next_handler = next_handler
        return h

    def _locations(self):
        next_handler = self.next_handler
        if isinstance(next_handler, WebHandler):
            return next_handler._locations()
        # we are last in chain
        return {}

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)

    def __call__(self, env, data):
        '''
        Subclasses should define __call__ with handler code. It is good
        style to give the method a name similar to the handler's name and
        then make an alias to __call__::

            class MyHandler(WebHandler):
                def my_handler(self, env, data):
                    do_something(env, data)
                    return self.next_handler(env, data)
                __call__ = my_handler

        This method should be overridden in subclasses.
        '''
        raise NotImplementedError(
            '__call__ is not implemented in {!r}'.format(self))

    @property
    def next_handler(self):
        '''A handler, chained next to self'''
        if hasattr(self, '_next_handler'):
            return self._next_handler
        return lambda e, d: None

    def copy(self):
        '''
        Returns a copy of the handler to make handlers reusable.
        Handlers are copied automatically on chaining, so you do not need
        to do it manually.
        '''
        return copy(self)


class cases(WebHandler):
    # XXX bad docstring
    '''
    Handler encapsulating multiple routing branches and choosing one of
    them that matches the current request::

        web.cases(
            web.match('/', 'index') | index,
            web.match('/contacts', 'contacts') | contacts,
            web.match('/about', 'about') | about,
        )
    '''

    def __init__(self, *handlers):
        self.handlers = [prepare_handler(x) for x in handlers]

    def __or__(self, next_handler):
        # cases needs to set the next handler for each handler it keeps
        h = self.copy()
        h.handlers = [(handler | next_handler
                       if is_chainable(handler)
                       else handler)
                      for handler in self.handlers]
        return h

    def cases(self, env, data):
        '''
        Calls each nested handler until one of them returns a non-None
        result. If a handler returns `None`, it is interpreted as "the
        request does not match, the handler has nothing to do with it and
        `web.cases` should try to call the next handler".
        '''
        for handler in self.handlers:
            env._push()
            data._push()
            try:
                result = handler(env, data)
            finally:
                env._pop()
                data._pop()
            if result is not None:
                return result

    # for readable tracebacks
    __call__ = cases

    def _locations(self):
        locations = {}
        for handler in self.handlers:
            if isinstance(handler, WebHandler):
                handler_locations = handler._locations()
                for k, v in handler_locations.items():
                    if k in locations:
                        raise ValueError(
                            'Location "{}" already exists'.format(k))
                    locations[k] = v
        return locations

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               ', '.join(repr(h) for h in self.handlers))


class _FunctionWrapper3(WebHandler):
    '''
    Wrapper for a handler represented by a function (3 args, old-style).
    '''

    def __init__(self, func):
        self.handler = func

    def function_wrapper(self, env, data):
        return self.handler(env, data, self.next_handler)
    __call__ = function_wrapper

    def __repr__(self):
        return '{}({!r})'.format(self.__class__.__name__, self.handler)


def request_filter(func):
    '''
    Decorator transforming a function into a regular WebHandler. This
    allows chaining other handlers after the given one. The next handler
    is passed as the third argument into the wrapped function::

        @web.request_filter
        def wrapper(env, data, next_handler):
            do_something()
            result = next_handler(env, data)
            return do_something_else(result)

        wrapped_app = wrapper | handler
    '''
    return functools.wraps(func)(_FunctionWrapper3(func))
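
# A small composition sketch using only the pieces defined above. The
# handler body is illustrative; real routing handlers such as web.match
# live elsewhere in the package.
@request_filter
def _add_demo_header(env, data, next_handler):
    response = next_handler(env, data)
    if response is not None:
        response.headers['X-Demo'] = 'yes'
    return response

# Response instances (and classes) are auto-wrapped by prepare_handler(),
# so they can terminate a chain or serve as a fallback branch:
_demo_app = cases(
    _add_demo_header | Response('hello'),
    Response('fallback'),
)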
{ "content_hash": "c8e213609d36205494e3fb6cc67d5a80", "timestamp": "", "source": "github", "line_count": 188, "max_line_length": 80, "avg_line_length": 31.29787234042553, "alnum_prop": 0.5659415363698165, "repo_name": "SmartTeleMax/iktomi", "id": "ba282a05acd5a0ebf85ad3977be619ef9dd2950b", "size": "5909", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "iktomi/web/core.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "7846" }, { "name": "Makefile", "bytes": "802" }, { "name": "Python", "bytes": "628542" }, { "name": "Shell", "bytes": "1321" } ], "symlink_target": "" }
""" Python-like loader which is able to customize default global namespace. """ from __future__ import absolute_import, division, print_function from mybuild._compat import * from mybuild.util.importlib.machinery import SourceFileLoader class PyFileLoader(SourceFileLoader): """Loads Pybuild files and executes them as regular Python scripts. Upon creation of a new module initializes its namespace with defaults taken from the dictionary passed in __init__. Also adds a global variable pointing to a module corresponding to the namespace root. """ @property def defaults(self): namespace = self.name.partition('.')[0] return {namespace: __import__(namespace)} def __init__(self, importer, fullname, path): super(PyFileLoader, self).__init__(fullname, path) self.importer = importer def is_package(self, fullname): return False def defaults_for_module(self, module): return self.defaults def _init_module(self, module): module.__dict__.update(self.defaults_for_module(module)) super(PyFileLoader, self)._init_module(module)
{ "content_hash": "54394251063f474f943db0ad1efcca2e", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 79, "avg_line_length": 32.6, "alnum_prop": 0.6958808063102542, "repo_name": "embox/mybuild", "id": "7aff87c98c88a001525d4f2c362ef98b0cf49187", "size": "1141", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "mybuild/nsloader/pyfile.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "590" }, { "name": "Makefile", "bytes": "165" }, { "name": "Python", "bytes": "208966" } ], "symlink_target": "" }
from tempest.api.compute import base
from tempest import exceptions
from tempest import test


class AbsoluteLimitsNegativeTestJSON(base.BaseV2ComputeTest):

    @classmethod
    def resource_setup(cls):
        super(AbsoluteLimitsNegativeTestJSON, cls).resource_setup()
        cls.client = cls.limits_client
        cls.server_client = cls.servers_client

    @test.attr(type=['negative', 'gate'])
    def test_max_image_meta_exceed_limit(self):
        # We should not create vm with image meta over maxImageMeta limit
        # Get max limit value
        max_meta = self.client.get_specific_absolute_limit('maxImageMeta')

        # No point in running this test if there is no limit.
        if int(max_meta) == -1:
            raise self.skipException('no limit for maxImageMeta')

        # Create server should fail, since we are passing > metadata Limit!
        max_meta_data = int(max_meta) + 1

        meta_data = {}
        for xx in range(max_meta_data):
            meta_data[str(xx)] = str(xx)

        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
        # will be raised when out of quota
        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                          self.server_client.create_server,
                          name='test', meta=meta_data,
                          flavor_ref=self.flavor_ref,
                          image_ref=self.image_ref)
{ "content_hash": "8081e13fa908dc95fa4b1dfe17ad1b73", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 75, "avg_line_length": 38.432432432432435, "alnum_prop": 0.6272855133614628, "repo_name": "afaheem88/tempest_neutron", "id": "a9c72fb71a315ac436316634fffd83eb07879758", "size": "2058", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tempest/api/compute/limits/test_absolute_limits_negative.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2778383" }, { "name": "Shell", "bytes": "8560" } ], "symlink_target": "" }
from rest_framework.test import APITestCase
from rest_framework import status
from django.core.urlresolvers import reverse
import json

from helpers import *


class Miscs1Test(APITestCase):
    fixtures = ['users.json', 'nonprofits.json']

    def test_api_root(self):
        """
        """
        url = reverse('api-root')
        with self.assertNumQueries(0):
            response = self.client.get(url, format='json')
        response_data = json.loads(response.content.decode('utf8'))

        # check for base API presence
        self.assertIn("causes", response_data)
        self.assertIn("skills", response_data)
        self.assertIn("states", response_data)
        self.assertIn("cities", response_data)
        self.assertIn("nonprofit", response_data)
        self.assertIn("project", response_data)
        self.assertIn("jobs", response_data)
        self.assertIn("works", response_data)
        self.assertIn("volunteers", response_data)
        self.assertIn("volunteers_public", response_data)
        self.assertIn("uploads/images", response_data)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # /v1/ rest_framework.routers.APIRoot api-root

    def test_startup(self):
        """
        """
        url = reverse('v1_startup')
        with self.assertNumQueries(7):
            response = self.client.get(url, format='json')
        response_data = json.loads(response.content.decode('utf8'))

        self.assertIn('states', response_data)
        self.assertIn('skills', response_data)
        self.assertIn('cities', response_data)
        self.assertIn('numbers', response_data)
        self.assertIn('causes', response_data)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # /v1/startup/ rest_framework.decorators.startup

    def test_numbers(self):
        """
        """
        url = reverse('v1_numbers')
        with self.assertNumQueries(3):
            response = self.client.get(url, format='json')
        response_data = json.loads(response.content.decode('utf8'))

        self.assertIn('nonprofits', response_data)
        self.assertIn('volunteers', response_data)
        self.assertIn('projects', response_data)

        self.assertEqual(response.status_code, 200)
        # /v1/numbers/ rest_framework.decorators.numbers

    def create_newsletter_data(self):
        return {
            "name": 'John Doe',
            "email": 'john-doe@email.com',
            "address": {
                "addr": {
                    "formatted_address": "R. Capote Valente, 701, São Paulo"
                },
                "typed_address2": "Complemento"
            }
        }

    #++ todo: check if gdd's newsletter is deprecated
    #++ def test_add_to_gdd_newsletter(self):
    #++     """
    #++     """
    #++     url = reverse('v1_add_to_gdd_newsletter')
    #++     with self.assertNumQueries(1):
    #++         response = self.client.post(url, self.create_newsletter_data(), format='json')
    #++     response_data = json.loads(response.content.decode('utf8'))
    #++
    #++     self.assertEqual(response.status_code, status.HTTP_200_OK)
    #++     # /v1/add_to_gdd_newsletter/ rest_framework.decorators.add_to_gdd_newsletter

    # todo: optimize queries on #add_to_newsletter
    def test_add_to_newsletter(self):
        """
        """
        url = reverse('v1_add_to_newsletter')
        with self.assertNumQueries(103):
            response = self.client.post(url, self.create_newsletter_data(), format='json')
        response_data = json.loads(response.content.decode('utf8'))

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # /v1/add_to_newsletter/ rest_framework.decorators.add_to_newsletter

    def test_slug_role(self):
        """
        """
        url = "{}?slug={}".format(reverse('v1_slug_role'), 'atado')
        with self.assertNumQueries(2):
            response = self.client.get(url, format='json')
        response_data = json.loads(response.content.decode('utf8'))

        #++ self.assertIn('type', response_data)
        #++ self.assertEqual(response_data.type, 'NONPROFIT')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # /v1/slug_role/ rest_framework.decorators.slug_role

    #++ notice: DEPRECATED ??
    #++ def test_legacy_to_slug(self):
    #++     """
    #++     """
    #++     url = reverse('v1_legacy_to_slug', args=['something-to-slug'])
    #++     with self.assertNumQueries(1):
    #++         response = self.client.get(url, format='json')
    #++     response_data = json.loads(response.content.decode('utf8'))
    #++
    #++     self.assertEqual(response.status_code, status.HTTP_200_OK)
    #++     # /v1/legacy_to_slug/<type>/ rest_framework.decorators.legacy_to_slug
{ "content_hash": "2343098e1f8cacde21e5a79e38fc792f", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 87, "avg_line_length": 32.2442748091603, "alnum_prop": 0.6841856060606061, "repo_name": "atados/api", "id": "1e2045959865ecd909781e5299d6f7b011772354", "size": "4249", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "atados_core/tests/test_routes/news/test_miscs1.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "31943" }, { "name": "HTML", "bytes": "138142" }, { "name": "JavaScript", "bytes": "5492" }, { "name": "Makefile", "bytes": "1381" }, { "name": "Python", "bytes": "394268" }, { "name": "Shell", "bytes": "1060" } ], "symlink_target": "" }
"""Tests for distutils.cmd.""" import unittest import os from test.support import captured_stdout, run_unittest from distutils.cmd import Command from distutils.dist import Distribution from distutils.errors import DistutilsOptionError from distutils import debug class MyCmd(Command): def initialize_options(self): pass class CommandTestCase(unittest.TestCase): def setUp(self): dist = Distribution() self.cmd = MyCmd(dist) def test_ensure_string_list(self): cmd = self.cmd cmd.not_string_list = ['one', 2, 'three'] cmd.yes_string_list = ['one', 'two', 'three'] cmd.not_string_list2 = object() cmd.yes_string_list2 = 'ok' cmd.ensure_string_list('yes_string_list') cmd.ensure_string_list('yes_string_list2') self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'not_string_list') self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'not_string_list2') cmd.option1 = 'ok,dok' cmd.ensure_string_list('option1') self.assertEqual(cmd.option1, ['ok', 'dok']) cmd.option2 = ['xxx', 'www'] cmd.ensure_string_list('option2') cmd.option3 = ['ok', 2] self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'option3') def test_make_file(self): cmd = self.cmd # making sure it raises when infiles is not a string or a list/tuple self.assertRaises(TypeError, cmd.make_file, infiles=1, outfile='', func='func', args=()) # making sure execute gets called properly def _execute(func, args, exec_msg, level): self.assertEqual(exec_msg, 'generating out from in') cmd.force = True cmd.execute = _execute cmd.make_file(infiles='in', outfile='out', func='func', args=()) def test_dump_options(self): msgs = [] def _announce(msg, level): msgs.append(msg) cmd = self.cmd cmd.announce = _announce cmd.option1 = 1 cmd.option2 = 1 cmd.user_options = [('option1', '', ''), ('option2', '', '')] cmd.dump_options() wanted = ["command options for 'MyCmd':", ' option1 = 1', ' option2 = 1'] self.assertEqual(msgs, wanted) def test_ensure_string(self): cmd = self.cmd cmd.option1 = 'ok' cmd.ensure_string('option1') cmd.option2 = None cmd.ensure_string('option2', 'xxx') self.assertTrue(hasattr(cmd, 'option2')) cmd.option3 = 1 self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3') def test_ensure_filename(self): cmd = self.cmd cmd.option1 = __file__ cmd.ensure_filename('option1') cmd.option2 = 'xxx' self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2') def test_ensure_dirname(self): cmd = self.cmd cmd.option1 = os.path.dirname(__file__) or os.curdir cmd.ensure_dirname('option1') cmd.option2 = 'xxx' self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2') def test_debug_print(self): cmd = self.cmd with captured_stdout() as stdout: cmd.debug_print('xxx') stdout.seek(0) self.assertEqual(stdout.read(), '') debug.DEBUG = True try: with captured_stdout() as stdout: cmd.debug_print('xxx') stdout.seek(0) self.assertEqual(stdout.read(), 'xxx\n') finally: debug.DEBUG = False def test_suite(): return unittest.makeSuite(CommandTestCase) if __name__ == '__main__': run_unittest(test_suite())
{ "content_hash": "3e644d8958b0ea4025dad204df6a0010", "timestamp": "", "source": "github", "line_count": 126, "max_line_length": 79, "avg_line_length": 31.436507936507937, "alnum_prop": 0.5647563746528654, "repo_name": "Suwmlee/XX-Net", "id": "398ed672721e5aea93332e293475ed09818f3552", "size": "3961", "binary": false, "copies": "3", "ref": "refs/heads/python3", "path": "Python3/lib/distutils/tests/test_cmd.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "200" }, { "name": "C", "bytes": "33097" }, { "name": "CSS", "bytes": "86345" }, { "name": "HTML", "bytes": "141382" }, { "name": "JavaScript", "bytes": "345991" }, { "name": "PHP", "bytes": "10671" }, { "name": "Python", "bytes": "17312939" }, { "name": "Shell", "bytes": "4647" }, { "name": "Visual Basic", "bytes": "382" } ], "symlink_target": "" }
import os
import warnings

import pytest

from pex import pex_warnings
from pex.common import temporary_dir
from pex.compatibility import PY2
from pex.pex_warnings import PEXWarning
from pex.testing import environment_as
from pex.typing import TYPE_CHECKING
from pex.util import named_temporary_file
from pex.variables import NoValueError, Variables

if TYPE_CHECKING:
    from typing import Any


def test_process_pydoc():
    # type: () -> None
    def thing():
        # no pydoc
        pass

    assert Variables.process_pydoc(thing.__doc__) == ("Unknown", "Unknown")

    def other_thing():
        """Type

        Properly formatted text.
        """

    assert Variables.process_pydoc(other_thing.__doc__) == ("Type", "Properly formatted text.")


def test_iter_help():
    # type: () -> None
    for variable_name, variable_type, variable_text in Variables.iter_help():
        assert variable_name.startswith("PEX_")
        assert "\n" not in variable_type
        assert "\n" not in variable_text


def test_pex_bool_variables():
    # type: () -> None
    assert Variables(environ={})._maybe_get_bool("NOT_HERE") is None
    with pytest.raises(NoValueError):
        Variables(environ={})._get_bool("NOT_HERE")

    for value in ("0", "faLsE", "false"):
        assert Variables(environ={"HERE": value})._get_bool("HERE") is False
    for value in ("1", "TrUe", "true"):
        assert Variables(environ={"HERE": value})._get_bool("HERE") is True
    with pytest.raises(SystemExit):
        Variables(environ={"HERE": "garbage"})._get_bool("HERE")

    # end to end
    assert Variables().PEX_ALWAYS_CACHE is False
    assert Variables({"PEX_ALWAYS_CACHE": "1"}).PEX_ALWAYS_CACHE is True


def test_pex_string_variables():
    # type: () -> None
    assert Variables(environ={})._maybe_get_string("NOT_HERE") is None
    with pytest.raises(NoValueError):
        Variables(environ={})._get_string("NOT_HERE")
    assert Variables(environ={"HERE": "stuff"})._get_string("HERE") == "stuff"


def test_pex_get_int():
    # type: () -> None
    with pytest.raises(NoValueError):
        Variables()._get_int("HELLO")
    assert Variables(environ={"HELLO": "23"})._get_int("HELLO") == 23

    with pytest.raises(SystemExit):
        assert Variables(environ={"HELLO": "welp"})._get_int("HELLO")


def assert_pex_vars_hermetic():
    # type: () -> None
    v = Variables()
    assert os.environ.copy() == v.copy()

    existing = os.environ.get("TEST")
    expected = (existing or "") + "different"
    assert expected != existing

    with environment_as(TEST=expected):
        assert expected != v.copy().get("TEST")


def test_pex_vars_hermetic_no_pexrc():
    # type: () -> None
    assert_pex_vars_hermetic()


def test_pex_vars_hermetic():
    # type: () -> None
    with environment_as(PEX_IGNORE_RCFILES="True"):
        assert_pex_vars_hermetic()


def test_pex_get_kv():
    # type: () -> None
    v = Variables(environ={})
    assert v._get_kv("HELLO") is None
    assert v._get_kv("=42") is None
    assert v._get_kv("TOO=MANY=COOKS") is None
    assert v._get_kv("THIS=WORKS") == ["THIS", "WORKS"]


def test_pex_from_rc():
    # type: () -> None
    with named_temporary_file(mode="w") as pexrc:
        pexrc.write("HELLO=42")
        pexrc.flush()
        v = Variables(rc=pexrc.name)
        assert v._get_int("HELLO") == 42


def test_pexrc_precedence():
    # type: () -> None
    with named_temporary_file(mode="w") as pexrc:
        pexrc.write("HELLO=FORTYTWO")
        pexrc.flush()
        v = Variables(rc=pexrc.name, environ={"HELLO": "42"})
        assert v._get_int("HELLO") == 42


def test_rc_ignore():
    # type: () -> None
    with named_temporary_file(mode="w") as pexrc:
        pexrc.write("HELLO=FORTYTWO")
        pexrc.flush()
        v = Variables(rc=pexrc.name, environ={"PEX_IGNORE_RCFILES": "True"})
        assert "HELLO" not in v._environ


def test_pex_vars_defaults_stripped():
    # type: () -> None
    v = Variables(environ={})

    # bool
    assert v.PEX_ALWAYS_CACHE is not None
    assert Variables.PEX_ALWAYS_CACHE.strip_default(v) is None

    # string
    assert v.PEX_PROFILE_SORT is not None
    assert Variables.PEX_PROFILE_SORT.strip_default(v) is None

    # int
    assert v.PEX_VERBOSE is not None
    assert Variables.PEX_VERBOSE.strip_default(v) is None


def test_pex_root_unwriteable():
    # type: () -> None
    with temporary_dir() as td:
        pex_root = os.path.realpath(os.path.join(td, "pex_root"))
        os.mkdir(pex_root, 0o444)

        env = Variables(environ=dict(PEX_ROOT=pex_root))

        with warnings.catch_warnings(record=True) as log:
            assert pex_root != env.PEX_ROOT

        assert 1 == len(log)
        message = log[0].message
        assert isinstance(message, PEXWarning)
        assert pex_root in str(message)
        assert env.PEX_ROOT is not None
        assert env.PEX_ROOT in str(message)

        assert (
            env.PEX_ROOT == env.PEX_ROOT
        ), "When an ephemeral PEX_ROOT is materialized it should be stable."


def test_pex_vars_value_or(tmpdir):
    # type: (Any) -> None
    v = Variables(environ={})
    assert v.PEX_ROOT is not None, "Expected PEX_ROOT to be a defaulted variable."

    pex_root = str(tmpdir)
    assert pex_root == Variables.PEX_ROOT.value_or(v, pex_root)

    unwriteable_pex_root = os.path.join(pex_root, "unwriteable")
    os.mkdir(unwriteable_pex_root, 0o444)
    assert unwriteable_pex_root != Variables.PEX_ROOT.value_or(v, unwriteable_pex_root), (
        "Expected the fallback to be validated, and in the case of PEX_ROOT, replaced with a "
        "writeable tmp dir"
    )


def test_patch():
    # type: () -> None
    v = Variables(environ=dict(PEX_VERBOSE="3", PEX_PYTHON="jython", PEX_EMIT_WARNINGS="True"))
    assert v.PEX_VERBOSE == 3
    assert v.PEX_PYTHON == "jython"
    assert v.PEX_EMIT_WARNINGS is True
    assert v.PEX_FORCE_LOCAL is False

    with v.patch(PEX_VERBOSE="1", PEX_EMIT_WARNINGS=None, PEX_FORCE_LOCAL="True") as env:
        assert env["PEX_VERBOSE"] == "1"
        assert env["PEX_PYTHON"] == "jython"
        assert "PEX_EMIT_WARNINGS" not in env
        assert env["PEX_FORCE_LOCAL"] == "True"

        assert v.PEX_VERBOSE == 1
        assert v.PEX_PYTHON == "jython"
        assert v.PEX_EMIT_WARNINGS is None
        # If the assertion is flipped from `is True` to `is False` this test fails; so MyPy is
        # just confused here about the statement being unreachable.
        assert v.PEX_FORCE_LOCAL is True  # type: ignore[unreachable]


@pytest.mark.skipif(
    PY2,
    reason=(
        "The `warnings.catch_warnings` mechanism doesn't work properly under CPython 2.7 & pypy2 "
        "across multiple tests. Since we only use `warnings.catch_warnings` in unit tests and "
        "the mechanisms tested here are also tested in integration tests under CPython 2.7 & "
        "pypy we accept that these unit tests appear un-fixable without a lot of warnings "
        "mocking."
    ),
)
def test_warnings():
    # type: () -> None
    environ = dict(
        PEX_IGNORE_ERRORS="true",
        PEX_ALWAYS_CACHE="true",
        PEX_FORCE_LOCAL="true",
        PEX_UNZIP="true",
    )

    with warnings.catch_warnings(record=True) as events:
        pex_warnings.configure_warnings(Variables(environ={}))

        env = Variables(environ=environ)
        assert env.PEX_IGNORE_ERRORS is True
        assert env.PEX_ALWAYS_CACHE is True
        assert env.PEX_FORCE_LOCAL is True
        assert env.PEX_UNZIP is True

    warning_by_message_first_sentence = {
        str(event.message).split(". ")[0]: event.message for event in events
    }
    assert all(
        isinstance(warning, PEXWarning)
        for warning in warning_by_message_first_sentence.values()
    )
    assert tuple(
        sorted(
            "The `{}` env var is deprecated".format(env_var)
            for env_var in ("PEX_ALWAYS_CACHE", "PEX_FORCE_LOCAL", "PEX_UNZIP")
        )
    ) == tuple(sorted(warning_by_message_first_sentence))


def test_empty_pex_path_issue_1936():
    # type: () -> None
    assert () == Variables(environ={}).PEX_PATH
    assert () == Variables(environ={"PEX_PATH": ""}).PEX_PATH
    assert (".",) == Variables(environ={"PEX_PATH": "."}).PEX_PATH
{ "content_hash": "c94acaf5ff4a4ce5723b60c8d68ce09d", "timestamp": "", "source": "github", "line_count": 265, "max_line_length": 99, "avg_line_length": 31.184905660377357, "alnum_prop": 0.6251210067763795, "repo_name": "pantsbuild/pex", "id": "59789c9157ff57fac1c9f8f351d058f03a929f45", "size": "8396", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "tests/test_variables.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1379" }, { "name": "Python", "bytes": "2190044" }, { "name": "Shell", "bytes": "1472" } ], "symlink_target": "" }
__all__ = ['imread']

import numpy as np

try:
    from PIL import Image
except ImportError:
    raise ImportError("The Python Image Library could not be found. "
                      "Please refer to http://pypi.python.org/pypi/PIL/ "
                      "for further instructions.")

from skimage.util import img_as_ubyte
from six import string_types


def imread(fname, dtype=None):
    """Load an image from file.

    """
    im = Image.open(fname)
    if im.mode == 'P':
        if _palette_is_grayscale(im):
            im = im.convert('L')
        else:
            im = im.convert('RGB')
    elif im.mode == '1':
        im = im.convert('L')
    elif im.mode.startswith('I;16'):
        shape = im.size
        dtype = '>u2' if im.mode.endswith('B') else '<u2'
        im = np.fromstring(im.tostring(), dtype)
        im.shape = shape[::-1]
    elif 'A' in im.mode:
        im = im.convert('RGBA')
    return np.array(im, dtype=dtype)


def _palette_is_grayscale(pil_image):
    """Return True if PIL image in palette mode is grayscale.

    Parameters
    ----------
    pil_image : PIL image
        PIL Image that is in Palette mode.

    Returns
    -------
    is_grayscale : bool
        True if all colors in image palette are gray.

    """
    assert pil_image.mode == 'P'
    # get palette as an array with R, G, B columns
    palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
    # Not all palette colors are used; unused colors have junk values.
    start, stop = pil_image.getextrema()
    valid_palette = palette[start:stop]
    # Image is grayscale if channel differences (R - G and G - B)
    # are all zero.
    return np.allclose(np.diff(valid_palette), 0)


def imsave(fname, arr, format_str=None):
    """Save an image to disk.

    Parameters
    ----------
    fname : str or file-like object
        Name of destination file.
    arr : ndarray of uint8 or float
        Array (image) to save.  Arrays of data-type uint8 should have
        values in [0, 255], whereas floating-point arrays must be
        in [0, 1].
    format_str : str
        Format to save as, this is defaulted to PNG if using a file-like
        object; this will be derived from the extension if fname is a string.

    Notes
    -----
    Currently, only 8-bit precision is supported.

    """
    arr = np.asarray(arr).squeeze()

    if arr.ndim not in (2, 3):
        raise ValueError("Invalid shape for image array: %s" % arr.shape)

    if arr.ndim == 3:
        if arr.shape[2] not in (3, 4):
            raise ValueError("Invalid number of channels in image array.")

    # Image is floating point, assume in [0, 1]
    if np.issubdtype(arr.dtype, float):
        arr = arr * 255
        arr = arr.astype(np.uint8)

    if arr.ndim == 2:
        mode = 'L'
    elif arr.shape[2] in (3, 4):
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]

    # Force all integers to bytes
    arr = arr.astype(np.uint8)

    # default to PNG if file-like object
    if not isinstance(fname, string_types) and format_str is None:
        format_str = "PNG"

    try:
        img = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                              arr.tostring())
    except AttributeError:
        img = Image.fromstring(mode, (arr.shape[1], arr.shape[0]),
                               arr.tostring())
    img.save(fname, format=format_str)


def imshow(arr):
    """Display an image, using PIL's default display command.

    Parameters
    ----------
    arr : ndarray
        Image to display.  Images of dtype float are assumed to be in
        [0, 1].  Images of dtype uint8 are in [0, 255].

    """
    Image.fromarray(img_as_ubyte(arr)).show()


def _app_show():
    pass
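
# A quick round-trip sketch using the plugin functions above (the output
# path is illustrative; any writable location works):
if __name__ == '__main__':
    checker = np.zeros((64, 64), dtype=np.uint8)
    checker[::2, ::2] = 255  # simple test pattern
    imsave('/tmp/checker.png', checker)
    restored = imread('/tmp/checker.png')
    assert restored.shape == (64, 64)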
{ "content_hash": "e1a2fd4a45b88d94f8e50b7ebc6d462f", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 76, "avg_line_length": 27.330882352941178, "alnum_prop": 0.5835351089588378, "repo_name": "chintak/scikit-image", "id": "6dd78035ead58b15caf9312abbe8ea541dbafbd3", "size": "3717", "binary": false, "copies": "1", "ref": "refs/heads/placeholder", "path": "skimage/io/_plugins/pil_plugin.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "70225" }, { "name": "CSS", "bytes": "3629" }, { "name": "JavaScript", "bytes": "777" }, { "name": "Python", "bytes": "2115723" }, { "name": "Shell", "bytes": "3346" } ], "symlink_target": "" }
import re


def instantiate_request_uri_templates(json_content):
    """Instantiate the parameters for all the requests URI templates

    Arguments:
    json_content -- JSON object containing the API parsed spec
    """
    for resource_group in json_content["resourceGroups"]:
        for resource in resource_group["resources"]:
            for action in resource["actions"]:
                for example in action["examples"]:
                    for request in example["requests"]:
                        if request["name"].find('/') < 0:
                            # URI parameters can be defined in the resource
                            # and / or the action. Combine the list of
                            # parameters of both.
                            uri_parameters = combine_uri_parameters(
                                resource["parameters"], action["parameters"])

                            # Instantiate the parameters in the action URI
                            # (or in the resource URI if the action URI is
                            # empty).
                            if len(action["attributes"]["uriTemplate"]) > 0:
                                request["name"] = \
                                    request["name"] + " " + instantiate_uri(
                                        action["attributes"]["uriTemplate"],
                                        uri_parameters)
                            else:
                                request["name"] = \
                                    request["name"] + " " + instantiate_uri(
                                        resource["uriTemplate"],
                                        uri_parameters)


def combine_uri_parameters(resource_uri_parameters, action_uri_parameters):
    """Combine the URI parameters of the given action and resource

    Combine URI parameters of the current action and resource. In case of
    a parameter being defined in both the resource and the action, list
    only that of the action.

    Arguments:
    resource_uri_parameters -- URI parameters of the given resource
    action_uri_parameters -- URI parameters of the given action
    """
    uri_parameters = []

    # Append to the result list all the URI parameters from the resource
    # which are not redefined in the action.
    for resource_uri_parameter in resource_uri_parameters:
        parameter_overwritten_in_action = False
        for action_uri_parameter in action_uri_parameters:
            if resource_uri_parameter["name"] == action_uri_parameter["name"]:
                parameter_overwritten_in_action = True
        if not parameter_overwritten_in_action:
            uri_parameters.append(resource_uri_parameter)

    # Append all the parameters from the action to the result list.
    uri_parameters.extend(action_uri_parameters)
    return uri_parameters


def instantiate_uri(URI_template, parameters):
    """Instantiate a URI template from a list of parameters

    Arguments:
    URI_template -- URI template to be instantiated
    parameters -- List of URI parameters used for instantiating
    """
    # Find all the parameter blocks (ie. {var}, {?var1,var2}, etc).
    regex = re.compile("{([^}]*)}")
    URI_parameters_blocks = re.findall(regex, URI_template)

    # Process every parameter block found in the URI
    for URI_parameter_block in URI_parameters_blocks:
        # Parameters of the form "#var" will be replaced with "#value",
        # so we keep the '#' as a prefix.
        prefix = ''
        if URI_parameter_block[0] == '#':
            prefix = '#'

        # Form-style parameters (ie. ?var, &var) require a different
        # substitution, so mark them as special cases for the
        # substitutions loop.
        form_style_query_parameters = False
        if URI_parameter_block[0] == '?':
            form_style_query_parameters = True
            first_form_style_query_parameter = True
        elif URI_parameter_block[0] == '&':
            form_style_query_parameters = True
            first_form_style_query_parameter = False

        # If the current parameter block starts with '?', '&', etc we
        # remove such a prefix for the substitutions loop.
        if prefix == '' and form_style_query_parameters == False and URI_parameter_block[0] != '+':
            URI_parameter_block_replace = URI_parameter_block
        else:
            URI_parameter_block_replace = URI_parameter_block[1:]

        # Start replacing all the parameters inside the parameter block
        # one by one.
        for URI_parameter in URI_parameter_block_replace.split(','):
            # Form-style parameters such as "?var" will be replaced by
            # "?var=value", so keep "var=" as a prefix.
            if form_style_query_parameters == True:
                if first_form_style_query_parameter:
                    prefix = "?" + URI_parameter + "="
                    first_form_style_query_parameter = False
                else:
                    prefix = "&" + URI_parameter + "="

            # Search the current URI parameter in the list of parameters
            # given and replace its name with its example value.
            i = 0
            parameter_definition_found = False
            while i < len(parameters) and not parameter_definition_found:
                if parameters[i]['name'] == URI_parameter and len(parameters[i]['example']) > 0:
                    parameter_definition_found = True
                    URI_parameter_block_replace = URI_parameter_block_replace.replace(
                        URI_parameter, prefix + parameters[i]['example'])
                i += 1

            # If the parameter cannot be found or it has no example value,
            # we replace it with "{prefix+var-name}" or simply ignore it,
            # depending on the type of parameter.
            if parameter_definition_found == False:
                if URI_parameter_block[0] != '?' and URI_parameter_block[0] != '&':
                    if URI_parameter_block[0] == '+':
                        prefix = '+'
                    URI_parameter_block_replace = URI_parameter_block_replace.replace(
                        URI_parameter, "{" + prefix + URI_parameter + "}")
                else:
                    URI_parameter_block_replace = URI_parameter_block_replace.replace(
                        URI_parameter, '')

        # Replace the original parameter block with the values of its
        # members, omitting the separator character (',').
        URI_parameter_block_replace = URI_parameter_block_replace.replace(',', '')
        URI_template = URI_template.replace(
            "{" + URI_parameter_block + "}", URI_parameter_block_replace)

    return URI_template
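
# A quick demonstration of the instantiation rules implemented above
# (parameter names and example values are illustrative):
if __name__ == '__main__':
    params = [{'name': 'id', 'example': '42'},
              {'name': 'page', 'example': '2'}]
    # Simple and form-style expansion; 'limit' has no example value, so it
    # is dropped from the query string.
    assert instantiate_uri('/users/{id}{?page,limit}', params) == '/users/42?page=2'
    # Unknown simple parameters are kept as placeholders.
    assert instantiate_uri('/users/{missing}', []) == '/users/{missing}'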
{ "content_hash": "33ab3dfdf4a4bb03265c10e4f7cc91af", "timestamp": "", "source": "github", "line_count": 135, "max_line_length": 136, "avg_line_length": 47.96296296296296, "alnum_prop": 0.587953667953668, "repo_name": "Lenijas/test-travisci", "id": "1e801ca6e8e4dbb827e691ea9bfa747b3cd8654a", "size": "6475", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "build/lib/fiware_api_blueprint_renderer/src/drafter_postprocessing/instantiate_uri.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "API Blueprint", "bytes": "48017" }, { "name": "CSS", "bytes": "30626" }, { "name": "Groff", "bytes": "1244" }, { "name": "JavaScript", "bytes": "1914" }, { "name": "Python", "bytes": "140734" }, { "name": "Shell", "bytes": "411" }, { "name": "Smarty", "bytes": "97308" } ], "symlink_target": "" }
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('feedbacks', '0002_auto_20181007_1922'),
    ]

    operations = [
        migrations.AlterField(
            model_name='answer',
            name='author_name',
            field=models.CharField(max_length=60, verbose_name='author name'),
        ),
    ]
{ "content_hash": "7c4b92a6550e0b871d6aea4856bf98df", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 78, "avg_line_length": 23.25, "alnum_prop": 0.5887096774193549, "repo_name": "flavoi/diventi", "id": "b56eefdf24e589f171f6abd01c559f91b5601d32", "size": "421", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "diventi/feedbacks/migrations/0003_auto_20181007_1940.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "385265" }, { "name": "Procfile", "bytes": "46" }, { "name": "Python", "bytes": "826530" } ], "symlink_target": "" }
from __future__ import division, print_function
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import multiprocessing as mp

import comptools as comp
import comptools.analysis.plotting as plotting

color_dict = comp.analysis.get_color_dict()


def get_binned_energy_counts(config):
    df_data = comp.load_dataframe(datatype='data', config=config)
    energybins = comp.analysis.get_energybins()
    counts, _ = np.histogram(df_data['lap_log_energy'],
                             bins=energybins.log_energy_bins)
    print('{} complete!'.format(config))
    return counts


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description='Extracts and saves desired information from simulation/data .i3 files')
    parser.add_argument('-c', '--config', dest='config', nargs='*',
                        choices=comp.datafunctions.get_data_configs(),
                        help='Detector configuration')
    args = parser.parse_args()

    # Energy distribution comparison plot
    energy_dist_pool = mp.Pool(processes=len(args.config))
    energy_counts = energy_dist_pool.map(get_binned_energy_counts, args.config)
    config_counts_dict = dict(zip(args.config, energy_counts))

    energybins = comp.analysis.get_energybins()

    gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1], hspace=0.1)
    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1], sharex=ax1)

    for idx, config in enumerate(args.config):
        counts = config_counts_dict[config]
        frequency = counts / np.sum(counts)
        frequency_err = np.sqrt(counts) / np.sum(counts)
        plotting.plot_steps(energybins.log_energy_bins, frequency,
                            yerr=frequency_err, color='C{}'.format(idx),
                            label=config, alpha=0.8, ax=ax1)
    ax1.set_ylabel('Frequency')
    ax1.tick_params(labelbottom='off')
    ax1.grid()
    ax1.legend()

    for idx, config in enumerate(args.config):
        if config == 'IC86.2012':
            continue
        counts = config_counts_dict[config]
        frequency = counts / np.sum(counts)
        frequency_err = np.sqrt(counts) / np.sum(counts)

        counts_2012 = config_counts_dict['IC86.2012']
        frequency_2012 = counts_2012 / np.sum(counts_2012)
        frequency_err_2012 = np.sqrt(counts_2012) / np.sum(counts_2012)

        ratio, ratio_err = comp.analysis.ratio_error(
            frequency, frequency_err, frequency_2012, frequency_err_2012)

        plotting.plot_steps(energybins.log_energy_bins, ratio, yerr=ratio_err,
                            color='C{}'.format(idx), label=config, alpha=0.8,
                            ax=ax2)
    ax2.axhline(1, marker='None', linestyle='-.', color='k', lw=1.5)
    ax2.set_ylabel('$\mathrm{f/f_{2012}}$')
    # ax2.set_ylabel('Ratio with IC86.2012')
    ax2.set_xlabel('$\mathrm{\log_{10}(E_{reco}/GeV)}$')
    # ax2.set_ylim(0)
    ax2.set_xlim(energybins.log_energy_min, energybins.log_energy_max)
    ax2.grid()

    energy_dist_outfile = os.path.join(comp.paths.figures_dir,
                                       'yearly_data_comparisons',
                                       'energy_dist.png')
    comp.check_output_dir(energy_dist_outfile)
    plt.savefig(energy_dist_outfile)
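
# comp.analysis.ratio_error (used above) propagates the counting
# uncertainties into the ratio panel. A minimal stand-alone version of the
# standard uncorrelated error propagation it is assumed to implement:
def _ratio_error_sketch(num, num_err, den, den_err):
    ratio = num / den
    err = ratio * np.sqrt((num_err / num)**2 + (den_err / den)**2)
    return ratio, err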
{ "content_hash": "e6c4ec824f73d40fcbbbbd75f027bf27", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 92, "avg_line_length": 36.94318181818182, "alnum_prop": 0.6348815749000307, "repo_name": "jrbourbeau/cr-composition", "id": "41092ae21e09f2d62f0171e97cc0ae224c168a77", "size": "3274", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plotting/plot_yearly_data_comparisons.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "9936" }, { "name": "C++", "bytes": "18910" }, { "name": "Jupyter Notebook", "bytes": "78902112" }, { "name": "Makefile", "bytes": "3148" }, { "name": "Python", "bytes": "998943" }, { "name": "Shell", "bytes": "21865" } ], "symlink_target": "" }
__author__ = 'digao'

from flask import Flask
from flask.ext.mongoengine import MongoEngine

app = Flask(__name__)
app.config["MONGODB_SETTINGS"] = {'DB': "my_tumble_log"}
app.config["SECRET_KEY"] = "KeepThisS3cr3t"

db = MongoEngine()
db.init_app(app)


def register_blueprints(app):
    # Prevents circular imports
    from views import posts
    app.register_blueprint(posts)
    from admin import admin
    admin.init_app(app)

register_blueprints(app)

if __name__ == '__main__':
    app.run()
{ "content_hash": "9f97652099768d56dbb308a380ddafd2", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 56, "avg_line_length": 20.791666666666668, "alnum_prop": 0.6833667334669339, "repo_name": "digaobarbosa/tumblog", "id": "8bf4c670b489034d99d0956e3838a39fc5aa0798", "size": "499", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3877" } ], "symlink_target": "" }
subreddit = 'BollyBlindsNGossip'
t_channel = '@bollybng'


def send_post(submission, r2t):
    return r2t.send_simple(submission)
{ "content_hash": "259cd1476e11eb6930487cc1c141c7d4", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 38, "avg_line_length": 21.666666666666668, "alnum_prop": 0.7384615384615385, "repo_name": "Fillll/reddit2telegram", "id": "41781bcac3c657d08ddafadd83a76038f82222b5", "size": "147", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "reddit2telegram/channels/~inactive/bollybng/app.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "301463" }, { "name": "Shell", "bytes": "153" } ], "symlink_target": "" }
import unittest
import fibonacci as fm


class FibonacciTests(unittest.TestCase):
    """Test all Fibonacci functions against a list of known Fibonacci numbers

    Values indexed 0 - 38 are from OEIS https://oeis.org/search?q=fibonacci
    39 - 50 manually calculated and verified by Brig Young
    """

    century_fib = 354224848179261915075
    """A curated value of fibonacci(100) ref: M.L. Hetland pp. 177.
    Note that Magnus is skipping the beginning zero so his indices are
    actually (N-1). Also see:
    http://www.maths.surrey.ac.uk/hosted-sites/R.Knott/Fibonacci/fibtable.html
    """

    fibs = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610,
            987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368,
            75025, 121393, 196418, 317811, 514229, 832040, 1346269,
            2178309, 3524578, 5702887, 9227465, 14930352, 24157817,
            39088169, 63245986, 102334155, 165580141, 267914296,
            433494437, 701408733, 1134903170, 1836311903, 2971215073,
            4807526976, 7778742049, 12586269025]
    """A curated list of Fibonacci numbers with correctly matched indexes
    """

    def test_Sequencer(self):
        """Create a list of fibonacci numbers via sequencing over count of
        known good values and compare to list of known good values.
        """
        sequenced_list = []
        [sequenced_list.append(fm.fibonacciSequencer(x))
         for x in range(len(self.fibs))]
        self.assertEqual(sequenced_list, self.fibs,
                         "fibonacciSequencer() has calculated incorrect values")

    def test_Generator(self):
        """Create a list of fibonacci numbers via a python generator indexed
        by count of known good values and compare to list of known good
        values.
        """
        generated_list = []
        fibgen = fm.fibonacciGenerator()
        [generated_list.append(next(fibgen)) for x in range(len(self.fibs))]
        self.assertEqual(generated_list, self.fibs,
                         "fibonacciGenerator() has calculated incorrect values")

    def test_ClosedForm(self):
        """Create a list of fibonacci numbers via closed form equation
        indexed by count of known good values and compare to list of known
        good values.
        """
        closedform_list = []
        [closedform_list.append(fm.fibonacciClosedForm(x))
         for x in range(len(self.fibs))]
        self.assertEqual(closedform_list, self.fibs,
                         "fibonacciClosedForm() has calculated incorrect values")

    def test_NaiveRecursion(self):
        """Create a list of fibonacci numbers via naive recursion indexed by
        count of known good values and compare to list of known good values.
        """
        fibs_short_list = self.fibs[:20]
        recursion_list = []
        [recursion_list.append(fm.fibonacciNaiveRecursion(x))
         for x in range(len(fibs_short_list))]
        self.assertEqual(recursion_list, fibs_short_list,
                         "fibonacciRecursion() has calculated incorrect values")

    def test_MemoRecursion(self):
        """Create a list of fibonacci numbers via recursion with a memo-ized
        cache of previously calculated values. The returned list of values
        are then compared to a list of known good values.
        """
        fibs_short_list = self.fibs[:20]
        memo_recursion_list = []
        [memo_recursion_list.append(fm.fibonacciMemoRecursion(x))
         for x in range(len(fibs_short_list))]
        self.assertEqual(memo_recursion_list, fibs_short_list,
                         "fibonacciMemoRecursion() has calculated incorrect values")

    def test_DeepRecursion(self):
        """Evaluate Memo Recursion at N=100 and compare to known good result
        """
        self.assertEqual(fm.fibonacciMemoRecursion(100), self.century_fib,
                         "fibonacciMemoRecursion() has calculated incorrect deep result")


unittest.main(verbosity=2)
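
# The fibonacci module under test is not shown here; a minimal sketch of
# the closed-form (Binet) variant the tests assume. Rounding keeps the
# result exact for small indices, though plain floats drift for large n:
#
#     import math
#
#     def fibonacciClosedForm(n):
#         sqrt5 = math.sqrt(5)
#         phi = (1 + sqrt5) / 2
#         return round(phi**n / sqrt5)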
{ "content_hash": "f5322c740a73b21ad4fa403be6cc6c6b", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 103, "avg_line_length": 46.71084337349398, "alnum_prop": 0.6639153985039979, "repo_name": "Sonophoto/PythonNotes", "id": "9165f58850f82329caabc596a48378578e09fabb", "size": "4388", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fibonacci/test_fibonacci.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "25874" }, { "name": "Shell", "bytes": "5284" } ], "symlink_target": "" }
from fastapi import Body, FastAPI
from pydantic import BaseModel

app = FastAPI()


class Item(BaseModel):
    name: str
    description: str | None = None
    price: float
    tax: float | None = None


class User(BaseModel):
    username: str
    full_name: str | None = None


@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item, user: User, importance: int = Body()):
    results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
    return results
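
# A quick check of the body shape this endpoint expects (values are
# illustrative), using FastAPI's test client:
#
#     from fastapi.testclient import TestClient
#
#     client = TestClient(app)
#     response = client.put(
#         "/items/5",
#         json={
#             "item": {"name": "Foo", "price": 42.0},
#             "user": {"username": "dave"},
#             "importance": 5,
#         },
#     )
#     assert response.status_code == 200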
{ "content_hash": "d7a03153ab8c6000fa11f5d41b6c65e8", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 88, "avg_line_length": 22.90909090909091, "alnum_prop": 0.6567460317460317, "repo_name": "tiangolo/fastapi", "id": "a1a75fe8e40e9067147ac0735b70ad380ea292e5", "size": "504", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs_src/body_multiple_params/tutorial003_py310.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "25" }, { "name": "HTML", "bytes": "187" }, { "name": "Python", "bytes": "1928986" }, { "name": "Shell", "bytes": "1383" } ], "symlink_target": "" }
TARGETS = [
    'KitchenSink.py',
]

PACKAGE = {
    'title': 'kitchensink',
    'desc': 'KitchenSink example',
}


def setup(targets):
    '''Setup example for translation, MUST call util.setup(targets).'''
    util.setup(targets)


def translate():
    '''Translate example, MUST call util.translate().'''
    util.translate()


def install(package):
    '''Install and cleanup example module. MUST call util.install(package)'''
    util.install(package)


##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##


import sys
import os

examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
    examples = os.path.split(examples)[0]
    if not examples:
        raise ValueError("Cannot determine examples directory")
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)

util.init(head)

setup(TARGETS)
translate()
install(PACKAGE)
{ "content_hash": "343b56c4215a2e9f149dd2d14dbbf5ac", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 77, "avg_line_length": 20.591836734693878, "alnum_prop": 0.599603567888999, "repo_name": "gpitel/pyjs", "id": "ca52e4312e82561b8ddd15009ff1833c8b0b4524", "size": "1057", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "examples/kitchensink/__main__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "4640" }, { "name": "Groff", "bytes": "6633" }, { "name": "HTML", "bytes": "10106" }, { "name": "JavaScript", "bytes": "63385" }, { "name": "Makefile", "bytes": "453" }, { "name": "Python", "bytes": "5517085" }, { "name": "Shell", "bytes": "4264" } ], "symlink_target": "" }
import json

from tempest.common.rest_client import RestClient


class ExtensionsClientJSON(RestClient):

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(ExtensionsClientJSON, self).__init__(config, username, password,
                                                   auth_url, tenant_name)
        self.service = self.config.volume.catalog_type

    def list_extensions(self):
        url = 'extensions'
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['extensions']
{ "content_hash": "20b46d47073a7e571cab933822fe2778", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 79, "avg_line_length": 32.94117647058823, "alnum_prop": 0.6160714285714286, "repo_name": "BeenzSyed/tempest", "id": "bdd5f1e76ca0533da1318b3376a27535b682ef2d", "size": "1196", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tempest/services/volume/json/extensions_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2613370" }, { "name": "Shell", "bytes": "8687" } ], "symlink_target": "" }
""" Python Interchangeable Virtual Instrument Library Copyright (c) 2013-2017 Alex Forencich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from . import ivi # Parameter Values ApertureTimeUnits = set(['seconds', 'powerline_cycles']) Auto = set(['off', 'on', 'once']) Auto2 = set(['off', 'on']) MeasurementFunction = set(['dc_volts', 'ac_volts', 'dc_current', 'ac_current', 'two_wire_resistance', 'four_wire_resistance', 'ac_plus_dc_volts', 'ac_plus_dc_current', 'frequency', 'period', 'temperature']) ThermocoupleReferenceJunctionType = set(['internal', 'fixed']) ThermocoupleType = set(['b', 'c', 'd', 'e', 'g', 'j', 'k', 'n', 'r', 's', 't', 'u', 'v']) TemperatureTransducerType = set(['thermocouple', 'thermistor', 'two_wire_rtd', 'four_wire_rtd']) Slope = set(['positive', 'negative']) class Base(ivi.IviContainer): "Base IVI methods for DMMs that take a single measurement at a time" def __init__(self, *args, **kwargs): super(Base, self).__init__(*args, **kwargs) cls = 'IviDmm' grp = 'Base' ivi.add_group_capability(self, cls+grp) self._measurement_function = 'dc_volts' self._range = 0 self._auto_range = 'off' self._resolution = 1 self._trigger_delay = 0 self._trigger_delay_auto = False self._trigger_source = '' self._add_property('measurement_function', self._get_measurement_function, self._set_measurement_function) self._add_property('range', self._get_range, self._set_range) self._add_property('auto_range', self._get_auto_range, self._set_auto_range) self._add_property('resolution', self._get_resolution, self._set_resolution) self._add_property('trigger.delay', self._get_trigger_delay, self._set_trigger_delay) self._add_property('trigger.delay_auto', self._get_trigger_delay_auto, self._set_trigger_delay_auto) self._add_property('trigger.source', self._get_trigger_source, self._set_trigger_source) self._add_method('configure', self._configure) self._add_method('trigger.configure', self._trigger_configure) self._add_method('measurement.abort', self._measurement_abort) self._add_method('measurement.fetch', self._measurement_fetch) self._add_method('measurement.initiate', self._measurement_initiate) self._add_method('measurement.is_out_of_range', self._measurement_is_out_of_range) self._add_method('measurement.is_over_range', self._measurement_is_over_range) self._add_method('measurement.is_under_range', self._measurement_is_under_range) self._add_method('measurement.read', self._measurement_read) def _get_measurement_function(self): return self._measurement_function def _set_measurement_function(self, value): if value not in 
            raise ivi.ValueNotSupportedException()
        self._measurement_function = value

    def _get_range(self):
        return self._range

    def _set_range(self, value):
        value = float(value)
        self._range = value

    def _get_auto_range(self):
        return self._auto_range

    def _set_auto_range(self, value):
        if value not in Auto:
            raise ivi.ValueNotSupportedException()
        self._auto_range = value

    def _get_resolution(self):
        return self._resolution

    def _set_resolution(self, value):
        value = float(value)
        self._resolution = value

    def _get_trigger_delay(self):
        return self._trigger_delay

    def _set_trigger_delay(self, value):
        value = float(value)
        self._trigger_delay = value

    def _get_trigger_delay_auto(self):
        return self._trigger_delay_auto

    def _set_trigger_delay_auto(self, value):
        value = bool(value)
        self._trigger_delay_auto = value

    def _get_trigger_source(self):
        return self._trigger_source

    def _set_trigger_source(self, value):
        value = str(value)
        self._trigger_source = value

    def _measurement_abort(self):
        pass

    def _configure(self, function, range, resolution):
        self._set_measurement_function(function)
        if range in Auto:
            self._set_auto_range(range)
        else:
            self._set_range(range)
        self._set_resolution(resolution)

    def _trigger_configure(self, source, delay):
        self._set_trigger_source(source)
        # a bool delay selects automatic delay; a number sets it explicitly
        if isinstance(delay, bool):
            self._set_trigger_delay_auto(delay)
        else:
            self._set_trigger_delay(delay)

    def _measurement_fetch(self, max_time):
        return 0.0

    def _measurement_initiate(self):
        pass

    def _measurement_is_out_of_range(self, value):
        return self._measurement_is_over_range(value) or self._measurement_is_under_range(value)

    def _measurement_is_over_range(self, value):
        return False

    def _measurement_is_under_range(self, value):
        return False

    def _measurement_read(self, max_time):
        return 0.0


class ACMeasurement(ivi.IviContainer):
    "Extension IVI methods for DMMs that can take AC voltage or AC current measurements"

    def __init__(self, *args, **kwargs):
        super(ACMeasurement, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'ACMeasurement'
        ivi.add_group_capability(self, cls+grp)

        self._ac_frequency_max = 100
        self._ac_frequency_min = 10

        self._add_property('ac.frequency_max',
                           self._get_ac_frequency_max,
                           self._set_ac_frequency_max)
        self._add_property('ac.frequency_min',
                           self._get_ac_frequency_min,
                           self._set_ac_frequency_min)
        self._add_method('ac.configure_bandwidth', self._ac_configure_bandwidth)

    def _get_ac_frequency_max(self):
        return self._ac_frequency_max

    def _set_ac_frequency_max(self, value):
        value = float(value)
        self._ac_frequency_max = value

    def _get_ac_frequency_min(self):
        return self._ac_frequency_min

    def _set_ac_frequency_min(self, value):
        value = float(value)
        self._ac_frequency_min = value

    def _ac_configure_bandwidth(self, min_f, max_f):
        self._set_ac_frequency_min(min_f)
        self._set_ac_frequency_max(max_f)


class FrequencyMeasurement(ivi.IviContainer):
    "Extension IVI methods for DMMs that can take frequency measurements"

    def __init__(self, *args, **kwargs):
        super(FrequencyMeasurement, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'FrequencyMeasurement'
        ivi.add_group_capability(self, cls+grp)

        self._frequency_voltage_range = 1
        self._frequency_voltage_range_auto = False

        self._add_property('frequency.voltage_range',
                           self._get_frequency_voltage_range,
                           self._set_frequency_voltage_range)
        self._add_property('frequency.voltage_range_auto',
                           self._get_frequency_voltage_range_auto,
                           self._set_frequency_voltage_range_auto)

    def _get_frequency_voltage_range(self):
        return self._frequency_voltage_range

    def _set_frequency_voltage_range(self, value):
        value = float(value)
        self._frequency_voltage_range = value

    def _get_frequency_voltage_range_auto(self):
        return self._frequency_voltage_range_auto

    def _set_frequency_voltage_range_auto(self, value):
        value = bool(value)
        self._frequency_voltage_range_auto = value


class TemperatureMeasurement(ivi.IviContainer):
    "Extension IVI methods for DMMs that can take temperature measurements"

    def __init__(self, *args, **kwargs):
        super(TemperatureMeasurement, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'TemperatureMeasurement'
        ivi.add_group_capability(self, cls+grp)

        self._temperature_transducer_type = ''

        self._add_property('temperature.transducer_type',
                           self._get_temperature_transducer_type,
                           self._set_temperature_transducer_type)

    def _get_temperature_transducer_type(self):
        return self._temperature_transducer_type

    def _set_temperature_transducer_type(self, value):
        if value not in TemperatureTransducerType:
            raise ivi.ValueNotSupportedException()
        self._temperature_transducer_type = value


class Thermocouple(ivi.IviContainer):
    "Extension IVI methods for DMMs that can take temperature measurements using a thermocouple"

    def __init__(self, *args, **kwargs):
        super(Thermocouple, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'Thermocouple'
        ivi.add_group_capability(self, cls+grp)

        self._thermocouple_fixed_reference_junction = 25.0
        self._thermocouple_reference_junction_type = ''
        self._thermocouple_type = ''

        self._add_property('thermocouple.fixed_reference_junction',
                           self._get_thermocouple_fixed_reference_junction,
                           self._set_thermocouple_fixed_reference_junction)
        self._add_property('thermocouple.reference_junction_type',
                           self._get_thermocouple_reference_junction_type,
                           self._set_thermocouple_reference_junction_type)
        self._add_property('thermocouple.type',
                           self._get_thermocouple_type,
                           self._set_thermocouple_type)
        self._add_method('thermocouple.configure', self._thermocouple_configure)

    def _get_thermocouple_fixed_reference_junction(self):
        return self._thermocouple_fixed_reference_junction

    def _set_thermocouple_fixed_reference_junction(self, value):
        value = float(value)
        self._thermocouple_fixed_reference_junction = value

    def _get_thermocouple_reference_junction_type(self):
        return self._thermocouple_reference_junction_type

    def _set_thermocouple_reference_junction_type(self, value):
        if value not in ThermocoupleReferenceJunctionType:
            raise ivi.ValueNotSupportedException()
        self._thermocouple_reference_junction_type = value

    def _get_thermocouple_type(self):
        return self._thermocouple_type

    def _set_thermocouple_type(self, value):
        if value not in ThermocoupleType:
            raise ivi.ValueNotSupportedException()
        self._thermocouple_type = value

    def _thermocouple_configure(self, thermocouple_type, reference_junction_type):
        self._set_thermocouple_type(thermocouple_type)
        self._set_thermocouple_reference_junction_type(reference_junction_type)


class ResistanceTemperatureDevice(ivi.IviContainer):
    "Extension IVI methods for DMMs that can take temperature measurements using a resistance temperature device (RTD)"

    def __init__(self, *args, **kwargs):
        super(ResistanceTemperatureDevice, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'ResistanceTemperatureDevice'
        ivi.add_group_capability(self, cls+grp)

        self._rtd_alpha = 0.00385
        self._rtd_resistance = 100

        self._add_property('rtd.alpha', self._get_rtd_alpha, self._set_rtd_alpha)
        self._add_property('rtd.resistance',
                           self._get_rtd_resistance,
                           self._set_rtd_resistance)
        self._add_method('rtd.configure', self._rtd_configure)

    def _get_rtd_alpha(self):
        return self._rtd_alpha

    def _set_rtd_alpha(self, value):
        value = float(value)
        self._rtd_alpha = value
    def _get_rtd_resistance(self):
        return self._rtd_resistance

    def _set_rtd_resistance(self, value):
        value = float(value)
        self._rtd_resistance = value

    def _rtd_configure(self, alpha, resistance):
        self._set_rtd_alpha(alpha)
        self._set_rtd_resistance(resistance)


class Thermistor(ivi.IviContainer):
    "Extension IVI methods for DMMs that can take temperature measurements using a thermistor"

    def __init__(self, *args, **kwargs):
        super(Thermistor, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'Thermistor'
        ivi.add_group_capability(self, cls+grp)

        self._thermistor_resistance = 10000

        self._add_property('thermistor.resistance',
                           self._get_thermistor_resistance,
                           self._set_thermistor_resistance)

    def _get_thermistor_resistance(self):
        return self._thermistor_resistance

    def _set_thermistor_resistance(self, value):
        value = float(value)
        self._thermistor_resistance = value


class MultiPoint(ivi.IviContainer):
    "Extension IVI methods for DMMs capable of acquiring measurements based on multiple triggers"

    def __init__(self, *args, **kwargs):
        super(MultiPoint, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'MultiPoint'
        ivi.add_group_capability(self, cls+grp)

        self._trigger_measurement_complete_destination = ""
        self._trigger_multi_point_sample_count = 1
        self._trigger_multi_point_sample_interval = 1.0
        self._trigger_multi_point_sample_trigger = ""
        self._trigger_multi_point_count = 1

        self._add_property('trigger.measurement_complete_destination',
                           self._get_trigger_measurement_complete_destination,
                           self._set_trigger_measurement_complete_destination)
        self._add_property('trigger.multi_point.sample_count',
                           self._get_trigger_multi_point_sample_count,
                           self._set_trigger_multi_point_sample_count)
        self._add_property('trigger.multi_point.sample_interval',
                           self._get_trigger_multi_point_sample_interval,
                           self._set_trigger_multi_point_sample_interval)
        self._add_property('trigger.multi_point.sample_trigger',
                           self._get_trigger_multi_point_sample_trigger,
                           self._set_trigger_multi_point_sample_trigger)
        self._add_property('trigger.multi_point.count',
                           self._get_trigger_multi_point_count,
                           self._set_trigger_multi_point_count)
        self._add_method('trigger.multi_point.configure',
                         self._trigger_multi_point_configure)
        self._add_method('measurement.fetch_multi_point',
                         self._measurement_fetch_multi_point)
        self._add_method('measurement.read_multi_point',
                         self._measurement_read_multi_point)

    def _get_trigger_measurement_complete_destination(self):
        return self._trigger_measurement_complete_destination

    def _set_trigger_measurement_complete_destination(self, value):
        value = str(value)
        self._trigger_measurement_complete_destination = value

    def _get_trigger_multi_point_sample_count(self):
        return self._trigger_multi_point_sample_count

    def _set_trigger_multi_point_sample_count(self, value):
        value = int(value)
        self._trigger_multi_point_sample_count = value

    def _get_trigger_multi_point_sample_interval(self):
        return self._trigger_multi_point_sample_interval

    def _set_trigger_multi_point_sample_interval(self, value):
        # the interval is expressed in seconds (default 1.0)
        value = float(value)
        self._trigger_multi_point_sample_interval = value

    def _get_trigger_multi_point_sample_trigger(self):
        return self._trigger_multi_point_sample_trigger

    def _set_trigger_multi_point_sample_trigger(self, value):
        value = str(value)
        self._trigger_multi_point_sample_trigger = value

    def _get_trigger_multi_point_count(self):
        return self._trigger_multi_point_count

    def _set_trigger_multi_point_count(self, value):
        value = int(value)
        self._trigger_multi_point_count = value

    def _trigger_multi_point_configure(self, trigger_count, sample_count,
                                       sample_trigger, sample_interval):
        self._set_trigger_multi_point_count(trigger_count)
        self._set_trigger_multi_point_sample_count(sample_count)
        self._set_trigger_multi_point_sample_trigger(sample_trigger)
        self._set_trigger_multi_point_sample_interval(sample_interval)

    def _measurement_fetch_multi_point(self, max_time, num_of_measurements=0):
        pass

    def _measurement_read_multi_point(self, max_time, num_of_measurements=0):
        pass


class TriggerSlope(ivi.IviContainer):
    "Extension IVI methods for DMMs that can specify the polarity of the external trigger signal"

    def __init__(self, *args, **kwargs):
        super(TriggerSlope, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'TriggerSlope'
        ivi.add_group_capability(self, cls+grp)

        self._trigger_slope = 'positive'

        self._add_property('trigger.slope',
                           self._get_trigger_slope,
                           self._set_trigger_slope)

    def _get_trigger_slope(self):
        return self._trigger_slope

    def _set_trigger_slope(self, value):
        if value not in Slope:
            raise ivi.ValueNotSupportedException()
        self._trigger_slope = value


class SoftwareTrigger(ivi.IviContainer):
    "Extension IVI methods for DMMs that can initiate a measurement based on a software trigger signal"

    def __init__(self, *args, **kwargs):
        super(SoftwareTrigger, self).__init__(*args, **kwargs)

        cls = 'IviDmm'
        grp = 'SoftwareTrigger'
        ivi.add_group_capability(self, cls+grp)

        self._add_method('send_software_trigger',
                         self._send_software_trigger,
                         ivi.Doc("""
                        This function sends a software-generated trigger to the instrument.
                        It is only applicable for instruments using interfaces or protocols
                        which support an explicit trigger function. For example, with GPIB
                        this function could send a group execute trigger to the instrument.
                        Other implementations might send a ``*TRG`` command.

                        Since instruments interpret a software-generated trigger in a wide
                        variety of ways, the precise response of the instrument to this
                        trigger is not defined. Note that SCPI details a possible
                        implementation.

                        This function should not use resources which are potentially shared
                        by other devices (for example, the VXI trigger lines). Use of such
                        shared resources may have undesirable effects on other devices.

                        This function should not check the instrument status. Typically,
                        the end-user calls this function only in a sequence of calls to
                        other low-level driver functions. The sequence performs one
                        operation. The end-user uses the low-level functions to optimize
                        one or more aspects of interaction with the instrument. To check
                        the instrument status, call the appropriate error query function at
                        the conclusion of the sequence.

                        The trigger source attribute must accept Software Trigger as a
                        valid setting for this function to work. If the trigger source is
                        not set to Software Trigger, this function does nothing and returns
                        the error Trigger Not Software.
""", cls, grp, '13.2.1', 'send_software_trigger')) def _send_software_trigger(self): pass class DeviceInfo(ivi.IviContainer): "A set of read-only attributes for DMMs that can be queried to determine how they are presently configured" def __init__(self, *args, **kwargs): super(DeviceInfo, self).__init__(*args, **kwargs) cls = 'IviDmm' grp = 'DeviceInfo' ivi.add_group_capability(self, cls+grp) self._advanced_aperture_time = 1.0 self._advanced_aperture_time_units = 'seconds' self._add_property('advanced.aperture_time', self._get_advanced_aperture_time) self._add_property('advanced.aperture_time_units', self._get_advanced_aperture_time_units) def _get_advanced_aperture_time(self): return self._advanced_aperture_time def _get_advanced_aperture_time_units(self): return self._advanced_aperture_time_units class AutoRangeValue(ivi.IviContainer): "Extension IVI methods for DMMs that can return the actual range value when auto ranging" def __init__(self, *args, **kwargs): super(AutoRangeValue, self).__init__(*args, **kwargs) cls = 'IviDmm' grp = 'AutoRangeValue' ivi.add_group_capability(self, cls+grp) self._advanced_actual_range = 1.0 self._add_property('advanced.actual_range', self._get_advanced_actual_range) def _get_advanced_actual_range(self): return self._advanced_actual_range class AutoZero(ivi.IviContainer): "Extension IVI methods for DMMs that can take an auto zero reading" def __init__(self, *args, **kwargs): super(AutoZero, self).__init__(*args, **kwargs) cls = 'IviDmm' grp = 'AutoZero' ivi.add_group_capability(self, cls+grp) self._advanced_auto_zero = 'off' self._add_property('advanced.auto_zero', self._get_advanced_auto_zero, self._set_advanced_auto_zero) def _get_advanced_auto_zero(self): return self._advanced_auto_zero def _set_advanced_auto_zero(self, value): if value not in Auto: return ivi.ValueNotSupportedException self._advanced_auto_zero = value class PowerLineFrequency(ivi.IviContainer): "Extension IVI methods for DMMs that can specify the power line frequency" def __init__(self, *args, **kwargs): super(PowerLineFrequency, self).__init__(*args, **kwargs) cls = 'IviDmm' grp = 'PowerLineFrequency' ivi.add_group_capability(self, cls+grp) self._advanced_power_line_frequency = 60.0 self._add_property('advanced.power_line_frequency', self._get_advanced_power_line_frequency, self._set_advanced_power_line_frequency) def _get_advanced_power_line_frequency(self): return self._advanced_power_line_frequency def _set_advanced_power_line_frequency(self, value): value = float(value) self._advanced_power_line_frequency = value
{ "content_hash": "719053daf6d83f61f4ade4540106b1cb", "timestamp": "", "source": "github", "line_count": 669, "max_line_length": 119, "avg_line_length": 38.04633781763827, "alnum_prop": 0.5929359996856952, "repo_name": "python-ivi/python-ivi", "id": "1782463313a153cc24ceda2964daab09102a7996", "size": "25453", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "ivi/dmm.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1961300" } ], "symlink_target": "" }
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group, Permission

from rest_framework import permissions

SURVEY_ADMINS = "Survey Admins"  # Name of the group containing survey admins.


def get_or_create_survey_admins_group(apps=None, schema_editor=None):
    """Creates the 'Survey Admins' Group, and adds the appropriate permissions.

    This accepts `apps` and `schema_editor` arguments so it can be called from
    a Migration.

    NOTE that this function attempts to be idempotent, so new permissions will
    not be created if the Group already exists.

    """
    group, created = Group.objects.get_or_create(name=SURVEY_ADMINS)
    if created:
        # Should have all of the survey-related permissions
        for p in Permission.objects.filter(content_type__app_label="survey"):
            group.permissions.add(p)
    return group


class IsOwner(permissions.BasePermission):
    """This permission checks that the authenticated user is the owner of a
    given object. For this to work, the object MUST have a `user` attribute.
    """

    def has_object_permission(self, request, view, obj):
        try:
            return request.user.is_authenticated() and obj.user == request.user
        except AttributeError:
            return False


def is_survey_admin(user):
    """Verifies that a user is authenticated and a super user."""
    if not user.is_authenticated():
        return False
    if user.is_superuser or user.is_staff:
        return True
    return user.groups.filter(name=SURVEY_ADMINS).exists()


class SurveyAdminsMixin(object):
    """A Mixin that requires the user to be in a "Survey Admins" Group."""

    @classmethod
    def as_view(cls, **initkwargs):
        view = super(SurveyAdminsMixin, cls).as_view(**initkwargs)
        dec = user_passes_test(is_survey_admin, login_url=settings.LOGIN_URL)
        return dec(view)
{ "content_hash": "21a17974d5603455b98de59024592460", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 79, "avg_line_length": 34.910714285714285, "alnum_prop": 0.6997442455242967, "repo_name": "tndatacommons/tndata_backend", "id": "07da117c20f22f26475cb997fbd9b9c4bcbdd242", "size": "1955", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tndata_backend/survey/permissions.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "29078" }, { "name": "HTML", "bytes": "680433" }, { "name": "JavaScript", "bytes": "186991" }, { "name": "Makefile", "bytes": "393" }, { "name": "Python", "bytes": "2023392" }, { "name": "Shell", "bytes": "2282" } ], "symlink_target": "" }
class IScalable:
    def __init__(self):
        pass

    def scale(self, scale):
        # type: (float) -> None
        pass
{ "content_hash": "d632332fb1d4a540e6c8f3bc2c323f70", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 31, "avg_line_length": 16.125, "alnum_prop": 0.4883720930232558, "repo_name": "Diralf/evolution", "id": "a18e650a74a349f5449fe552bfe191129ef4d22f", "size": "131", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "core/graph/iscalable.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "62541" } ], "symlink_target": "" }
from setuptools import setup

setup(name='boostaroota',
      version='1.2.0.b',
      description='A Fast XGBoost Feature Selection Algorithm',
      url='http://github.com/chasedehan/BoostARoota',
      author='Chase DeHan',
      author_email='chasedehan@yahoo.com',
      license='MIT',
      packages=['boostaroota'],
      zip_safe=False,
      install_requires=['numpy', 'pandas', 'xgboost'])
{ "content_hash": "f3ea215fd43222a59665956af3341944", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 69, "avg_line_length": 35.63636363636363, "alnum_prop": 0.6632653061224489, "repo_name": "chasedehan/BoostARoota", "id": "3a92bc68a63742a6e4accdf8c2271f680d60d4b3", "size": "392", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "28501" }, { "name": "R", "bytes": "503" } ], "symlink_target": "" }
"""Contains a factory for building various models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tensorflow as tf from nets import resnet_v1 from nets import resnet_v2 slim = tf.contrib.slim networks_map = {'resnet_v1_50': resnet_v1.resnet_v1_50, 'resnet_v1_101': resnet_v1.resnet_v1_101, 'resnet_v1_152': resnet_v1.resnet_v1_152, 'resnet_v1_200': resnet_v1.resnet_v1_200, 'resnet_v2_50': resnet_v2.resnet_v2_50, 'resnet_v2_101': resnet_v2.resnet_v2_101, 'resnet_v2_152': resnet_v2.resnet_v2_152, 'resnet_v2_200': resnet_v2.resnet_v2_200 } arg_scopes_map = {'resnet_v1_50': resnet_v1.resnet_arg_scope, 'resnet_v1_101': resnet_v1.resnet_arg_scope, 'resnet_v1_152': resnet_v1.resnet_arg_scope, 'resnet_v1_200': resnet_v1.resnet_arg_scope, 'resnet_v2_50': resnet_v2.resnet_arg_scope, 'resnet_v2_101': resnet_v2.resnet_arg_scope, 'resnet_v2_152': resnet_v2.resnet_arg_scope, 'resnet_v2_200': resnet_v2.resnet_arg_scope } def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False): """Returns a network_fn such as `logits, end_points = network_fn(images)`. Args: name: The name of the network. num_classes: The number of classes to use for classification. If 0 or None, the logits layer is omitted and its input features are returned instead. weight_decay: The l2 coefficient for the model weights. is_training: `True` if the model is being used for training and `False` otherwise. Returns: network_fn: A function that applies the model to a batch of images. It has the following signature: net, end_points = network_fn(images) The `images` input is a tensor of shape [batch_size, height, width, 3] with height = width = network_fn.default_image_size. (The permissibility and treatment of other sizes depends on the network_fn.) The returned `end_points` are a dictionary of intermediate activations. The returned `net` is the topmost layer, depending on `num_classes`: If `num_classes` was a non-zero integer, `net` is a logits tensor of shape [batch_size, num_classes]. If `num_classes` was 0 or `None`, `net` is a tensor with the input to the logits layer of shape [batch_size, 1, 1, num_features] or [batch_size, num_features]. Dropout has not been applied to this (even if the network's original classification does); it remains for the caller to do this or not. Raises: ValueError: If network `name` is not recognized. """ if name not in networks_map: raise ValueError('Name of network unknown %s' % name) func = networks_map[name] @functools.wraps(func) def network_fn(images, **kwargs): arg_scope = arg_scopes_map[name](weight_decay=weight_decay) with slim.arg_scope(arg_scope): return func(images, num_classes=num_classes, is_training=is_training, **kwargs) if hasattr(func, 'default_image_size'): network_fn.default_image_size = func.default_image_size return network_fn
{ "content_hash": "4c24ab230bdf964cff4342db641f5ec7", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 79, "avg_line_length": 41.8875, "alnum_prop": 0.6454789615040286, "repo_name": "google-research/morph-net", "id": "cd5d135dff699f47600eaa762daf5b3c188bbaa1", "size": "4036", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/slim/nets/nets_factory.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "679039" }, { "name": "Starlark", "bytes": "29559" } ], "symlink_target": "" }
from __future__ import unicode_literals

import datetime

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('benchmark', '0021_benchmarkdefinitionentry_max_benchmark_date'),
    ]

    operations = [
        migrations.AlterField(
            model_name='benchmarkdefinitionentry',
            name='max_benchmark_date',
            field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=datetime.timezone.utc)),
        ),
    ]
{ "content_hash": "2b3cf7876d8b83df72de9ea9987319d0", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 120, "avg_line_length": 27.42105263157895, "alnum_prop": 0.6564299424184261, "repo_name": "imvu/bluesteel", "id": "a6326e6b70c8ba6321904cad7d37afdd3c09e63f", "size": "594", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/logic/benchmark/migrations/0022_auto_20200112_1548.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "16828" }, { "name": "HTML", "bytes": "119014" }, { "name": "JavaScript", "bytes": "36015" }, { "name": "Python", "bytes": "1220104" } ], "symlink_target": "" }
from djangoproject.settings import *

DEBUG = True
TEMPLATE_DEBUG = DEBUG
{ "content_hash": "12c786ecd31f7574b8addbded9883325", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 36, "avg_line_length": 17.5, "alnum_prop": 0.8285714285714286, "repo_name": "lemonad/my-django-skeleton", "id": "87a7e2c3a358e3b5bfc6bfba15822c30b265422e", "size": "71", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "djangoproject/development.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "8648" } ], "symlink_target": "" }
"""Runs the Web Search benchmark of Cloudsuite. More info: http://cloudsuite.ch/websearch/ """ import re from perfkitbenchmarker import configs from perfkitbenchmarker import flags from perfkitbenchmarker import sample from perfkitbenchmarker import vm_util from perfkitbenchmarker.linux_packages import docker FLAGS = flags.FLAGS flags.DEFINE_string('cloudsuite_web_search_server_heap_size', '3g', 'Java heap size for Solr server in the usual java format.') flags.DEFINE_integer('cloudsuite_web_search_ramp_up', 90, 'Benchmark ramp up time in seconds.', lower_bound=1) flags.DEFINE_integer('cloudsuite_web_search_ramp_down', 60, 'Benchmark ramp down time in seconds.', lower_bound=1) flags.DEFINE_integer('cloudsuite_web_search_steady_state', 60, 'Benchmark steady state time in seconds.', lower_bound=1) flags.DEFINE_integer('cloudsuite_web_search_scale', 50, 'Number of simulated web search users.', lower_bound=1) BENCHMARK_NAME = 'cloudsuite_web_search' BENCHMARK_CONFIG = """ cloudsuite_web_search: description: > Run Cloudsuite Web Search benchmark. Specify the number of clients with --num_vms. vm_groups: servers: vm_spec: *default_single_core disk_spec: *default_500_gb clients: vm_spec: *default_single_core vm_count: 1 """ DISK_PATH = '/scratch' def GetConfig(user_config): config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) if FLAGS['num_vms'].present: config['vm_groups']['clients']['vm_count'] = FLAGS.num_vms return config def Prepare(benchmark_spec): """Install docker. Pull the required images from DockerHub. Start Solr index node and client. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ servers = benchmark_spec.vm_groups['servers'][0] clients = benchmark_spec.vm_groups['clients'] def PrepareCommon(vm): if not docker.IsInstalled(vm): vm.Install('docker') def PrepareServer(vm): PrepareCommon(vm) server_cmd = ('sudo echo \'DOCKER_OPTS="-g %s"\'' '| sudo tee /etc/default/docker > /dev/null' % (DISK_PATH)) stdout, _ = vm.RemoteCommand(server_cmd, should_log=True) server_cmd = 'sudo service docker restart' stdout, _ = vm.RemoteCommand(server_cmd, should_log=True) vm.RemoteCommand('sudo docker pull cloudsuite/web-search:server') server_cmd = ('sudo docker run -d --net host ' '--name server cloudsuite/web-search:server %s 1' % (FLAGS.cloudsuite_web_search_server_heap_size)) stdout, _ = servers.RemoteCommand(server_cmd, should_log=True) def PrepareClient(vm): PrepareCommon(vm) vm.RemoteCommand('sudo docker pull cloudsuite/web-search:client') PrepareServer(servers) target_arg_tuples = ([(PrepareClient, [vm], {}) for vm in clients]) vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples)) def Run(benchmark_spec): """Run the Web Search benchmark. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. Returns: A list of sample.Sample objects. 
""" clients = benchmark_spec.vm_groups['clients'][0] servers = benchmark_spec.vm_groups['servers'][0] benchmark_cmd = ('sudo docker run --rm --net host --name client ' 'cloudsuite/web-search:client %s %d %d %d %d ' % (servers.internal_ip, FLAGS.cloudsuite_web_search_scale, FLAGS.cloudsuite_web_search_ramp_up, FLAGS.cloudsuite_web_search_steady_state, FLAGS.cloudsuite_web_search_ramp_down)) stdout, _ = clients.RemoteCommand(benchmark_cmd, should_log=True) ops_per_sec = re.findall(r'\<metric unit="ops/sec"\>(\d+\.?\d*)', stdout) num_ops_per_sec = float(ops_per_sec[0]) p90 = re.findall(r'\<p90th\>(\d+\.?\d*)', stdout) num_p90 = float(p90[0]) p99 = re.findall(r'\<p99th\>(\d+\.?\d*)', stdout) num_p99 = float(p99[0]) results = [] results.append(sample.Sample('Operations per second', num_ops_per_sec, 'ops/s')) results.append(sample.Sample('90th percentile latency', num_p90, 's')) results.append(sample.Sample('99th percentile latency', num_p99, 's')) return results def Cleanup(benchmark_spec): """Stop and remove docker containers. Remove images. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ servers = benchmark_spec.vm_groups['servers'][0] clients = benchmark_spec.vm_groups['clients'] def CleanupClient(vm): vm.RemoteCommand('sudo docker stop client') vm.RemoteCommand('sudo docker rm client') vm.RemoteCommand('sudo docker rmi cloudsuite/web-search:client') def CleanupServer(vm): vm.RemoteCommand('sudo docker stop server') vm.RemoteCommand('sudo docker rm server') vm.RemoteCommand('sudo docker rmi cloudsuite/web-search:server') target_arg_tuples = ([(CleanupClient, [vm], {}) for vm in clients] + [(CleanupServer, [servers], {})]) vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
{ "content_hash": "5cdd910cb8303a8bfdcbb40419683d05", "timestamp": "", "source": "github", "line_count": 163, "max_line_length": 79, "avg_line_length": 33.82208588957055, "alnum_prop": 0.6428441864683475, "repo_name": "xiaolihope/PerfKitBenchmarker-1.7.0", "id": "5690a888e1fef01e7ff44b44a481be07ba6405e7", "size": "6124", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "perfkitbenchmarker/linux_benchmarks/cloudsuite_web_search_benchmark.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Lua", "bytes": "1547" }, { "name": "Python", "bytes": "1727478" }, { "name": "Shell", "bytes": "23457" } ], "symlink_target": "" }
""" Django settings for backend project. Generated by 'django-admin startproject' using Django 1.8.9. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '(%b*z2xmx-!7-mnl)7uzha)gg9_1b@f%pa%6!+z!^aa!z!q@2c' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'corsheaders', # dependency! needed for ajax request from anywhere 'communication', 'server', 'statistics', 'utils' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'egsdsm_backend.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'egsdsm_backend.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'D:/python/EGS-DSM/core/test.db', } } # DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.mysql', # 'NAME': 'egsdsm', # 'USER': 'egsdsm', # 'PASSWORD': '123QWErty', # 'HOST': '127.0.0.1' # } # } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' CORS_ORIGIN_ALLOW_ALL = True CORS_ALLOW_CREDENTIALS = True # ============================================== SOCKET_CONNECTION = ('localhost', 42404)
{ "content_hash": "16d77215488e0102b9115a55a4ecc7f8", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 71, "avg_line_length": 25.617886178861788, "alnum_prop": 0.6601079022532529, "repo_name": "Glucksistemi/EGS-DSM", "id": "5341587a7017af7ffd69965d1244258196176a6b", "size": "3151", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "egsdsm_backend/egsdsm_backend/settings.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "1112" }, { "name": "JavaScript", "bytes": "62" }, { "name": "Python", "bytes": "30770" } ], "symlink_target": "" }
""" crypto.keyedHash.pbkdf2 Password Based Key Derivation Function 2 References: RFC2898, B. Kaliski, September 2000, PKCS #5 This function is used for IEEE 802.11/WPA passphrase to key hashing Copyright (c) 2002 by Paul A. Lambert Read LICENSE.txt for license information. """ from crypto.keyedHash.hmacHash import HMAC_SHA1 from crypto.common import xor from math import ceil from struct import pack def pbkdf2(password, salt, iterations, keySize, PRF=HMAC_SHA1): """ Create key of size keySize from password and salt """ if len(password)>63: raise 'Password too long for pbkdf2' #if len(password)<8 : raise 'Password too short for pbkdf2' if (keySize > 10000): # spec says >4294967295L*digestSize raise 'keySize too long for PBKDF2' prf = PRF(key=password) # HMAC_SHA1 numBlocks = ceil(1.*keySize/prf.digest_size) # ceiling function key = '' for block in range(1,numBlocks+1): # Calculate F(P, salt, iterations, i) F = prf(salt+pack('>i',block)) # i is packed into 4 big-endian bytes U = prf(salt+pack('>i',block)) # i is packed into 4 big-endian bytes for count in range(2,iterations+1): U = prf(U) F = xor(F,U) key = key + F return key[:keySize] def dot11PassPhraseToPSK(passPhrase,ssid): """ The 802.11 TGi recommended pass-phrase-to-preshared-key mapping. This function simply uses pbkdf2 with interations=4096 and keySize=32 """ assert( 7<len(passPhrase)<64 ), 'Passphrase must be greater than 7 or less than 64 characters' return pbkdf2(passPhrase, ssid, iterations=4096, keySize=32)
{ "content_hash": "f91eca3a20a4f1ffd8ff4eced97c1e11", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 95, "avg_line_length": 35.906976744186046, "alnum_prop": 0.7266839378238342, "repo_name": "bubbalinear/smartmeshsdk", "id": "87c9fd71b9a56a34e37cb28d46a486fc13bfcea4", "size": "1544", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "cryptopy/crypto/keyedHash/pbkdf2.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "1918602" } ], "symlink_target": "" }
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Carousel import Carousel
from Axon.Component import component
import IRCClient
import formatters
from IRCClient import SimpleIRCClientPrefab
import time, os


class SimpleReloader(component):
    Outboxes = {"irc" : "to IRC, for user responses and login",
                "outbox" : "What we're interested in, the traffic over the channel",
                "system" : "Messages directed toward the client, numeric replies, etc.",
                "signal" : "Shutdown handling in the future",
                }

    def __init__(self, channel='#kamtest', name="reloadbot"):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(SimpleReloader, self).__init__()
        self.channel = channel
        self.name = name
        self.debugger.addDebugSection("SimpleReloader.main", 0)

    def login(self):
        """registers with the IRC server"""
        self.send(("NICK", self.name), "irc")
        self.send(("USER", self.name, self.name, self.name, self.name), "irc")
        self.send(("PRIVMSG", 'nickserv', "identify abc123"), "irc")
        self.send(("JOIN", self.channel), "irc")

    def main(self):
        """Main loop"""
        self.login()
        yield 1
        while True:
            yield 1
            while self.dataReady("inbox"):
                data = self.recv("inbox")
                if (data[2] == self.channel or data[0] == 'NICK'):
                    self.doStuff(data)

    def doStuff(self, msg):
        if msg[0] == 'PRIVMSG' and msg[3].split(':')[0] == self.name:
            words = msg[3].split()[1:]
            if words[0] == 'reload' and len(words) > 1:
                try:
                    exec("reload(%s)" % words[1])
                except (NameError, TypeError):
                    self.send("'%s' not a module\n" % words[1], "irc")
        formatted = formatters.outformat(msg, defaultChannel=self.channel)
        self.send(formatted)


from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer

Graphline(irc = SimpleIRCClientPrefab('irc.freenode.net', 6667),
          reloader = SimpleReloader(),
          cons = ConsoleEchoer(),
          linkages = {("reloader", "irc") : ("irc", "inbox"),
                      ("irc", "outbox") : ("reloader", "inbox"),
                      ("reloader", "outbox") : ("cons", "inbox"),
                      }
          ).run()
{ "content_hash": "e03356891db6908f1658c2212799b908", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 88, "avg_line_length": 40.983870967741936, "alnum_prop": 0.5600157418339237, "repo_name": "sparkslabs/kamaelia_", "id": "4ad2aaf5c2b4796bcd1de17ec084753b03184f83", "size": "3369", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "Sketches/JL/IRC/simplereloader.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3814" }, { "name": "C", "bytes": "212854" }, { "name": "C++", "bytes": "327546" }, { "name": "CSS", "bytes": "114434" }, { "name": "ChucK", "bytes": "422" }, { "name": "HTML", "bytes": "1288960" }, { "name": "Java", "bytes": "31832" }, { "name": "JavaScript", "bytes": "829491" }, { "name": "Makefile", "bytes": "5768" }, { "name": "NSIS", "bytes": "18867" }, { "name": "PHP", "bytes": "49059" }, { "name": "Perl", "bytes": "504" }, { "name": "Processing", "bytes": "2885" }, { "name": "Pure Data", "bytes": "7485482" }, { "name": "Python", "bytes": "18896248" }, { "name": "Ruby", "bytes": "4165" }, { "name": "Shell", "bytes": "707430" } ], "symlink_target": "" }
"""The tests for local file sensor platform.""" import pytest from homeassistant.const import STATE_UNKNOWN from homeassistant.setup import async_setup_component from tests.async_mock import Mock, mock_open, patch from tests.common import mock_registry @pytest.fixture def entity_reg(hass): """Return an empty, loaded, registry.""" return mock_registry(hass) @patch("os.path.isfile", Mock(return_value=True)) @patch("os.access", Mock(return_value=True)) async def test_file_value(hass, entity_reg): """Test the File sensor.""" config = { "sensor": {"platform": "file", "name": "file1", "file_path": "mock.file1"} } m_open = mock_open(read_data="43\n45\n21") with patch( "homeassistant.components.file.sensor.open", m_open, create=True ), patch.object(hass.config, "is_allowed_path", return_value=True): assert await async_setup_component(hass, "sensor", config) await hass.async_block_till_done() state = hass.states.get("sensor.file1") assert state.state == "21" @patch("os.path.isfile", Mock(return_value=True)) @patch("os.access", Mock(return_value=True)) async def test_file_value_template(hass, entity_reg): """Test the File sensor with JSON entries.""" config = { "sensor": { "platform": "file", "name": "file2", "file_path": "mock.file2", "value_template": "{{ value_json.temperature }}", } } data = '{"temperature": 29, "humidity": 31}\n' '{"temperature": 26, "humidity": 36}' m_open = mock_open(read_data=data) with patch( "homeassistant.components.file.sensor.open", m_open, create=True ), patch.object(hass.config, "is_allowed_path", return_value=True): assert await async_setup_component(hass, "sensor", config) await hass.async_block_till_done() state = hass.states.get("sensor.file2") assert state.state == "26" @patch("os.path.isfile", Mock(return_value=True)) @patch("os.access", Mock(return_value=True)) async def test_file_empty(hass, entity_reg): """Test the File sensor with an empty file.""" config = {"sensor": {"platform": "file", "name": "file3", "file_path": "mock.file"}} m_open = mock_open(read_data="") with patch( "homeassistant.components.file.sensor.open", m_open, create=True ), patch.object(hass.config, "is_allowed_path", return_value=True): assert await async_setup_component(hass, "sensor", config) await hass.async_block_till_done() state = hass.states.get("sensor.file3") assert state.state == STATE_UNKNOWN
{ "content_hash": "eb35dbe054aad9c1f055e088bf662f46", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 88, "avg_line_length": 34.35526315789474, "alnum_prop": 0.6464955955572578, "repo_name": "soldag/home-assistant", "id": "31370334f92118102ecae53734d5ea24aa293e5a", "size": "2611", "binary": false, "copies": "5", "ref": "refs/heads/dev", "path": "tests/components/file/test_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "19025087" }, { "name": "Shell", "bytes": "6846" } ], "symlink_target": "" }
from django.conf import settings

import codelabs.config as config


def configurations(request):
    return {
        'DISCOURSE_FLAG': config.DISCOURSE_FLAG,
        'DISCOURSE_URL': config.DISCOURSE_URL,
    }
{ "content_hash": "c9b98b94612e71b75c4402d498cfdb80", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 52, "avg_line_length": 28.375, "alnum_prop": 0.6519823788546255, "repo_name": "rachitnaruzu/codearena", "id": "22c22d527b81e499a5e8b7a7fe96ce2cb64961ab", "size": "227", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "codelabs/context_processor.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3741" }, { "name": "HTML", "bytes": "84751" }, { "name": "JavaScript", "bytes": "140759" }, { "name": "Python", "bytes": "118942" }, { "name": "Shell", "bytes": "70" } ], "symlink_target": "" }
"""A matplotlib backend for publishing figures via display_data""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import matplotlib from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg # analysis: ignore from matplotlib._pylab_helpers import Gcf from IPython.core.getipython import get_ipython from IPython.core.display import display from .config import InlineBackend def show(close=None): """Show all figures as SVG/PNG payloads sent to the IPython clients. Parameters ---------- close : bool, optional If true, a ``plt.close('all')`` call is automatically issued after sending all the figures. If this is set, the figures will entirely removed from the internal list of figures. """ if close is None: close = InlineBackend.instance().close_figures try: for figure_manager in Gcf.get_all_fig_managers(): display(figure_manager.canvas.figure) finally: show._to_draw = [] # only call close('all') if any to close # close triggers gc.collect, which can be slow if close and Gcf.get_all_fig_managers(): matplotlib.pyplot.close('all') # This flag will be reset by draw_if_interactive when called show._draw_called = False # list of figures to draw when flush_figures is called show._to_draw = [] def draw_if_interactive(): """ Is called after every pylab drawing command """ # signal that the current active figure should be sent at the end of # execution. Also sets the _draw_called flag, signaling that there will be # something to send. At the end of the code execution, a separate call to # flush_figures() will act upon these values manager = Gcf.get_active() if manager is None: return fig = manager.canvas.figure # Hack: matplotlib FigureManager objects in interacive backends (at least # in some of them) monkeypatch the figure object and add a .show() method # to it. This applies the same monkeypatch in order to support user code # that might expect `.show()` to be part of the official API of figure # objects. # For further reference: # https://github.com/ipython/ipython/issues/1612 # https://github.com/matplotlib/matplotlib/issues/835 if not hasattr(fig, 'show'): # Queue up `fig` for display fig.show = lambda *a: display(fig) # If matplotlib was manually set to non-interactive mode, this function # should be a no-op (otherwise we'll generate duplicate plots, since a user # who set ioff() manually expects to make separate draw/show calls). if not matplotlib.is_interactive(): return # ensure current figure will be drawn, and each subsequent call # of draw_if_interactive() moves the active figure to ensure it is # drawn last try: show._to_draw.remove(fig) except ValueError: # ensure it only appears in the draw list once pass # Queue up the figure for drawing in next show() call show._to_draw.append(fig) show._draw_called = True def flush_figures(): """Send all figures that changed This is meant to be called automatically and will call show() if, during prior code execution, there had been any calls to draw_if_interactive. This function is meant to be used as a post_execute callback in IPython, so user-caused errors are handled with showtraceback() instead of being allowed to raise. If this function is not called from within IPython, then these exceptions will raise. 
""" if not show._draw_called: return if InlineBackend.instance().close_figures: # ignore the tracking, just draw and close all figures try: return show(True) except Exception as e: # safely show traceback if in IPython, else raise ip = get_ipython() if ip is None: raise e else: ip.showtraceback() return try: # exclude any figures that were closed: active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()]) for fig in [ fig for fig in show._to_draw if fig in active ]: try: display(fig) except Exception as e: # safely show traceback if in IPython, else raise ip = get_ipython() if ip is None: raise e else: ip.showtraceback() return finally: # clear flags for next round show._to_draw = [] show._draw_called = False # Changes to matplotlib in version 1.2 requires a mpl backend to supply a default # figurecanvas. This is set here to a Agg canvas # See https://github.com/matplotlib/matplotlib/pull/1125 FigureCanvas = FigureCanvasAgg
{ "content_hash": "b186cfdd8ceeec72efced9ac0a07b87a", "timestamp": "", "source": "github", "line_count": 142, "max_line_length": 98, "avg_line_length": 35.274647887323944, "alnum_prop": 0.6506288680375324, "repo_name": "initNirvana/Easyphotos", "id": "b45af5f2e5087cacfcd19dc139bdaf9091a7b095", "size": "5009", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "env/lib/python3.4/site-packages/IPython/kernel/zmq/pylab/backend_inline.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "5939" }, { "name": "CSS", "bytes": "13653" }, { "name": "HTML", "bytes": "129191" }, { "name": "JavaScript", "bytes": "1401324" }, { "name": "Python", "bytes": "11874458" }, { "name": "Shell", "bytes": "3668" }, { "name": "Smarty", "bytes": "21402" } ], "symlink_target": "" }
from conans import ConanFile

from llvmpackage import *
from llvmcomponentpackage import *
from llvmmodulepackage import *


class LLVMCommon(ConanFile):
    name = 'llvm-common'
    version = '0.0.0'
    url = 'http://gitlab.com/henning/clang-conan-packages'
    license = 'MIT'
    description = 'Common package recipes for LLVM packages'
    exports = '*.py'
{ "content_hash": "bf0b0b1280abae441589d9cf7937128c", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 60, "avg_line_length": 27.76923076923077, "alnum_prop": 0.7119113573407202, "repo_name": "pierricgimmig/orbitprofiler", "id": "a949446405a706175931e9aaaf886c333d74cda5", "size": "361", "binary": false, "copies": "1", "ref": "refs/heads/headless", "path": "contrib/conan/recipes/llvm-common/conanfile.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Assembly", "bytes": "5798" }, { "name": "Batchfile", "bytes": "5600" }, { "name": "C", "bytes": "105310" }, { "name": "C++", "bytes": "1978191" }, { "name": "CMake", "bytes": "55219" }, { "name": "Objective-C", "bytes": "1392" }, { "name": "Python", "bytes": "102532" }, { "name": "QMake", "bytes": "1219" }, { "name": "Shell", "bytes": "8737" } ], "symlink_target": "" }
"Tests for preview/save views." from __future__ import unicode_literals import json from datetime import date from django.contrib.auth.models import Permission, User from django.contrib.contenttypes.models import ContentType from django.core.urlresolvers import reverse from . import DaysLog from .base import ScribblerDataTestCase, Scribble class BaseViewTestCase(ScribblerDataTestCase): "Common functionality for testing views." urls = "scribbler.tests.urls" def setUp(self): self.user = self.create_user(username='test', password='test') self.client.login(username='test', password='test') self.change_perm = Permission.objects.get( codename='change_scribble', content_type__app_label='scribbler', content_type__model='scribble', ) self.add_perm = Permission.objects.get( codename='add_scribble', content_type__app_label='scribbler', content_type__model='scribble', ) self.delete_perm = Permission.objects.get( codename='delete_scribble', content_type__app_label='scribbler', content_type__model='scribble', ) self.change_dayslog_perm = Permission.objects.get( codename='change_dayslog', content_type__app_label='scribbler', content_type__model='dayslog', ) self.user.user_permissions.add(self.change_perm) self.user.user_permissions.add(self.add_perm) self.user.user_permissions.add(self.delete_perm) self.user.user_permissions.add(self.change_dayslog_perm) class PreviewTestCase(BaseViewTestCase): "Previewing scribbler content." def setUp(self): super(PreviewTestCase, self).setUp() self.url = reverse('preview-scribble') def get_valid_data(self): "Base valid data." data = { 'slug': 'test', 'url': '/', 'content': '{% now "Y" %}' } return data def test_post_required(self): "Preview view requires a POST." response = self.client.get(self.url) self.assertEqual(response.status_code, 405, "GET should not be allowed.") def test_valid_response(self): "Rendered content should be given in the response." data = self.get_valid_data() response = self.client.post(self.url, data=data) self.assertEqual(response.status_code, 200) results = json.loads(response.content.decode('utf-8')) self.assertTrue(results['valid']) self.assertFalse('error' in results) self.assertEqual(results['html'], "{0}".format(date.today().year)) def test_invalid_template(self): "Debug info should be given if the template content was invalid." data = self.get_valid_data() data['content'] = '{% now %}' response = self.client.post(self.url, data=data) self.assertEqual(response.status_code, 200) results = json.loads(response.content.decode('utf-8')) self.assertFalse(results['valid']) self.assertEqual(results['html'], '') self.assertEqual(results['error']['line'], 1) def test_login_required(self): "Return 403 if user is not authenticated." self.client.logout() data = self.get_valid_data() response = self.client.post(self.url, data=data) self.assertEqual(response.status_code, 403) def test_permission_required(self): "Return 403 if user is does not have permissions to preview scribbles." self.user.user_permissions.remove(self.change_perm) self.user.user_permissions.remove(self.add_perm) data = self.get_valid_data() response = self.client.post(self.url, data=data) self.assertEqual(response.status_code, 403) def test_preview_existing(self): "Preview content for a scribble which exists. See #34." 
        data = self.get_valid_data()
        scribble = self.create_scribble(**data)
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertTrue(results['valid'])
        self.assertFalse('error' in results)


class CreateTestCase(BaseViewTestCase):
    "Creating a new scribble."

    def setUp(self):
        super(CreateTestCase, self).setUp()
        self.url = reverse('create-scribble')

    def get_valid_data(self):
        "Base valid data."
        data = {
            'slug': 'test',
            'url': '/',
            'content': '{% now "Y" %}'
        }
        return data

    def test_post_required(self):
        "Create view requires a POST."
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 405, "GET should not be allowed.")

    def test_valid_response(self):
        "Save new scribble data."
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertTrue(results['valid'])
        scribble = Scribble.objects.get(slug=data['slug'], url=data['url'])
        self.assertEqual(scribble.content, data['content'])

    def test_invalid_template(self):
        "Data should not be saved if the template is invalid."
        data = self.get_valid_data()
        data['content'] = '{% now %}'
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertFalse(results['valid'])
        self.assertEqual(Scribble.objects.count(), 0)

    def test_login_required(self):
        "Return 403 if user is not authenticated."
        self.client.logout()
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)

    def test_permission_required(self):
        "Return 403 if the user does not have permissions to create scribbles."
        self.user.user_permissions.remove(self.add_perm)
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)


class EditFieldTestCase(BaseViewTestCase):
    "Edit a model instance field via scribbler."
    url_name = 'edit-scribble-field'

    def setUp(self):
        self.days_log = DaysLog.objects.create(happenings=self.get_random_string())
        super(EditFieldTestCase, self).setUp()
        self.url = reverse(self.url_name, kwargs=self.get_valid_kwargs())

    def get_valid_data(self):
        data = {
            'content': self.get_random_string(),
        }
        return data

    def get_valid_kwargs(self):
        return {
            'ct_pk': ContentType.objects.get_for_model(self.days_log).pk,
            'instance_pk': self.days_log.pk,
            'field_name': 'happenings',
        }

    def test_post_required(self):
        "Edit field view requires a POST."
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 405, "GET should not be allowed.")

    def test_successful_edit(self):
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertTrue(results['valid'])
        days_log = DaysLog.objects.get(pk=self.days_log.pk)
        self.assertEqual(days_log.happenings, data['content'])

    def test_field_validation_failure(self):
        data = {
            'content': '',
        }
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertFalse(results['valid'])
        self.assertTrue('error' in results)
        err_info = results['error']
        self.assertTrue('message' in err_info)
        self.assertTrue('required' in err_info['message'], err_info['message'])
        user = User.objects.get(pk=self.user.pk)
        self.assertEqual(user.username, self.user.username)

    def test_model_validation_failure(self):
        log2 = DaysLog.objects.create(happenings="Duplicate value")
        data = {
            'content': log2.happenings,
        }
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertFalse(results['valid'])
        self.assertTrue('error' in results)
        err_info = results['error']
        self.assertTrue('message' in err_info)
        self.assertTrue('already exists' in err_info['message'], err_info['message'])
        user = User.objects.get(pk=self.user.pk)
        self.assertEqual(user.username, self.user.username)

    def test_invalid_ct_pk(self):
        kwargs = self.get_valid_kwargs()
        kwargs['ct_pk'] = ContentType.objects.order_by('-id')[0].pk + 1
        url = reverse(self.url_name, kwargs=kwargs)
        response = self.client.post(url, data=self.get_valid_data())
        self.assertEqual(response.status_code, 404)

    def test_invalid_instance_pk(self):
        kwargs = self.get_valid_kwargs()
        kwargs['instance_pk'] = User.objects.order_by('-id')[0].pk + 1
        url = reverse(self.url_name, kwargs=kwargs)
        response = self.client.post(url, data=self.get_valid_data())
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertFalse(results['valid'])
        self.assertTrue('error' in results)
        err_info = results['error']
        self.assertTrue('message' in err_info)
        self.assertTrue('does not exist' in err_info['message'], err_info['message'])

    def test_invalid_field_name(self):
        kwargs = self.get_valid_kwargs()
        kwargs['field_name'] = 'ussserrname'
        url = reverse(self.url_name, kwargs=kwargs)
        response = self.client.post(url, data=self.get_valid_data())
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertFalse(results['valid'])
        self.assertTrue('error' in results)
        err_info = results['error']
        self.assertTrue('message' in err_info)
        self.assertTrue('has no field named' in err_info['message'], err_info['message'])

    def test_login_required(self):
        "Return 403 if user is not authenticated."
        self.client.logout()
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)

    def test_permission_required(self):
        "Return 403 if the user does not have permissions to edit the scribble."
        self.user.user_permissions.remove(self.change_dayslog_perm)
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)


class EditTestCase(BaseViewTestCase):
    "Edit an existing scribble."
    def setUp(self):
        super(EditTestCase, self).setUp()
        self.scribble = self.create_scribble()
        self.url = reverse('edit-scribble', kwargs={'scribble_id': self.scribble.pk})

    def get_valid_data(self):
        "Base valid data."
        data = {
            'content': '{% now "Y" %}'
        }
        return data

    def test_post_required(self):
        "Edit view requires a POST."
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 405, "GET should not be allowed.")

    def test_valid_response(self):
        "Edit an existing scribble."
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertTrue(results['valid'])
        scribble = Scribble.objects.get(pk=self.scribble.pk)
        self.assertEqual(scribble.content, data['content'])

    def test_invalid_template(self):
        "Data should not be saved if the template is invalid."
        data = self.get_valid_data()
        data['content'] = '{% now %}'
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertFalse(results['valid'])
        scribble = Scribble.objects.get(pk=self.scribble.pk)
        self.assertNotEqual(scribble.content, data['content'])

    def test_invalid_pk(self):
        "404 is returned if unknown pk is given."
        self.scribble.delete()
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 404)

    def test_login_required(self):
        "Return 403 if user is not authenticated."
        self.client.logout()
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)

    def test_permission_required(self):
        "Return 403 if the user does not have permissions to edit the scribble."
        self.user.user_permissions.remove(self.change_perm)
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)


class DeleteTestCase(BaseViewTestCase):
    "Delete an existing scribble."

    def setUp(self):
        super(DeleteTestCase, self).setUp()
        self.scribble = self.create_scribble()
        self.url = self.scribble.get_delete_url()

    def get_valid_data(self):
        "Base valid data."
        return {}

    def test_post_required(self):
        "Delete view requires a POST."
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 405, "GET should not be allowed.")

    def test_valid_response(self):
        "Delete an existing scribble."
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 200)
        self.assertRaises(Scribble.DoesNotExist, Scribble.objects.get, pk=self.scribble.pk)

    def test_invalid_pk(self):
        "404 is returned if unknown pk is given."
        self.scribble.delete()
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 404)

    def test_login_required(self):
        "Return 403 if user is not authenticated."
        self.client.logout()
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)

    def test_permission_required(self):
        "Return 403 if the user does not have permissions to delete the scribble."
        self.user.user_permissions.remove(self.delete_perm)
        data = self.get_valid_data()
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, 403)
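# The JSON protocol exercised by every case above is uniform: each endpoint
# is POST-only (405 on GET), permission-gated (403), and on success answers
# 200 with a body shaped like {"valid": bool, "html": str, "error": {...}}.
# A minimal illustrative helper for driving the preview endpoint from any
# logged-in test client; this helper is a sketch, not part of the original
# suite:
def post_preview(client, slug, url, content):
    "POST to the preview view and return the decoded JSON payload."
    response = client.post(
        reverse('preview-scribble'),
        data={'slug': slug, 'url': url, 'content': content},
    )
    assert response.status_code == 200
    return json.loads(response.content.decode('utf-8'))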
{ "content_hash": "1435bdcf131a10fb0e888390f91e8bc4", "timestamp": "", "source": "github", "line_count": 394, "max_line_length": 91, "avg_line_length": 38.868020304568525, "alnum_prop": 0.6330808410604676, "repo_name": "imposeren/django-scribbler", "id": "91466c5128a68124cbd4acf077cb34ae9ad2305d", "size": "15314", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scribbler/tests/test_views.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "9135" }, { "name": "HTML", "bytes": "51799" }, { "name": "JavaScript", "bytes": "37395" }, { "name": "Makefile", "bytes": "2520" }, { "name": "Nginx", "bytes": "108" }, { "name": "Python", "bytes": "70943" }, { "name": "Shell", "bytes": "176" } ], "symlink_target": "" }
from datetime import datetime, timedelta, timezone import graphene from django.urls import reverse from freezegun import freeze_time from .....account.error_codes import AccountErrorCode from .....core.jwt import ( JWT_ACCESS_TYPE, JWT_REFRESH_TYPE, create_refresh_token, jwt_decode, ) from .....core.utils import build_absolute_uri from ....tests.utils import get_graphql_content from ...mutations.authentication import _get_new_csrf_token MUTATION_CREATE_TOKEN = """ mutation tokenCreate($email: String!, $password: String!, $audience: String){ tokenCreate(email: $email, password: $password, audience: $audience) { token refreshToken csrfToken user { email } errors { field message } errors { field message code } } } """ @freeze_time("2020-03-18 12:00:00") def test_create_token(api_client, customer_user, settings): variables = {"email": customer_user.email, "password": customer_user._password} response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) data = content["data"]["tokenCreate"] user_email = data["user"]["email"] assert customer_user.email == user_email assert content["data"]["tokenCreate"]["errors"] == [] token = data["token"] refreshToken = data["refreshToken"] payload = jwt_decode(token) assert payload["email"] == customer_user.email assert payload["user_id"] == graphene.Node.to_global_id("User", customer_user.id) assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow() expected_expiration_datetime = datetime.utcnow() + settings.JWT_TTL_ACCESS assert datetime.fromtimestamp(payload["exp"]) == expected_expiration_datetime assert payload["type"] == JWT_ACCESS_TYPE payload = jwt_decode(refreshToken) assert payload["email"] == customer_user.email assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow() expected_expiration_datetime = datetime.utcnow() + settings.JWT_TTL_REFRESH assert datetime.fromtimestamp(payload["exp"]) == expected_expiration_datetime assert payload["type"] == JWT_REFRESH_TYPE assert payload["token"] == customer_user.jwt_token_key assert payload["iss"] == build_absolute_uri(reverse("api")) @freeze_time("2020-03-18 12:00:00") def test_create_token_with_audience(api_client, customer_user, settings): audience = "dashboard" variables = { "email": customer_user.email, "password": customer_user._password, "audience": audience, } response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) data = content["data"]["tokenCreate"] user_email = data["user"]["email"] assert customer_user.email == user_email assert content["data"]["tokenCreate"]["errors"] == [] token = data["token"] refreshToken = data["refreshToken"] payload = jwt_decode(token) assert payload["email"] == customer_user.email assert payload["user_id"] == graphene.Node.to_global_id("User", customer_user.id) assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow() expected_expiration_datetime = datetime.utcnow() + settings.JWT_TTL_ACCESS assert datetime.fromtimestamp(payload["exp"]) == expected_expiration_datetime assert payload["type"] == JWT_ACCESS_TYPE assert payload["aud"] == f"custom:{audience}" payload = jwt_decode(refreshToken) assert payload["email"] == customer_user.email assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow() expected_expiration_datetime = datetime.utcnow() + settings.JWT_TTL_REFRESH assert datetime.fromtimestamp(payload["exp"]) == expected_expiration_datetime assert payload["type"] == JWT_REFRESH_TYPE assert payload["token"] == customer_user.jwt_token_key 
assert payload["aud"] == f"custom:{audience}" @freeze_time("2020-03-18 12:00:00") def test_create_token_sets_cookie(api_client, customer_user, settings, monkeypatch): csrf_token = _get_new_csrf_token() monkeypatch.setattr( "saleor.graphql.account.mutations.authentication._get_new_csrf_token", lambda: csrf_token, ) variables = {"email": customer_user.email, "password": customer_user._password} response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) expected_refresh_token = create_refresh_token( customer_user, {"csrfToken": csrf_token} ) refresh_token = response.cookies["refreshToken"] assert refresh_token.value == expected_refresh_token expected_expires = datetime.utcnow() + settings.JWT_TTL_REFRESH expected_expires += timedelta(seconds=1) expires = datetime.strptime(refresh_token["expires"], "%a, %d %b %Y %H:%M:%S %Z") assert expires == expected_expires assert refresh_token["httponly"] assert refresh_token["secure"] def test_create_token_invalid_password(api_client, customer_user): variables = {"email": customer_user.email, "password": "wrongpassword"} expected_error_code = AccountErrorCode.INVALID_CREDENTIALS.value.upper() response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) response_error = content["data"]["tokenCreate"]["errors"][0] assert response_error["code"] == expected_error_code assert response_error["field"] == "email" def test_create_token_invalid_email(api_client, customer_user): variables = {"email": "wrongemail", "password": "wrongpassword"} expected_error_code = AccountErrorCode.INVALID_CREDENTIALS.value.upper() response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) response_error = content["data"]["tokenCreate"]["errors"][0] assert response_error["code"] == expected_error_code assert response_error["field"] == "email" def test_create_token_unconfirmed_email(api_client, customer_user): # given variables = {"email": customer_user.email, "password": customer_user._password} customer_user.is_active = False customer_user.save() expected_error_code = AccountErrorCode.ACCOUNT_NOT_CONFIRMED.value.upper() # when response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) response_error = content["data"]["tokenCreate"]["errors"][0] # then assert response_error["code"] == expected_error_code assert response_error["field"] == "email" def test_create_token_deactivated_user(api_client, customer_user): # given variables = {"email": customer_user.email, "password": customer_user._password} customer_user.is_active = False customer_user.last_login = datetime(2020, 3, 18, tzinfo=timezone.utc) customer_user.save() expected_error_code = AccountErrorCode.INACTIVE.value.upper() # when response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) response_error = content["data"]["tokenCreate"]["errors"][0] # then assert response_error["code"] == expected_error_code assert response_error["field"] == "email" @freeze_time("2020-03-18 12:00:00") def test_create_token_active_user_logged_before(api_client, customer_user, settings): variables = {"email": customer_user.email, "password": customer_user._password} customer_user.last_login = datetime(2020, 3, 18, tzinfo=timezone.utc) customer_user.save() response = api_client.post_graphql(MUTATION_CREATE_TOKEN, variables) content = get_graphql_content(response) data = content["data"]["tokenCreate"] user_email = data["user"]["email"] assert 
customer_user.email == user_email assert content["data"]["tokenCreate"]["errors"] == [] token = data["token"] refreshToken = data["refreshToken"] payload = jwt_decode(token) assert payload["email"] == customer_user.email assert payload["user_id"] == graphene.Node.to_global_id("User", customer_user.id) assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow() expected_expiration_datetime = datetime.utcnow() + settings.JWT_TTL_ACCESS assert datetime.fromtimestamp(payload["exp"]) == expected_expiration_datetime assert payload["type"] == JWT_ACCESS_TYPE payload = jwt_decode(refreshToken) assert payload["email"] == customer_user.email assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow() expected_expiration_datetime = datetime.utcnow() + settings.JWT_TTL_REFRESH assert datetime.fromtimestamp(payload["exp"]) == expected_expiration_datetime assert payload["type"] == JWT_REFRESH_TYPE assert payload["token"] == customer_user.jwt_token_key assert payload["iss"] == build_absolute_uri(reverse("api"))
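# For reference, a minimal sketch of inspecting such a token outside the
# test suite. It assumes PyJWT (the library that saleor.core.jwt builds on);
# decoding with signature verification disabled is for debugging only, never
# for authenticating requests:
import jwt  # PyJWT


def describe_token(token):
    """Return the token type and its lifetime in seconds (exp - iat)."""
    payload = jwt.decode(token, options={"verify_signature": False})
    return payload["type"], payload["exp"] - payload["iat"]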
{ "content_hash": "daeeec432bc9f6764acf02b45b3b6c41", "timestamp": "", "source": "github", "line_count": 223, "max_line_length": 86, "avg_line_length": 40.30493273542601, "alnum_prop": 0.6838006230529595, "repo_name": "mociepka/saleor", "id": "784a560aa7c60242253a49bf218792ac90319561", "size": "8988", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "saleor/graphql/account/tests/mutations/test_token_create.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "2228" }, { "name": "HTML", "bytes": "249248" }, { "name": "Procfile", "bytes": "290" }, { "name": "Python", "bytes": "12686831" }, { "name": "Shell", "bytes": "439" } ], "symlink_target": "" }
'''
Move words around

@author Justin Hileman <http://justinhileman.com>
'''

import cp_actions as cp


def act(controller, bundle, options):
    context = cp.get_context(controller)
    line_ending = cp.get_line_ending(context)
    direction = cp.get_option(options, 'direction', 'right')

    line_text, line_range = cp.lines_and_range(context)
    selection, select_range = cp.selection_and_range(context)
    if select_range.length == 0:
        selection, select_range = cp.words_and_range(context)
        # Apparent debugging stub in this copy: announce the word(s) found
        # and bail out instead of moving them.
        cp.say(context, 'word(s)', '||%s||' % selection)
        return

    # NOTE: this copy of the script uses `text`, `target_range` and (in the
    # 'left' branch) `line_after` without ever defining them; the defining
    # lines were evidently lost. The assignments below are a plausible
    # reconstruction mirroring the intact branch below: the moved text
    # starts out as the selection, and the replacement target starts out as
    # the selected range.
    text = selection
    target_range = select_range

    if direction.lower() == 'left':
        prefix = line_text[:(select_range.location - line_range.location)]
        if not prefix.strip():
            cp.beep()
            return
        # Reconstructed to mirror cp.get_line_before() below; the helper
        # name is assumed, not confirmed.
        line_after = cp.get_line_after(context, target_range)
        if line_after is None:
            return
        # we care about the original length of line after, not the balanced
        # one we'll get in a second
        len_line_after = len(line_after)
        line_after, text = cp.balance_line_endings(line_after, text, line_ending)
        select_start = select_range.location + len(line_after)
        select_end = min(select_start + select_range.length, len(context.string()))
        text = line_after + text
        select_range = cp.new_range(select_start, max(0, select_end - select_start))
        # The original expression here reduced algebraically to extending
        # the target by the pre-balance length of line_after.
        target_range = cp.new_range(target_range.location,
                                    max(0, target_range.length + len_line_after))
    else:
        line_before = cp.get_line_before(context, target_range)
        if line_before is None:
            return
        # we care about the original length of line before, not the balanced
        # one we'll get in a second
        len_line_before = len(line_before)
        text, line_before = cp.balance_line_endings(text, line_before, line_ending)
        text = text + line_before
        select_range = cp.new_range(select_range.location - len_line_before,
                                    select_range.length)
        target_range = cp.new_range(target_range.location - len_line_before,
                                    target_range.length + len_line_before)

    cp.insert_text_and_select(context, text, target_range, select_range)
{ "content_hash": "168220caf4f99e2ea8827438d3d49f26", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 142, "avg_line_length": 40.054545454545455, "alnum_prop": 0.6300499319110304, "repo_name": "bobthecow/ManipulateCoda", "id": "7727996c1b81f3d269c59b50bcbc8a097d0d8c55", "size": "2203", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/Support/Scripts/LCMoveWord.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "46926" }, { "name": "JavaScript", "bytes": "15800" }, { "name": "Objective-C", "bytes": "6873" }, { "name": "Python", "bytes": "1566052" }, { "name": "TeX", "bytes": "38517" } ], "symlink_target": "" }
import os
import shutil
import glob

# Clear out the previously delivered headers and library so the delivery
# directory only contains artifacts from the current build.
for e in glob.glob(r'delivery/include/*.h'):
    os.remove(e)
if os.path.exists(r'delivery/crypt.lib'):
    os.remove(r'delivery/crypt.lib')

# Added guard: make sure the target directory exists before copying into
# it (the rest of the script assumes the delivery layout shown here).
os.makedirs(r'delivery/include', exist_ok=True)

# Ship the public headers and the freshly built static library.
for h in glob.glob(r'crypt/*.h'):
    shutil.copy(h, r'delivery/include')
shutil.copy(r'x64/Release/crypt.lib', r'delivery')
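# An illustrative, parameterised variant of the same delivery step using
# pathlib; the default paths match the script above, and this function is
# a sketch, not part of the original script:
from pathlib import Path

def deliver(headers_dir='crypt', lib='x64/Release/crypt.lib', dest='delivery'):
    include = Path(dest) / 'include'
    include.mkdir(parents=True, exist_ok=True)
    for old in include.glob('*.h'):      # drop stale headers
        old.unlink()
    for h in Path(headers_dir).glob('*.h'):
        shutil.copy(str(h), str(include))
    shutil.copy(lib, dest)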
{ "content_hash": "da623eb96e0fd56efa3719af857c9a69", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 50, "avg_line_length": 21.428571428571427, "alnum_prop": 0.7066666666666667, "repo_name": "KaiSta/scrates", "id": "cf87c5a6bc17ad37f52d0be297f4bca7e78b2cfd", "size": "300", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "deploy.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "49" }, { "name": "C++", "bytes": "1197277" }, { "name": "Makefile", "bytes": "652" }, { "name": "Python", "bytes": "1612" }, { "name": "QML", "bytes": "30507" }, { "name": "QMake", "bytes": "1719" } ], "symlink_target": "" }
"""Support for interacting with Linode nodes.""" import logging import voluptuous as vol from homeassistant.components.linode import ( ATTR_CREATED, ATTR_IPV4_ADDRESS, ATTR_IPV6_ADDRESS, ATTR_MEMORY, ATTR_NODE_ID, ATTR_NODE_NAME, ATTR_REGION, ATTR_VCPUS, CONF_NODES, DATA_LINODE) from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['linode'] DEFAULT_NAME = 'Node' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_NODES): vol.All(cv.ensure_list, [cv.string]), }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Linode Node switch.""" linode = hass.data.get(DATA_LINODE) nodes = config.get(CONF_NODES) dev = [] for node in nodes: node_id = linode.get_node_id(node) if node_id is None: _LOGGER.error("Node %s is not available", node) return dev.append(LinodeSwitch(linode, node_id)) add_entities(dev, True) class LinodeSwitch(SwitchDevice): """Representation of a Linode Node switch.""" def __init__(self, li, node_id): """Initialize a new Linode sensor.""" self._linode = li self._node_id = node_id self.data = None self._state = None self._attrs = {} self._name = None @property def name(self): """Return the name of the switch.""" return self._name @property def is_on(self): """Return true if switch is on.""" return self._state @property def device_state_attributes(self): """Return the state attributes of the Linode Node.""" return self._attrs def turn_on(self, **kwargs): """Boot-up the Node.""" if self.data.status != 'running': self.data.boot() def turn_off(self, **kwargs): """Shutdown the nodes.""" if self.data.status == 'running': self.data.shutdown() def update(self): """Get the latest data from the device and update the data.""" self._linode.update() if self._linode.data is not None: for node in self._linode.data: if node.id == self._node_id: self.data = node if self.data is not None: self._state = self.data.status == 'running' self._attrs = { ATTR_CREATED: self.data.created, ATTR_NODE_ID: self.data.id, ATTR_NODE_NAME: self.data.label, ATTR_IPV4_ADDRESS: self.data.ipv4, ATTR_IPV6_ADDRESS: self.data.ipv6, ATTR_MEMORY: self.data.specs.memory, ATTR_REGION: self.data.region.country, ATTR_VCPUS: self.data.specs.vcpus, } self._name = self.data.label
{ "content_hash": "56533ab74e8bd429513d4b3edcc32357", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 73, "avg_line_length": 30.333333333333332, "alnum_prop": 0.5885989010989011, "repo_name": "nugget/home-assistant", "id": "0cab2f4d0f25fa26b1108ec768960cd05c7e79f9", "size": "2912", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/linode/switch.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1175" }, { "name": "Dockerfile", "bytes": "1081" }, { "name": "HCL", "bytes": "826" }, { "name": "Python", "bytes": "14492390" }, { "name": "Ruby", "bytes": "745" }, { "name": "Shell", "bytes": "17526" } ], "symlink_target": "" }
from scrapy.spider import BaseSpider from scrapy.selector import HtmlXPathSelector from scrapy.http import Request, HtmlResponse from scrapy.utils.response import get_base_url from scrapy.utils.url import urljoin_rfc from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader class HuntinglifeNetSpider(BaseSpider): name = 'huntinglife.net' allowed_domains = ['huntinglife.net'] start_urls = ('http://webshop.huntinglife.net/shop.aspx',) def parse(self, response): hxs = HtmlXPathSelector(response) for url in hxs.select(u'//div[@class="lm_catalog"]/ul/li/ul/li/a/@href').extract(): yield Request(url, callback=self.parse_product) def parse_product(self, response): hxs = HtmlXPathSelector(response) for item in hxs.select(u'//td/div[contains(@class,"ProductDisplayList")]'): product_loader = ProductLoader(item=Product(), selector=item) product_loader.add_xpath('name', u'.//div[@class="ProductDisplayList_Name"]/a/text()') price = item.select(u'.//span[@class="price"]/text()').extract()[0] price = price.strip().lstrip('DKK ').replace('.', '').replace(',', '.') product_loader.add_value('price', price) product_loader.add_xpath('url', u'.//div[@class="ProductDisplayList_Name"]/a/@href') product = product_loader.load_item() # Product page contains the full name, # list has something shorter without important information like caliber yield Request(product['url'], meta={'product':product}, callback=self.parse_product_name) def parse_product_name(self, response): hxs = HtmlXPathSelector(response) product = response.meta['product'] product['name'] = hxs.select(u'//h1/text()').extract()[0].strip() yield product
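# The inline price clean-up above turns Danish-formatted strings such as
# 'DKK 1.234,56' into 1234.56. Note that str.lstrip('DKK ') strips a *set
# of characters*, not a literal prefix, so it only works because these
# prices never begin with D, K or a space after the currency marker. A
# standalone, more defensive sketch of the same normalisation (illustrative
# only, not used by the spider):
import re

def parse_dkk_price(text):
    """Convert e.g. 'DKK 1.234,56' to the float 1234.56."""
    match = re.search(r'[\d.,]+', text)
    if match is None:
        raise ValueError('no price found in %r' % text)
    return float(match.group(0).replace('.', '').replace(',', '.'))

assert parse_dkk_price('DKK 1.234,56') == 1234.56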
{ "content_hash": "9b1eb44217cc2971593df0064fcb0481", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 101, "avg_line_length": 44.666666666666664, "alnum_prop": 0.662046908315565, "repo_name": "0--key/lib", "id": "b85808b5ed7dda1546104870b00ff52f31167aa9", "size": "1876", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "portfolio/Python/scrapy/sie_hunting/huntinglife_net.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "28210" }, { "name": "Emacs Lisp", "bytes": "76390" }, { "name": "HTML", "bytes": "1136671" }, { "name": "JavaScript", "bytes": "27718" }, { "name": "PHP", "bytes": "378537" }, { "name": "Python", "bytes": "1892998" }, { "name": "Shell", "bytes": "4030" } ], "symlink_target": "" }
from rdkit import Geometry from rdkit.Chem.FeatMaps import FeatMaps, FeatMapPoint import re """ ScoreMode=All DirScoreMode=Ignore BeginParams family=Aromatic radius=2.5 width=1.0 profile=Gaussian family=Acceptor radius=1.5 EndParams # optional BeginPoints family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0) family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1) family=Acceptor pos=(1.0,1.0,2.0) weight=1.25 EndPoints """ class FeatMapParseError(ValueError): pass class FeatMapParser(object): data = None def __init__(self, file=None, data=None): if file: self.data = file.readlines() elif data: self.SetData(data) self._lineNum = 0 def SetData(self, data): if isinstance(data, str): self.data = data.split('\n') else: self.data = data self._lineNum = 0 def _NextLine(self): txt = '' while 1: try: l = self.data[self._lineNum].split('#')[0].strip() except IndexError: break self._lineNum += 1 if l: txt += l if l[-1] != '\\': break return txt def Parse(self, featMap=None): if featMap is None: featMap = FeatMaps.FeatMap() l = self._NextLine().strip() while l: splitL = l.split('=') if len(splitL) == 1: keyword = splitL[0].strip().lower() if keyword == 'beginpoints': pts = self.ParseFeatPointBlock() for pt in pts: featMap.AddFeatPoint(pt) elif keyword == 'beginparams': featMap.params = self.ParseParamBlock() else: raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum)) else: keyword = splitL[0].strip().lower() val = splitL[1].strip() if keyword == 'scoremode': try: featMap.scoreMode = getattr(FeatMaps.FeatMapScoreMode, val) except AttributeError: raise FeatMapParseError('ScoreMode %s not recognized on line %d' % (val, self._lineNum)) elif keyword == 'dirscoremode': try: featMap.dirScoreMode = getattr(FeatMaps.FeatDirScoreMode, val) except AttributeError: raise FeatMapParseError('DirScoreMode %s not recognized on line %d' % (val, self._lineNum)) else: raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum)) l = self._NextLine().strip() return featMap def ParseParamBlock(self): paramLineSplitter = re.compile(r'([a-zA-Z]+) *= *(\S+)') params = {} l = self._NextLine() while l and l != 'EndParams': param = FeatMaps.FeatMapParams() vals = paramLineSplitter.findall(l) for name, val in vals: name = name.lower() if name == 'family': family = val elif name == 'radius': param.radius = float(val) elif name == 'width': param.width = float(val) elif name == 'profile': try: param.featProfile = getattr(param.FeatProfile, val) except AttributeError: raise FeatMapParseError('Profile %s not recognized on line %d' % (val, self._lineNum)) else: raise FeatMapParseError('FeatMapParam option %s not recognized on line %d' % (name, self._lineNum)) params[family] = param l = self._NextLine() if l != 'EndParams': raise FeatMapParseError('EndParams line not found') return params def _parsePoint(self, txt): txt = txt.strip() startP = 0 endP = len(txt) if txt[0] == '(': startP += 1 if txt[-1] == ')': endP -= 1 txt = txt[startP:endP] splitL = txt.split(',') if len(splitL) != 3: raise ValueError('Bad location string') vs = [float(x) for x in splitL] pt = Geometry.Point3D(vs[0], vs[1], vs[2]) return pt def ParseFeatPointBlock(self): featLineSplitter = re.compile(r'([a-zA-Z]+) *= *') feats = [] l = self._NextLine() while l and l != 'EndPoints': vals = featLineSplitter.split(l) while vals.count(''): vals.remove('') p = FeatMapPoint.FeatMapPoint() i = 0 while i < len(vals): name = vals[i].lower() if name == 'family': i += 1 val = vals[i].strip() 
p.SetFamily(val) elif name == 'weight': i += 1 val = float(vals[i]) p.weight = val elif name == 'pos': i += 1 val = vals[i] pos = self._parsePoint(val) p.SetPos(pos) elif name == 'dir': i += 1 val = vals[i] pos = self._parsePoint(val) p.featDirs.append(pos) else: raise FeatMapParseError('FeatPoint option %s not recognized on line %d' % (name, self._lineNum)) i += 1 feats.append(p) l = self._NextLine() return feats
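# A minimal usage sketch: feed the example block from the module docstring
# through the parser and check the attributes that Parse/ParseParamBlock
# set on the feature map (scoreMode and the params dict):
if __name__ == '__main__':
    demo = """\
ScoreMode=All
BeginParams
  family=Aromatic radius=2.5 width=1.0 profile=Gaussian
  family=Acceptor radius=1.5
EndParams
BeginPoints
  family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
EndPoints
"""
    fm = FeatMapParser(data=demo).Parse()
    print(fm.scoreMode == FeatMaps.FeatMapScoreMode.All)  # True
    print(sorted(fm.params))  # ['Acceptor', 'Aromatic']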
{ "content_hash": "3a6e55a7ce50a2bcacc762f30ad95d0a", "timestamp": "", "source": "github", "line_count": 181, "max_line_length": 100, "avg_line_length": 27.85635359116022, "alnum_prop": 0.5509718365727886, "repo_name": "rvianello/rdkit", "id": "12db430bbc7acdeb96231ea65d5ada02cec0c2dc", "size": "5307", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "rdkit/Chem/FeatMaps/FeatMapParser.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "385" }, { "name": "C", "bytes": "227962" }, { "name": "C#", "bytes": "6745" }, { "name": "C++", "bytes": "8796795" }, { "name": "CMake", "bytes": "632104" }, { "name": "Fortran", "bytes": "7661" }, { "name": "HTML", "bytes": "18138" }, { "name": "Java", "bytes": "301151" }, { "name": "JavaScript", "bytes": "11595" }, { "name": "Jupyter Notebook", "bytes": "43461" }, { "name": "LLVM", "bytes": "30376" }, { "name": "Lex", "bytes": "4508" }, { "name": "Makefile", "bytes": "10552" }, { "name": "Objective-C", "bytes": "298" }, { "name": "Python", "bytes": "3363330" }, { "name": "QMake", "bytes": "389" }, { "name": "SMT", "bytes": "3010" }, { "name": "Shell", "bytes": "9082" }, { "name": "Smarty", "bytes": "5864" }, { "name": "Yacc", "bytes": "51959" } ], "symlink_target": "" }
"""Tests for soft round.""" from absl.testing import parameterized import tensorflow as tf from tensorflow_compression.python.ops import round_ops class SoftRoundTest(tf.test.TestCase, parameterized.TestCase): def test_soft_round_small_alpha_is_identity(self): x = tf.linspace(-2., 2., 50) y = round_ops.soft_round(x, alpha=1e-13) self.assertAllClose(x, y) def test_soft_round_large_alpha_is_round(self): # We don't care what happens exactly near half-integer values: for offset in range(-5, 5): x = tf.linspace(offset - 0.499, offset + 0.499, 100) y = round_ops.soft_round(x, alpha=2000.0) self.assertAllClose(tf.round(x), y, atol=0.02) def test_soft_inverse_round_small_alpha_is_identity(self): x = tf.linspace(-2., 2., 50) y = round_ops.soft_round_inverse(x, alpha=1e-13) self.assertAllEqual(x, y) def test_soft_inverse_is_actual_inverse(self): x = tf.constant([-1.25, -0.75, 0.75, 1.25], dtype=tf.float32) y = round_ops.soft_round(x, alpha=2.0) x2 = round_ops.soft_round_inverse(y, alpha=2.0) self.assertAllClose(x, x2) def test_soft_round_inverse_large_alpha_is_ceil_minus_half(self): # We don't care what happens exactly near integer values: for offset in range(-5, 5): x = tf.linspace(offset + 0.001, offset + 0.999, 100) y = round_ops.soft_round_inverse(x, alpha=5000.0) self.assertAllClose(tf.math.ceil(x) - 0.5, y, atol=0.001) def test_conditional_mean_large_alpha_is_round(self): # We don't care what happens exactly near integer values: for offset in range(-5, 5): x = tf.linspace(offset + 0.001, offset + 0.999, 100) y = round_ops.soft_round_conditional_mean(x, alpha=5000.0) self.assertAllClose(tf.math.round(x), y, atol=0.001) @parameterized.parameters(0., 1e-6, 1e-2, 5., 1e6) def test_soft_round_values_and_gradients_are_finite(self, alpha): x = tf.linspace(0., 1., 11) # covers exact integers and half-integers with tf.GradientTape() as tape: tape.watch(x) y = round_ops.soft_round(x, alpha=alpha) dy = tape.gradient(y, x) self.assertAllEqual(tf.math.is_finite(y), tf.ones(x.shape, dtype=bool)) self.assertAllEqual(tf.math.is_finite(dy), tf.ones(x.shape, dtype=bool)) @parameterized.parameters(0., 1e-6, 1e-2, 5., 1e6) def test_soft_round_inverse_values_and_gradients_are_finite(self, alpha): x = tf.linspace(-.5, .5, 11) # covers exact integers and half-integers with tf.GradientTape() as tape: tape.watch(x) y = round_ops.soft_round_inverse(x, alpha=alpha) dy = tape.gradient(y, x) self.assertAllEqual(tf.math.is_finite(y), tf.ones(x.shape, dtype=bool)) is_finite = tf.math.is_finite(dy) expected_finite = tf.ones(dy.shape, dtype=bool) if alpha > 15: # We allow non-finite values at 0 for large alphas, since the function # simply is extremely steep there. expected_finite = tf.tensor_scatter_nd_update( expected_finite, [[5]], [is_finite[5]]) self.assertAllEqual(is_finite, expected_finite) if __name__ == "__main__": tf.test.main()
{ "content_hash": "0db0d49fdbd5b60b2865385dcbb7adde", "timestamp": "", "source": "github", "line_count": 77, "max_line_length": 76, "avg_line_length": 40.44155844155844, "alnum_prop": 0.6657032755298651, "repo_name": "tensorflow/compression", "id": "6e9ad7ae3c8393047da725a7d9e23868e3053372", "size": "3791", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow_compression/python/ops/round_ops_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "251322" }, { "name": "Jupyter Notebook", "bytes": "128509" }, { "name": "Python", "bytes": "575673" }, { "name": "Starlark", "bytes": "14557" } ], "symlink_target": "" }
from __future__ import absolute_import, print_function import os import glob import numpy as np import six from ..utils.ioutils import download, extract class TColor128(object): """`TColor128 <http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html>`_ Dataset. Publication: ``Encoding color information for visual tracking: algorithms and benchmark``, P. Liang, E. Blasch and H. Ling, TIP, 2015. Args: root_dir (string): Root directory of dataset where sequence folders exist. """ def __init__(self, root_dir, download=True): super(TColor128, self).__init__() self.root_dir = root_dir if download: self._download(root_dir) self._check_integrity(root_dir) self.anno_files = sorted(glob.glob( os.path.join(root_dir, '*/*_gt.txt'))) self.seq_dirs = [os.path.dirname(f) for f in self.anno_files] self.seq_names = [os.path.basename(d) for d in self.seq_dirs] # valid frame range for each sequence self.range_files = [glob.glob( os.path.join(d, '*_frames.txt'))[0] for d in self.seq_dirs] def __getitem__(self, index): r""" Args: index (integer or string): Index or name of a sequence. Returns: tuple: (img_files, anno), where ``img_files`` is a list of file names and ``anno`` is a N x 4 (rectangles) numpy array. """ if isinstance(index, six.string_types): if not index in self.seq_names: raise Exception('Sequence {} not found.'.format(index)) index = self.seq_names.index(index) # load valid frame range frames = np.loadtxt( self.range_files[index], dtype=int, delimiter=',') img_files = [os.path.join( self.seq_dirs[index], 'img/%04d.jpg' % f) for f in range(frames[0], frames[1] + 1)] # load annotations anno = np.loadtxt(self.anno_files[index], delimiter=',') assert len(img_files) == len(anno) assert anno.shape[1] == 4 return img_files, anno def __len__(self): return len(self.seq_names) def _download(self, root_dir): if not os.path.isdir(root_dir): os.makedirs(root_dir) elif len(os.listdir(root_dir)) > 100: print('Files already downloaded.') return url = 'http://www.dabi.temple.edu/~hbling/data/TColor-128/Temple-color-128.zip' zip_file = os.path.join(root_dir, 'Temple-color-128.zip') print('Downloading to %s...' % zip_file) download(url, zip_file) print('\nExtracting to %s...' % root_dir) extract(zip_file, root_dir) return root_dir def _check_integrity(self, root_dir): seq_names = os.listdir(root_dir) seq_names = [n for n in seq_names if not n[0] == '.'] if os.path.isdir(root_dir) and len(seq_names) > 0: # check each sequence folder for seq_name in seq_names: seq_dir = os.path.join(root_dir, seq_name) if not os.path.isdir(seq_dir): print('Warning: sequence %s not exists.' % seq_name) else: # dataset not exists raise Exception('Dataset not found or corrupted. ' + 'You can use download=True to download it.')
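# A minimal usage sketch, following the docstrings above (the root
# directory is a placeholder):
#
#   dataset = TColor128('data/TColor-128', download=True)
#   img_files, anno = dataset[0]   # indexing by name also works,
#                                  # e.g. dataset[dataset.seq_names[0]]
#   print(len(dataset), len(img_files), anno.shape)   # anno is N x 4 boxes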
{ "content_hash": "784fbf897dba721149c7f3a5f386ad48", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 97, "avg_line_length": 35.40816326530612, "alnum_prop": 0.5636887608069164, "repo_name": "got-10k/toolkit", "id": "452b59bd34ddf00e8195ed7e1b8ff77c90efcd2d", "size": "3470", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "got10k/datasets/tcolor128.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "138011" } ], "symlink_target": "" }
from __future__ import print_function import shutil import sys import os import errno import string import re import traceback import time import datetime import copy import glob from math import ceil, trunc import collections import subprocess from testglobals import * from testutil import * if config.use_threads: import threading try: import thread except ImportError: # Python 3 import _thread as thread global wantToStop wantToStop = False def stopNow(): global wantToStop wantToStop = True def stopping(): return wantToStop # Options valid for the current test only (these get reset to # testdir_testopts after each test). global testopts_local if config.use_threads: testopts_local = threading.local() else: class TestOpts_Local: pass testopts_local = TestOpts_Local() def getTestOpts(): return testopts_local.x def setLocalTestOpts(opts): global testopts_local testopts_local.x=opts def isStatsTest(): opts = getTestOpts() return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0 # This can be called at the top of a file of tests, to set default test options # for the following tests. def setTestOpts( f ): global thisdir_settings thisdir_settings = [thisdir_settings, f] # ----------------------------------------------------------------------------- # Canned setup functions for common cases. eg. for a test you might say # # test('test001', normal, compile, ['']) # # to run it without any options, but change it to # # test('test001', expect_fail, compile, ['']) # # to expect failure for this test. def normal( name, opts ): return; def skip( name, opts ): opts.skip = 1 def expect_fail( name, opts ): opts.expect = 'fail'; def reqlib( lib ): return lambda name, opts, l=lib: _reqlib (name, opts, l ) # Cache the results of looking to see if we have a library or not. # This makes quite a difference, especially on Windows. 
have_lib = {}

def _reqlib( name, opts, lib ):
    if lib in have_lib:
        got_it = have_lib[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        got_it = r == 0
        have_lib[lib] = got_it

    if not got_it:
        opts.expect = 'missing-lib'

def req_haddock( name, opts ):
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_output( name, opts ):
    opts.ignore_output = 1

def no_stdin( name, opts ):
    opts.no_stdin = 1

def combined_output( name, opts ):
    opts.combined_output = True

# -----

def expect_fail_for( ways ):
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail';

def expect_broken_for( bug, ways ):
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    global brokens
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

# -----

def omit_ways( ways ):
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def omit_compiler_types( compiler_types ):
    return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)

def _omit_compiler_types( name, opts, compiler_types ):
    if config.compiler_type in compiler_types:
        opts.skip = 1

# -----

def only_compiler_types( compiler_types ):
    return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)

def _only_compiler_types( name, opts, compiler_types ):
    if config.compiler_type not in compiler_types:
        opts.skip = 1

# -----

def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f);

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v);

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val );
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value. See
        # http://www.tldp.org/LDP/abs/html/exitcodes.html
        # Mac OS X appears to behave the same way; at least, the Mac OS X
        # builder's behavior suggests this.
return exit_code( val+128 ); # ----- def timeout_multiplier( val ): return lambda name, opts, v=val: _timeout_multiplier(name, opts, v) def _timeout_multiplier( name, opts, v ): opts.timeout_multiplier = v # ----- def extra_run_opts( val ): return lambda name, opts, v=val: _extra_run_opts(name, opts, v); def _extra_run_opts( name, opts, v ): opts.extra_run_opts = v # ----- def extra_hc_opts( val ): return lambda name, opts, v=val: _extra_hc_opts(name, opts, v); def _extra_hc_opts( name, opts, v ): opts.extra_hc_opts = v # ----- def extra_clean( files ): return lambda name, opts, v=files: _extra_clean(name, opts, v); def _extra_clean( name, opts, v ): opts.clean_files = v # ----- def stats_num_field( field, expecteds ): return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e); def _stats_num_field( name, opts, field, expecteds ): if field in opts.stats_range_fields: framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check') if type(expecteds) is list: for (b, expected, dev) in expecteds: if b: opts.stats_range_fields[field] = (expected, dev) return framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check') else: (expected, dev) = expecteds opts.stats_range_fields[field] = (expected, dev) def compiler_stats_num_field( field, expecteds ): return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e); def _compiler_stats_num_field( name, opts, field, expecteds ): if field in opts.compiler_stats_range_fields: framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check') # Compiler performance numbers change when debugging is on, making the results # useless and confusing. Therefore, skip if debugging is on. 
if compiler_debugged(): skip(name, opts) for (b, expected, dev) in expecteds: if b: opts.compiler_stats_range_fields[field] = (expected, dev) return framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check') # ----- def when(b, f): # When list_brokens is on, we want to see all expect_broken calls, # so we always do f if b or config.list_broken: return f else: return normal def unless(b, f): return when(not b, f) def doing_ghci(): return 'ghci' in config.run_ways def ghci_dynamic( ): return config.ghc_dynamic def fast(): return config.fast def platform( plat ): return config.platform == plat def opsys( os ): return config.os == os def arch( arch ): return config.arch == arch def wordsize( ws ): return config.wordsize == str(ws) def msys( ): return config.msys def cygwin( ): return config.cygwin def have_vanilla( ): return config.have_vanilla def have_dynamic( ): return config.have_dynamic def have_profiling( ): return config.have_profiling def in_tree_compiler( ): return config.in_tree_compiler def compiler_type( compiler ): return config.compiler_type == compiler def compiler_lt( compiler, version ): return config.compiler_type == compiler and \ version_lt(config.compiler_version, version) def compiler_le( compiler, version ): return config.compiler_type == compiler and \ version_le(config.compiler_version, version) def compiler_gt( compiler, version ): return config.compiler_type == compiler and \ version_gt(config.compiler_version, version) def compiler_ge( compiler, version ): return config.compiler_type == compiler and \ version_ge(config.compiler_version, version) def unregisterised( ): return config.unregisterised def compiler_profiled( ): return config.compiler_profiled def compiler_debugged( ): return config.compiler_debugged def tag( t ): return t in config.compiler_tags # --- def namebase( nb ): return lambda opts, nb=nb: _namebase(opts, nb) def _namebase( opts, nb ): opts.with_namebase = nb # --- def high_memory_usage(name, opts): opts.alone = True # If a test is for a multi-CPU race, then running the test alone # increases the chance that we'll actually see it. 
def multi_cpu_race(name, opts): opts.alone = True # --- def literate( name, opts ): opts.literate = 1; def c_src( name, opts ): opts.c_src = 1; def objc_src( name, opts ): opts.objc_src = 1; def objcpp_src( name, opts ): opts.objcpp_src = 1; def cmm_src( name, opts ): opts.cmm_src = 1; def outputdir( odir ): return lambda name, opts, d=odir: _outputdir(name, opts, d) def _outputdir( name, opts, odir ): opts.outputdir = odir; # ---- def pre_cmd( cmd ): return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd) def _pre_cmd( name, opts, cmd ): opts.pre_cmd = cmd # ---- def clean_cmd( cmd ): return lambda name, opts, c=cmd: _clean_cmd(name, opts, cmd) def _clean_cmd( name, opts, cmd ): opts.clean_cmd = cmd # ---- def cmd_prefix( prefix ): return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix) def _cmd_prefix( name, opts, prefix ): opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd; # ---- def cmd_wrapper( fun ): return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun) def _cmd_wrapper( name, opts, fun ): opts.cmd_wrapper = fun # ---- def compile_cmd_prefix( prefix ): return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix) def _compile_cmd_prefix( name, opts, prefix ): opts.compile_cmd_prefix = prefix # ---- def check_stdout( f ): return lambda name, opts, f=f: _check_stdout(name, opts, f) def _check_stdout( name, opts, f ): opts.check_stdout = f # ---- def normalise_slashes( name, opts ): _normalise_fun(name, opts, normalise_slashes_) def normalise_exe( name, opts ): _normalise_fun(name, opts, normalise_exe_) def normalise_fun( *fs ): return lambda name, opts: _normalise_fun(name, opts, fs) def _normalise_fun( name, opts, *fs ): opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs) def normalise_errmsg_fun( *fs ): return lambda name, opts: _normalise_errmsg_fun(name, opts, fs) def _normalise_errmsg_fun( name, opts, *fs ): opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs) def normalise_version_( *pkgs ): def normalise_version__( str ): return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+', '\\1-<VERSION>', str) return normalise_version__ def normalise_version( *pkgs ): def normalise_version__( name, opts ): _normalise_fun(name, opts, normalise_version_(*pkgs)) _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs)) return normalise_version__ def join_normalisers(*a): """ Compose functions, flattening sequences. 
    join_normalisers(f1,[f2,f3],f4) is the same as
    lambda x: f1(f2(f3(f4(x))))
    """
    # Compatibility shims: 'basestring' exists only on Python 2, and the
    # abstract base classes moved to collections.abc on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    try:
        from collections.abc import Iterable
    except ImportError:
        Iterable = collections.Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if isinstance(el, Iterable) and not isinstance(el, string_types):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn

# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)

# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir( dir ):
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )

def _newTestDir( name, opts, dir ):
    opts.testdir = dir
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest (opts, name, func, args):
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)

# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
myTestOpts = copy.deepcopy(default_testopts) executeSetups([thisdir_settings, setup], name, myTestOpts) thisTest = lambda : runTest(myTestOpts, name, func, args) if myTestOpts.alone: aloneTests.append(thisTest) else: parallelTests.append(thisTest) allTestNames.add(name) if config.use_threads: def test_common_thread(name, opts, func, args): t.lock.acquire() try: test_common_work(name,opts,func,args) finally: t.lock.release() t.thread_pool.acquire() t.running_threads = t.running_threads - 1 t.thread_pool.notify() t.thread_pool.release() def get_package_cache_timestamp(): if config.package_conf_cache_file == '': return 0.0 else: try: return os.stat(config.package_conf_cache_file).st_mtime except: return 0.0 def test_common_work (name, opts, func, args): try: t.total_tests = t.total_tests+1 setLocalTestOpts(opts) package_conf_cache_file_start_timestamp = get_package_cache_timestamp() # All the ways we might run this test if func == compile or func == multimod_compile: all_ways = config.compile_ways elif func == compile_and_run or func == multimod_compile_and_run: all_ways = config.run_ways elif func == ghci_script: if 'ghci' in config.run_ways: all_ways = ['ghci'] else: all_ways = [] else: all_ways = ['normal'] # A test itself can request extra ways by setting opts.extra_ways all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways] t.total_test_cases = t.total_test_cases + len(all_ways) ok_way = lambda way: \ not getTestOpts().skip \ and (config.only == [] or name in config.only) \ and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \ and (config.cmdline_ways == [] or way in config.cmdline_ways) \ and (not (config.skip_perf_tests and isStatsTest())) \ and way not in getTestOpts().omit_ways # Which ways we are asked to skip do_ways = list(filter (ok_way,all_ways)) # In fast mode, we skip all but one way if config.fast and len(do_ways) > 0: do_ways = [do_ways[0]] if not config.clean_only: # Run the required tests... for way in do_ways: if stopping(): break do_test (name, way, func, args) for way in all_ways: if way not in do_ways: skiptest (name,way) if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []): pretest_cleanup(name) clean([name + suff for suff in [ '', '.exe', '.exe.manifest', '.genscript', '.stderr.normalised', '.stdout.normalised', '.run.stderr.normalised', '.run.stdout.normalised', '.comp.stderr.normalised', '.comp.stdout.normalised', '.interp.stderr.normalised', '.interp.stdout.normalised', '.stats', '.comp.stats', '.hi', '.o', '.prof', '.exe.prof', '.hc', '_stub.h', '_stub.c', '_stub.o', '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']]) if func == multi_compile or func == multi_compile_fail: extra_mods = args[1] clean([replace_suffix(fx[0],'o') for fx in extra_mods]) clean([replace_suffix(fx[0], 'hi') for fx in extra_mods]) clean(getTestOpts().clean_files) if getTestOpts().outputdir != None: odir = in_testdir(getTestOpts().outputdir) try: shutil.rmtree(odir) except: pass try: shutil.rmtree(in_testdir('.hpc.' 
+ name)) except: pass try: cleanCmd = getTestOpts().clean_cmd if cleanCmd != None: result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd) if result != 0: framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result)) except: framework_fail(name, 'cleaning', 'clean-command exception') package_conf_cache_file_end_timestamp = get_package_cache_timestamp(); if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp: framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp)) try: for f in files_written[name]: if os.path.exists(f): try: if not f in files_written_not_removed[name]: files_written_not_removed[name].append(f) except: files_written_not_removed[name] = [f] except: pass except Exception as e: framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e)) def clean(strs): for str in strs: for name in glob.glob(in_testdir(str)): clean_full_path(name) def clean_full_path(name): try: # Remove files... os.remove(name) except OSError as e1: try: # ... and empty directories os.rmdir(name) except OSError as e2: # We don't want to fail here, but we do want to know # what went wrong, so print out the exceptions. # ENOENT isn't a problem, though, as we clean files # that don't necessarily exist. if e1.errno != errno.ENOENT: print(e1) if e2.errno != errno.ENOENT: print(e2) def do_test(name, way, func, args): full_name = name + '(' + way + ')' try: if_verbose(2, "=====> %s %d of %d %s " % \ (full_name, t.total_tests, len(allTestNames), \ [t.n_unexpected_passes, \ t.n_unexpected_failures, \ t.n_framework_failures])) if config.use_threads: t.lock.release() try: preCmd = getTestOpts().pre_cmd if preCmd != None: result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd) if result != 0: framework_fail(name, way, 'pre-command failed: ' + str(result)) except: framework_fail(name, way, 'pre-command exception') try: result = func(*[name,way] + args) finally: if config.use_threads: t.lock.acquire() if getTestOpts().expect != 'pass' and \ getTestOpts().expect != 'fail' and \ getTestOpts().expect != 'missing-lib': framework_fail(name, way, 'bad expected ' + getTestOpts().expect) try: passFail = result['passFail'] except: passFail = 'No passFail found' if passFail == 'pass': if getTestOpts().expect == 'pass' \ and way not in getTestOpts().expect_fail_for: t.n_expected_passes = t.n_expected_passes + 1 if name in t.expected_passes: t.expected_passes[name].append(way) else: t.expected_passes[name] = [way] else: if_verbose(1, '*** unexpected pass for %s' % full_name) t.n_unexpected_passes = t.n_unexpected_passes + 1 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way) elif passFail == 'fail': if getTestOpts().expect == 'pass' \ and way not in getTestOpts().expect_fail_for: reason = result['reason'] tag = result.get('tag') if tag == 'stat': if_verbose(1, '*** unexpected stat test failure for %s' % full_name) t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1 addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way) else: if_verbose(1, '*** unexpected failure for %s' % full_name) t.n_unexpected_failures = t.n_unexpected_failures + 1 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way) else: if getTestOpts().expect == 'missing-lib': t.n_missing_libs = t.n_missing_libs + 1 if name in t.missing_libs: t.missing_libs[name].append(way) else: 
t.missing_libs[name] = [way] else: t.n_expected_failures = t.n_expected_failures + 1 if name in t.expected_failures: t.expected_failures[name].append(way) else: t.expected_failures[name] = [way] else: framework_fail(name, way, 'bad result ' + passFail) except KeyboardInterrupt: stopNow() except: framework_fail(name, way, 'do_test exception') traceback.print_exc() def addPassingTestInfo (testInfos, directory, name, way): directory = re.sub('^\\.[/\\\\]', '', directory) if not directory in testInfos: testInfos[directory] = {} if not name in testInfos[directory]: testInfos[directory][name] = [] testInfos[directory][name].append(way) def addFailingTestInfo (testInfos, directory, name, reason, way): directory = re.sub('^\\.[/\\\\]', '', directory) if not directory in testInfos: testInfos[directory] = {} if not name in testInfos[directory]: testInfos[directory][name] = {} if not reason in testInfos[directory][name]: testInfos[directory][name][reason] = [] testInfos[directory][name][reason].append(way) def skiptest (name, way): # print 'Skipping test \"', name, '\"' t.n_tests_skipped = t.n_tests_skipped + 1 if name in t.tests_skipped: t.tests_skipped[name].append(way) else: t.tests_skipped[name] = [way] def framework_fail( name, way, reason ): full_name = name + '(' + way + ')' if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason)) t.n_framework_failures = t.n_framework_failures + 1 if name in t.framework_failures: t.framework_failures[name].append(way) else: t.framework_failures[name] = [way] def badResult(result): try: if result['passFail'] == 'pass': return False return True except: return True def passed(): return {'passFail': 'pass'} def failBecause(reason, tag=None): return {'passFail': 'fail', 'reason': reason, 'tag': tag} # ----------------------------------------------------------------------------- # Generic command tests # A generic command test is expected to run and exit successfully. # # The expected exit code can be changed via exit_code() as normal, and # the expected stdout/stderr are stored in <testname>.stdout and # <testname>.stderr. The output of the command can be ignored # altogether by using run_command_ignore_output instead of # run_command. def run_command( name, way, cmd ): return simple_run( name, '', cmd, '' ) # ----------------------------------------------------------------------------- # GHCi tests def ghci_script_without_flag(flag): def apply(name, way, script): overrides = [f for f in getTestOpts().compiler_always_flags if f != flag] return ghci_script_override_default_flags(overrides)(name, way, script) return apply def ghci_script_override_default_flags(overrides): def apply(name, way, script): return ghci_script(name, way, script, overrides) return apply def ghci_script( name, way, script, override_flags = None ): # filter out -fforce-recomp from compiler_always_flags, because we're # actually testing the recompilation behaviour in the GHCi tests. flags = ' '.join(get_compiler_flags(override_flags, noforce=True)) way_flags = ' '.join(config.way_flags(name)['ghci']) # We pass HC and HC_OPTS as environment variables, so that the # script can invoke the correct compiler by using ':! 
# $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)
    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

# -----------------------------------------------------------------------------
# Compile-only tests

def compile_override_default_flags(overrides):
    def apply(name, way, extra_opts):
        return do_compile(name, way, 0, '', [], extra_opts, overrides)
    return apply

def compile_fail_override_default_flags(overrides):
    def apply(name, way, extra_opts):
        return do_compile(name, way, 1, '', [], extra_opts, overrides)
    return apply

def compile_without_flag(flag):
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_override_default_flags(overrides)(name, way, extra_opts)
    return apply

def compile_fail_without_flag(flag):
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
    return apply

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)

def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
    # print 'Compile only, extra args = ', extra_hc_opts
    pretest_cleanup(name)

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    force = 0
    if extra_mods:
        force = 1

    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    if getTestOpts().with_namebase == None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
    actual_stderr_file = qualify(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg,
                                            normalise_whitespace),
                           expected_stderr_file, actual_stderr_file):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()

def compile_cmp_asm( name, way, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    pretest_cleanup(name)
    result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).
if getTestOpts().with_namebase == None: namebase = name else: namebase = getTestOpts().with_namebase (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm') actual_asm_file = qualify(name, 's') if not compare_outputs(way, 'asm', join_normalisers(normalise_errmsg, normalise_asm), expected_asm_file, actual_asm_file): return failBecause('asm mismatch') # no problems found, this test passed return passed() # ----------------------------------------------------------------------------- # Compile-and-run tests def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ): # print 'Compile and run, extra args = ', extra_hc_opts pretest_cleanup(name) result = extras_build( way, extra_mods, extra_hc_opts ) if badResult(result): return result extra_hc_opts = result['hc_opts'] if way == 'ghci': # interpreted... return interpreter_run( name, way, extra_hc_opts, 0, top_mod ) else: # compiled... force = 0 if extra_mods: force = 1 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force) if badResult(result): return result cmd = './' + name; # we don't check the compiler's stderr for a compile-and-run test return simple_run( name, way, cmd, getTestOpts().extra_run_opts ) def compile_and_run( name, way, extra_hc_opts ): return compile_and_run__( name, way, '', [], extra_hc_opts) def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ): return compile_and_run__( name, way, top_mod, [], extra_hc_opts) def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ): return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts) def stats( name, way, stats_file ): opts = getTestOpts() return checkStats(name, way, stats_file, opts.stats_range_fields) # ----------------------------------------------------------------------------- # Check -t stats info def checkStats(name, way, stats_file, range_fields): full_name = name + '(' + way + ')' result = passed() if len(range_fields) > 0: try: f = open(in_testdir(stats_file)) except IOError as e: return failBecause(str(e)) contents = f.read() f.close() for (field, (expected, dev)) in range_fields.items(): m = re.search('\("' + field + '", "([0-9]+)"\)', contents) if m == None: print('Failed to find field: ', field) result = failBecause('no such stats field') val = int(m.group(1)) lowerBound = trunc( expected * ((100 - float(dev))/100)) upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100))) deviation = round(((float(val) * 100)/ expected) - 100, 1) if val < lowerBound: print(field, 'value is too low:') print('(If this is because you have improved GHC, please') print('update the test so that GHC doesn\'t regress again)') result = failBecause('stat too good', tag='stat') if val > upperBound: print(field, 'value is too high:') result = failBecause('stat not good enough', tag='stat') if val < lowerBound or val > upperBound or config.verbose >= 4: valStr = str(val) valLen = len(valStr) expectedStr = str(expected) expectedLen = len(expectedStr) length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val]) def display(descr, val, extra): print(descr, str(val).rjust(length), extra) display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%') display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '') display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '') display(' Actual ' + full_name + ' ' + field + ':', val, '') if val != expected: display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%') return 
result # ----------------------------------------------------------------------------- # Build a single-module program def extras_build( way, extra_mods, extra_hc_opts ): for modopts in extra_mods: mod, opts = modopts result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0) if not (mod.endswith('.hs') or mod.endswith('.lhs')): extra_hc_opts += ' ' + replace_suffix(mod, 'o') if badResult(result): return result return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts} def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ): opts = getTestOpts() errname = add_suffix(name, 'comp.stderr') rm_no_fail( qualify(errname, '') ) if top_mod != '': srcname = top_mod rm_no_fail( qualify(name, '') ) base, suf = os.path.splitext(top_mod) rm_no_fail( qualify(base, '') ) rm_no_fail( qualify(base, 'exe') ) elif addsuf: srcname = add_hs_lhs_suffix(name) rm_no_fail( qualify(name, '') ) else: srcname = name rm_no_fail( qualify(name, 'o') ) rm_no_fail( qualify(replace_suffix(srcname, "o"), '') ) to_do = '' if top_mod != '': to_do = '--make ' if link: to_do = to_do + '-o ' + name elif link: to_do = '-o ' + name elif opts.compile_to_hc: to_do = '-C' else: to_do = '-c' # just compile stats_file = name + '.comp.stats' if len(opts.compiler_stats_range_fields) > 0: extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS' # Required by GHC 7.3+, harmless for earlier versions: if (getTestOpts().c_src or getTestOpts().objc_src or getTestOpts().objcpp_src or getTestOpts().cmm_src): extra_hc_opts += ' -no-hs-main ' if getTestOpts().compile_cmd_prefix == '': cmd_prefix = '' else: cmd_prefix = getTestOpts().compile_cmd_prefix + ' ' flags = ' '.join(get_compiler_flags(override_flags, noforce) + config.way_flags(name)[way]) cmd = ('cd {opts.testdir} && {cmd_prefix} ' '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} ' '> {errname} 2>&1' ).format(**locals()) result = runCmdFor(name, cmd) if result != 0 and not should_fail: actual_stderr = qualify(name, 'comp.stderr') if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:') if_verbose_dump(1,actual_stderr) # ToDo: if the sub-shell was killed by ^C, then exit statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields) if badResult(statsResult): return statsResult if should_fail: if result == 0: return failBecause('exit code 0') else: if result != 0: return failBecause('exit code non-0') return passed() # ----------------------------------------------------------------------------- # Run a program and check its output # # If testname.stdin exists, route input from that, else # from /dev/null. Route output to testname.run.stdout and # testname.run.stderr. Returns the exit code of the run. 
def simple_run( name, way, prog, args ): opts = getTestOpts() # figure out what to use for stdin if opts.stdin != '': use_stdin = opts.stdin else: stdin_file = add_suffix(name, 'stdin') if os.path.exists(in_testdir(stdin_file)): use_stdin = stdin_file else: use_stdin = '/dev/null' run_stdout = add_suffix(name,'run.stdout') run_stderr = add_suffix(name,'run.stderr') rm_no_fail(qualify(name,'run.stdout')) rm_no_fail(qualify(name,'run.stderr')) rm_no_fail(qualify(name, 'hp')) rm_no_fail(qualify(name,'ps')) rm_no_fail(qualify(name, 'prof')) my_rts_flags = rts_flags(way) stats_file = name + '.stats' if len(opts.stats_range_fields) > 0: args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS' if opts.no_stdin: stdin_comes_from = '' else: stdin_comes_from = ' <' + use_stdin if opts.combined_output: redirection = ' > {0} 2>&1'.format(run_stdout) redirection_append = ' >> {0} 2>&1'.format(run_stdout) else: redirection = ' > {0} 2> {1}'.format(run_stdout, run_stderr) redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr) cmd = prog + ' ' + args + ' ' \ + my_rts_flags + ' ' \ + stdin_comes_from \ + redirection if opts.cmd_wrapper != None: cmd = opts.cmd_wrapper(cmd) + redirection_append cmd = 'cd ' + opts.testdir + ' && ' + cmd # run the command result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier) exit_code = result >> 8 signal = result & 0xff # check the exit code if exit_code != opts.exit_code: print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')') dump_stdout(name) dump_stderr(name) return failBecause('bad exit code') check_hp = my_rts_flags.find("-h") != -1 check_prof = my_rts_flags.find("-p") != -1 if not opts.ignore_output: bad_stderr = not opts.combined_output and not check_stderr_ok(name, way) bad_stdout = not check_stdout_ok(name, way) if bad_stderr: return failBecause('bad stderr') if bad_stdout: return failBecause('bad stdout') # exit_code > 127 probably indicates a crash, so don't try to run hp2ps. if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name): return failBecause('bad heap profile') if check_prof and not check_prof_ok(name, way): return failBecause('bad profile') return checkStats(name, way, stats_file, opts.stats_range_fields) def rts_flags(way): if (way == ''): return '' else: args = config.way_rts_flags[way] if args == []: return '' else: return '+RTS ' + ' '.join(args) + ' -RTS' # ----------------------------------------------------------------------------- # Run a program in the interpreter and check its output def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ): outname = add_suffix(name, 'interp.stdout') errname = add_suffix(name, 'interp.stderr') rm_no_fail(outname) rm_no_fail(errname) rm_no_fail(name) if (top_mod == ''): srcname = add_hs_lhs_suffix(name) else: srcname = top_mod scriptname = add_suffix(name, 'genscript') qscriptname = in_testdir(scriptname) rm_no_fail(qscriptname) delimiter = '===== program output begins here\n' script = open(qscriptname, 'w') if not compile_only: # set the prog name and command-line args to match the compiled # environment. script.write(':set prog ' + name + '\n') script.write(':set args ' + getTestOpts().extra_run_opts + '\n') # Add marker lines to the stdout and stderr output files, so we # can separate GHCi's output from the program's. script.write(':! echo ' + delimiter) script.write(':! echo 1>&2 ' + delimiter) # Set stdout to be line-buffered to match the compiled environment. 
script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n') # wrapping in GHC.TopHandler.runIO ensures we get the same output # in the event of an exception as for the compiled program. script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n') script.close() # figure out what to use for stdin if getTestOpts().stdin != '': stdin_file = in_testdir(getTestOpts().stdin) else: stdin_file = qualify(name, 'stdin') if os.path.exists(stdin_file): stdin = open(stdin_file, 'r') os.system('cat ' + stdin_file + ' >>' + qscriptname) script.close() flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) + config.way_flags(name)[way]) if getTestOpts().combined_output: redirection = ' > {0} 2>&1'.format(outname) redirection_append = ' >> {0} 2>&1'.format(outname) else: redirection = ' > {0} 2> {1}'.format(outname, errname) redirection_append = ' >> {0} 2>> {1}'.format(outname, errname) cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} ' '< {scriptname} {redirection}' ).format(**locals()) if getTestOpts().cmd_wrapper != None: cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append; cmd = 'cd ' + getTestOpts().testdir + " && " + cmd result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier) exit_code = result >> 8 signal = result & 0xff # split the stdout into compilation/program output split_file(in_testdir(outname), delimiter, qualify(name, 'comp.stdout'), qualify(name, 'run.stdout')) split_file(in_testdir(errname), delimiter, qualify(name, 'comp.stderr'), qualify(name, 'run.stderr')) # check the exit code if exit_code != getTestOpts().exit_code: print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')') dump_stdout(name) dump_stderr(name) return failBecause('bad exit code') # ToDo: if the sub-shell was killed by ^C, then exit if getTestOpts().ignore_output or (check_stderr_ok(name, way) and check_stdout_ok(name, way)): return passed() else: return failBecause('bad stdout or stderr') def split_file(in_fn, delimiter, out1_fn, out2_fn): infile = open(in_fn) out1 = open(out1_fn, 'w') out2 = open(out2_fn, 'w') line = infile.readline() line = re.sub('\r', '', line) # ignore Windows EOL while (re.sub('^\s*','',line) != delimiter and line != ''): out1.write(line) line = infile.readline() line = re.sub('\r', '', line) out1.close() line = infile.readline() while (line != ''): out2.write(line) line = infile.readline() out2.close() # ----------------------------------------------------------------------------- # Utils def get_compiler_flags(override_flags, noforce): opts = getTestOpts() if override_flags is not None: flags = copy.copy(override_flags) else: flags = copy.copy(opts.compiler_always_flags) if noforce: flags = [f for f in flags if f != '-fforce-recomp'] flags.append(opts.extra_hc_opts) if opts.outputdir != None: flags.extend(["-outputdir", opts.outputdir]) return flags def check_stdout_ok(name, way): if getTestOpts().with_namebase == None: namebase = name else: namebase = getTestOpts().with_namebase actual_stdout_file = qualify(name, 'run.stdout') (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout') def norm(str): if platform_specific: return str else: return normalise_output(str) extra_norm = join_normalisers(norm, getTestOpts().extra_normaliser) check_stdout = getTestOpts().check_stdout if check_stdout: return check_stdout(actual_stdout_file, extra_norm) return compare_outputs(way, 'stdout', extra_norm, expected_stdout_file, 
                           actual_stdout_file)

def dump_stdout( name ):
    print('Stdout:')
    print(read_no_crs(qualify(name, 'run.stdout')))

def check_stderr_ok(name, way):
    if getTestOpts().with_namebase == None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    actual_stderr_file = qualify(name, 'run.stderr')
    (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')

    def norm(str):
        if platform_specific:
            return str
        else:
            return normalise_errmsg(str)

    return compare_outputs(way, 'stderr',
                           join_normalisers(norm, getTestOpts().extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file)

def dump_stderr( name ):
    print("Stderr:")
    print(read_no_crs(qualify(name, 'run.stderr')))

def read_no_crs(file):
    str = ''
    try:
        h = open(file)
        str = h.read()
        h.close()
    except:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return re.sub('\r', '', str)

def write_file(file, str):
    h = open(file, 'w')
    h.write(str)
    h.close()

def check_hp_ok(name):
    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = "cd " + getTestOpts().testdir + " && {hp2ps} " + name
    hp2psResult = runCmdExitCode(hp2psCmd)
    actual_ps_file = qualify(name, 'ps')

    if hp2psResult == 0:
        if os.path.exists(actual_ps_file):
            if gs_working:
                gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
                if gsResult == 0:
                    return True
                else:
                    print("hp2ps output for " + name + " is not valid PostScript")
            else:
                return True # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return False
    else:
        print("hp2ps error when processing heap profile for " + name)
        return False

def check_prof_ok(name, way):
    prof_file = qualify(name, 'prof')

    if not os.path.exists(prof_file):
        print(prof_file + " does not exist")
        return False

    if os.path.getsize(qualify(name, 'prof')) == 0:
        print(prof_file + " is empty")
        return False

    if getTestOpts().with_namebase == None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    (platform_specific, expected_prof_file) = \
        platform_wordsize_qualify(namebase, 'prof.sample')

    # sample prof file is not required
    if not os.path.exists(expected_prof_file):
        return True
    else:
        return compare_outputs(way, 'prof',
                               join_normalisers(normalise_whitespace, normalise_prof),
                               expected_prof_file, prof_file)

# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise.
def compare_outputs(way, kind, normaliser, expected_file, actual_file):
    if os.path.exists(expected_file):
        expected_raw = read_no_crs(expected_file)
        # print "norm:", normaliser(expected_raw)
        expected_str = normaliser(expected_raw)
        expected_file_for_diff = expected_file
    else:
        expected_str = ''
        expected_file_for_diff = '/dev/null'

    actual_raw = read_no_crs(actual_file)
    actual_str = normaliser(actual_raw)

    if expected_str == actual_str:
        return 1
    else:
        if_verbose(1, 'Actual ' + kind + ' output differs from expected:')

        if expected_file_for_diff == '/dev/null':
            expected_normalised_file = '/dev/null'
        else:
            expected_normalised_file = expected_file + ".normalised"
            write_file(expected_normalised_file, expected_str)

        actual_normalised_file = actual_file + ".normalised"
        write_file(actual_normalised_file, actual_str)

        # Ignore whitespace when diffing. We should only get to this
        # point if there are non-whitespace differences
        #
        # Note we are diffing the *actual* output, not the normalised
        # output.
        # The normalised output may have whitespace squashed (including
        # newlines) so the diff would be hard to read.
        # This does mean that the diff might contain changes that
        # would be normalised away.
        if config.verbose >= 1:
            r = os.system( 'diff -uw ' + expected_file_for_diff +
                           ' ' + actual_file )

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system( 'diff -u ' + expected_file_for_diff +
                               ' ' + actual_file )

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_file, actual_raw)
            return 1
        else:
            return 0

def normalise_whitespace( str ):
    # Merge contiguous whitespace characters into a single space.
    str = re.sub('[ \t\n]+', ' ', str)
    return str

def normalise_errmsg( str ):
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes contain the integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    return str

# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    #   MAIN      MAIN   101   0   0.0   0.0  100.0 100.0
    #   k         Main   204   1   0.0   0.0    0.0   0.0
    #   foo       Main   205   1   0.0   0.0    0.0   0.0
    #   foo.bar   Main   207   1   0.0   0.0    0.0   0.0
    #
    # then we remove all the specific profiling data, leaving only the
    # cost centre name, module, and entries, to end up with this:
    #
    #   MAIN      MAIN   0
    #   k         Main   1
    #   foo       Main   1
    #   foo.bar   Main   1
    str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
    return str

def normalise_slashes_( str ):
    str = re.sub('\\\\', '/', str)
    return str

def normalise_exe_( str ):
    str = re.sub('\.exe', '', str)
    return str

def normalise_output( str ):
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
str = re.sub('([^\\s])\\.exe', '\\1', str) return str def normalise_asm( str ): lines = str.split('\n') # Only keep instructions and labels not starting with a dot. metadata = re.compile('^[ \t]*\\..*$') out = [] for line in lines: # Drop metadata directives (e.g. ".type") if not metadata.match(line): line = re.sub('@plt', '', line) instr = line.lstrip().split() # Drop empty lines. if not instr: continue # Drop operands, except for call instructions. elif instr[0] == 'call': out.append(instr[0] + ' ' + instr[1]) else: out.append(instr[0]) out = '\n'.join(out) return out def if_verbose( n, s ): if config.verbose >= n: print(s) def if_verbose_dump( n, f ): if config.verbose >= n: try: print(open(f).read()) except: print('') def rawSystem(cmd_and_args): # We prefer subprocess.call to os.spawnv as the latter # seems to send its arguments through a shell or something # with the Windows (non-cygwin) python. An argument "a b c" # turns into three arguments ["a", "b", "c"]. cmd = cmd_and_args[0] return subprocess.call([strip_quotes(cmd)] + cmd_and_args[1:]) # When running under native msys Python, any invocations of non-msys binaries, # including timeout.exe, will have their arguments munged according to some # heuristics, which leads to malformed command lines (#9626). The easiest way # to avoid problems is to invoke through /usr/bin/cmd which sidesteps argument # munging because it is a native msys application. def passThroughCmd(cmd_and_args): args = [] # cmd needs a Windows-style path for its first argument. args.append(cmd_and_args[0].replace('/', '\\')) # Other arguments need to be quoted to deal with spaces. args.extend(['"%s"' % arg for arg in cmd_and_args[1:]]) return ["cmd", "/c", " ".join(args)] # Note that this doesn't handle the timeout itself; it is just used for # commands that have timeout handling built-in. def rawSystemWithTimeout(cmd_and_args): if config.os == 'mingw32' and sys.executable.startswith('/usr'): # This is only needed when running under msys python. cmd_and_args = passThroughCmd(cmd_and_args) r = rawSystem(cmd_and_args) if r == 98: # The python timeout program uses 98 to signal that ^C was pressed stopNow() return r # cmd is a complex command in Bourne-shell syntax # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc) # Hence it must ultimately be run by a Bourne shell # # Mostly it invokes the command wrapped in 'timeout' thus # timeout 300 'cd . && ...blah blah' # so it's timeout's job to invoke the Bourne shell # # But watch out for the case when there is no timeout program! # Then, when using the native Python, os.system will invoke the cmd shell def runCmd( cmd ): # Format cmd using config. Example: cmd='{hpc} report A.tix' cmd = cmd.format(**config.__dict__) if_verbose( 3, cmd ) r = 0 if config.os == 'mingw32': # On MinGW, we will always have timeout assert config.timeout_prog!='' if config.timeout_prog != '': r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd]) else: r = os.system(cmd) return r << 8 def runCmdFor( name, cmd, timeout_multiplier=1.0 ): # Format cmd using config. 
Example: cmd='{hpc} report A.tix' cmd = cmd.format(**config.__dict__) if_verbose( 3, cmd ) r = 0 if config.os == 'mingw32': # On MinGW, we will always have timeout assert config.timeout_prog!='' timeout = int(ceil(config.timeout * timeout_multiplier)) if config.timeout_prog != '': if config.check_files_written: fn = name + ".strace" r = rawSystemWithTimeout( ["strace", "-o", fn, "-fF", "-e", "creat,open,chdir,clone,vfork", config.timeout_prog, str(timeout), cmd]) addTestFilesWritten(name, fn) rm_no_fail(fn) else: r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd]) else: r = os.system(cmd) return r << 8 def runCmdExitCode( cmd ): return (runCmd(cmd) >> 8); # ----------------------------------------------------------------------------- # checking for files being written to by multiple tests re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$' re_strace_unavailable = re.compile('^\) += \? <unavailable>$') re_strace_pid = re.compile('^([0-9]+) +(.*)') re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$') re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$') re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$') re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$') re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$') re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$') re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end) re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end) re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$') re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$') re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$') re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$') re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$') re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$') # Files that are read or written but shouldn't be: # * ghci_history shouldn't be read or written by tests # * things under package.conf.d shouldn't be written by tests bad_file_usages = {} # Mapping from tests to the list of files that they write files_written = {} # Mapping from tests to the list of files that they write but don't clean files_written_not_removed = {} def add_bad_file_usage(name, file): try: if not file in bad_file_usages[name]: bad_file_usages[name].append(file) except: bad_file_usages[name] = [file] def mkPath(curdir, path): # Given the current full directory is 'curdir', what is the full # path to 'path'? 
return os.path.realpath(os.path.join(curdir, path)) def addTestFilesWritten(name, fn): if config.use_threads: with t.lockFilesWritten: addTestFilesWrittenHelper(name, fn) else: addTestFilesWrittenHelper(name, fn) def addTestFilesWrittenHelper(name, fn): started = False working_directories = {} with open(fn, 'r') as f: for line in f: m_pid = re_strace_pid.match(line) if m_pid: pid = m_pid.group(1) content = m_pid.group(2) elif re_strace_unavailable.match(line): next else: framework_fail(name, 'strace', "Can't find pid in strace line: " + line) m_open = re_strace_open.match(content) m_chdir = re_strace_chdir.match(content) m_clone = re_strace_clone.match(content) m_vfork = re_strace_vfork.match(content) if not started: working_directories[pid] = os.getcwd() started = True if m_open: file = m_open.group(1) file = mkPath(working_directories[pid], file) if file.endswith("ghci_history"): add_bad_file_usage(name, file) elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"): flags = m_open.group(2).split('|') if 'O_WRONLY' in flags or 'O_RDWR' in flags: if re.match('package\.conf\.d', file): add_bad_file_usage(name, file) else: try: if not file in files_written[name]: files_written[name].append(file) except: files_written[name] = [file] elif 'O_RDONLY' in flags: pass else: framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line) elif m_chdir: # We optimistically assume that unfinished chdir's are going to succeed dir = m_chdir.group(1) working_directories[pid] = mkPath(working_directories[pid], dir) elif m_clone: working_directories[m_clone.group(2)] = working_directories[pid] elif m_vfork: working_directories[m_vfork.group(2)] = working_directories[pid] elif re_strace_open_resumed.match(content): pass elif re_strace_chdir_resumed.match(content): pass elif re_strace_vfork_unfinished.match(content): pass elif re_strace_clone_unfinished.match(content): pass elif re_strace_ignore_sigchild.match(content): pass elif re_strace_ignore_sigvtalarm.match(content): pass elif re_strace_ignore_sigint.match(content): pass elif re_strace_ignore_sigfpe.match(content): pass elif re_strace_ignore_sigsegv.match(content): pass elif re_strace_ignore_sigpipe.match(content): pass else: framework_fail(name, 'strace', "Can't understand strace line: " + line) def checkForFilesWrittenProblems(file): foundProblem = False files_written_inverted = {} for t in files_written.keys(): for f in files_written[t]: try: files_written_inverted[f].append(t) except: files_written_inverted[f] = [t] for f in files_written_inverted.keys(): if len(files_written_inverted[f]) > 1: if not foundProblem: foundProblem = True file.write("\n") file.write("\nSome files are written by multiple tests:\n") file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n") if foundProblem: file.write("\n") # ----- if len(files_written_not_removed) > 0: file.write("\n") file.write("\nSome files written but not removed:\n") tests = list(files_written_not_removed.keys()) tests.sort() for t in tests: for f in files_written_not_removed[t]: file.write(" " + t + ": " + f + "\n") file.write("\n") # ----- if len(bad_file_usages) > 0: file.write("\n") file.write("\nSome bad file usages:\n") tests = list(bad_file_usages.keys()) tests.sort() for t in tests: for f in bad_file_usages[t]: file.write(" " + t + ": " + f + "\n") file.write("\n") # ----------------------------------------------------------------------------- # checking if ghostscript is available for checking the output of hp2ps def genGSCmd(psfile): 
return (config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile); def gsNotWorking(): global gs_working print("GhostScript not available for hp2ps tests") global gs_working gs_working = 0 if config.have_profiling: if config.gs != '': resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps')); if resultGood == 0: resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps') + ' >/dev/null 2>&1') if resultBad != 0: print("GhostScript available for hp2ps tests") gs_working = 1; else: gsNotWorking(); else: gsNotWorking(); else: gsNotWorking(); def rm_no_fail( file ): try: os.remove( file ) finally: return def add_suffix( name, suffix ): if suffix == '': return name else: return name + '.' + suffix def add_hs_lhs_suffix(name): if getTestOpts().c_src: return add_suffix(name, 'c') elif getTestOpts().cmm_src: return add_suffix(name, 'cmm') elif getTestOpts().objc_src: return add_suffix(name, 'm') elif getTestOpts().objcpp_src: return add_suffix(name, 'mm') elif getTestOpts().literate: return add_suffix(name, 'lhs') else: return add_suffix(name, 'hs') def replace_suffix( name, suffix ): base, suf = os.path.splitext(name) return base + '.' + suffix def in_testdir( name ): return (getTestOpts().testdir + '/' + name) def qualify( name, suff ): return in_testdir(add_suffix(name, suff)) # Finding the sample output. The filename is of the form # # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>] # # and we pick the most specific version available. The <version> is # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For # more fine-grained control use if_compiler_lt(). # def platform_wordsize_qualify( name, suff ): basepath = qualify(name, suff) paths = [(platformSpecific, basepath + comp + vers + ws + plat) for (platformSpecific, plat) in [(1, '-' + config.platform), (1, '-' + config.os), (0, '')] for ws in ['-ws-' + config.wordsize, ''] for comp in ['-' + config.compiler_type, ''] for vers in ['-' + config.compiler_maj_version, '']] dir = glob.glob(basepath + '*') dir = [normalise_slashes_(d) for d in dir] for (platformSpecific, f) in paths: if f in dir: return (platformSpecific,f) return (0, basepath) # Clean up prior to the test, so that we can't spuriously conclude # that it passed on the basis of old run outputs. def pretest_cleanup(name): if getTestOpts().outputdir != None: odir = in_testdir(getTestOpts().outputdir) try: shutil.rmtree(odir) except: pass os.mkdir(odir) rm_no_fail(qualify(name,'interp.stderr')) rm_no_fail(qualify(name,'interp.stdout')) rm_no_fail(qualify(name,'comp.stderr')) rm_no_fail(qualify(name,'comp.stdout')) rm_no_fail(qualify(name,'run.stderr')) rm_no_fail(qualify(name,'run.stdout')) rm_no_fail(qualify(name,'tix')) rm_no_fail(qualify(name,'exe.tix')) # simple_build zaps the following: # rm_nofail(qualify("o")) # rm_nofail(qualify("")) # not interested in the return code # ----------------------------------------------------------------------------- # Return a list of all the files ending in '.T' below directories roots. 
def findTFiles(roots): # It would be better to use os.walk, but that # gives backslashes on Windows, which trip the # testsuite later :-( return [filename for root in roots for filename in findTFiles_(root)] def findTFiles_(path): if os.path.isdir(path): paths = [path + '/' + x for x in os.listdir(path)] return findTFiles(paths) elif path[-2:] == '.T': return [path] else: return [] # ----------------------------------------------------------------------------- # Output a test summary to the specified file object def summary(t, file): file.write('\n') printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures]) file.write('OVERALL SUMMARY for test run started at ' + time.strftime("%c %Z", t.start_time) + '\n' + str(datetime.timedelta(seconds= round(time.time() - time.mktime(t.start_time)))).rjust(8) + ' spent to go through\n' + repr(t.total_tests).rjust(8) + ' total tests, which gave rise to\n' + repr(t.total_test_cases).rjust(8) + ' test cases, of which\n' + repr(t.n_tests_skipped).rjust(8) + ' were skipped\n' + '\n' + repr(t.n_missing_libs).rjust(8) + ' had missing libraries\n' + repr(t.n_expected_passes).rjust(8) + ' expected passes\n' + repr(t.n_expected_failures).rjust(8) + ' expected failures\n' + '\n' + repr(t.n_framework_failures).rjust(8) + ' caused framework failures\n' + repr(t.n_unexpected_passes).rjust(8) + ' unexpected passes\n' + repr(t.n_unexpected_failures).rjust(8) + ' unexpected failures\n' + repr(t.n_unexpected_stat_failures).rjust(8) + ' unexpected stat failures\n' + '\n') if t.n_unexpected_passes > 0: file.write('Unexpected passes:\n') printPassingTestInfosSummary(file, t.unexpected_passes) if t.n_unexpected_failures > 0: file.write('Unexpected failures:\n') printFailingTestInfosSummary(file, t.unexpected_failures) if t.n_unexpected_stat_failures > 0: file.write('Unexpected stat failures:\n') printFailingTestInfosSummary(file, t.unexpected_stat_failures) if config.check_files_written: checkForFilesWrittenProblems(file) if stopping(): file.write('WARNING: Testsuite run was terminated early\n') def printUnexpectedTests(file, testInfoss): unexpected = [] for testInfos in testInfoss: directories = testInfos.keys() for directory in directories: tests = list(testInfos[directory].keys()) unexpected += tests if unexpected != []: file.write('Unexpected results from:\n') file.write('TEST="' + ' '.join(unexpected) + '"\n') file.write('\n') def printPassingTestInfosSummary(file, testInfos): directories = list(testInfos.keys()) directories.sort() maxDirLen = max(len(x) for x in directories) for directory in directories: tests = list(testInfos[directory].keys()) tests.sort() for test in tests: file.write(' ' + directory.ljust(maxDirLen + 2) + test + \ ' (' + ','.join(testInfos[directory][test]) + ')\n') file.write('\n') def printFailingTestInfosSummary(file, testInfos): directories = list(testInfos.keys()) directories.sort() maxDirLen = max(len(d) for d in directories) for directory in directories: tests = list(testInfos[directory].keys()) tests.sort() for test in tests: reasons = testInfos[directory][test].keys() for reason in reasons: file.write(' ' + directory.ljust(maxDirLen + 2) + test + \ ' [' + reason + ']' + \ ' (' + ','.join(testInfos[directory][test][reason]) + ')\n') file.write('\n') def modify_lines(s, f): return '\n'.join([f(l) for l in s.splitlines()])
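# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the driver): the tolerance arithmetic that
# checkStats applies to each stats field. `expected` and `dev` mirror the
# (expected, dev) pairs stored in stats_range_fields; `stat_bounds` is a
# hypothetical helper introduced here only so the trunc/ceil rounding is easy
# to try out in isolation.

from math import ceil, trunc

def stat_bounds(expected, dev):
    # Same formulas as checkStats: truncate the lower bound and round
    # the upper bound up before truncating.
    lower = trunc(expected * ((100 - float(dev)) / 100))
    upper = trunc(0.5 + ceil(expected * ((100 + float(dev)) / 100)))
    return lower, upper

if __name__ == '__main__':
    # For expected=1000 and dev=5 this accepts values in roughly the
    # 950..1050 window; binary floating point combined with trunc/ceil
    # can shift either end of the window by one.
    print(stat_bounds(1000, 5))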
{ "content_hash": "b886eedaaba8c7431a0f81996c7714c2", "timestamp": "", "source": "github", "line_count": 2276, "max_line_length": 187, "avg_line_length": 33.54701230228471, "alnum_prop": 0.5714248294107631, "repo_name": "fmthoma/ghc", "id": "98a75e011e566eaa6a31ac6ac145ed286308910a", "size": "76382", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "testsuite/driver/testlib.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Assembly", "bytes": "5560" }, { "name": "Bison", "bytes": "62771" }, { "name": "C", "bytes": "2545219" }, { "name": "C++", "bytes": "46652" }, { "name": "CSS", "bytes": "984" }, { "name": "DTrace", "bytes": "3887" }, { "name": "Emacs Lisp", "bytes": "734" }, { "name": "Game Maker Language", "bytes": "14164" }, { "name": "Gnuplot", "bytes": "103851" }, { "name": "Groff", "bytes": "3840" }, { "name": "HTML", "bytes": "6144" }, { "name": "Haskell", "bytes": "17769725" }, { "name": "Haxe", "bytes": "218" }, { "name": "Logos", "bytes": "120476" }, { "name": "Makefile", "bytes": "491235" }, { "name": "Objective-C", "bytes": "19654" }, { "name": "Objective-C++", "bytes": "535" }, { "name": "Pascal", "bytes": "112387" }, { "name": "Perl", "bytes": "195717" }, { "name": "Perl6", "bytes": "247762" }, { "name": "PostScript", "bytes": "63" }, { "name": "Python", "bytes": "105644" }, { "name": "Shell", "bytes": "70503" }, { "name": "TeX", "bytes": "667" } ], "symlink_target": "" }
__author__ = 'lothilius'

import sys
import time
from Wordlist import *
from Permutations import *


def main():
    while True:
        try:
            inputFileName = input("Enter full path of scrambled word list:")
            f = open(inputFileName)
            f.close()  # we only opened the file to check that it exists
            break
        except FileNotFoundError:
            print("\nThe directory is invalid. Please try again.")

    start = time.time()
    print("\nUsing binary tree wordList.")
    wordList = BinaryTreeWordList()
    print("Creating wordList.", end="\n")
    # We only add words to the Wordlist whose length we accept as
    # input below (4 to 13 letters).
    wordList.addWordsFromFile(inputFileName, lambda x: len(x) in range(4, 14))
    print("The Wordlist contains ", len(wordList), " words.")
    end = time.time()
    print("Building the Wordlist took %2.3f seconds" % (end - start))

    # Prompt until the user enters a usable word, or exits.
    start = time.time()
    while True:
        word = input("\nEnter a scrambled word (or EXIT): ")
        word = word.strip().lower()
        # See if the word contains bad characters.
        if not word.isalpha():
            print("Word contains illegal characters. Try again")
            continue
        # Should we terminate?
        elif word == 'exit':
            print("\nThanks for playing! Goodbye.", end="\n\n")
            return
        # There's no need searching the Wordlist if the word isn't
        # a plausible length.
        elif len(word) not in range(4, 14):
            print("Word must have 4 to 13 letters. Try again")
            continue
        else:
            break

    # Create a list of all permutations of the input string and
    # print out how many unique perms there are.
    permsCount, uniquePermsCount = Permutations.howManyPerms(word)
    print("Found ", permsCount, "permutations; ", uniquePermsCount,
          "unique permutations")

    # We're going to check how many permutations we generated, and
    # how many comparisons were made against words in the wordList.
    permutationsChecked = 0
    comparisonsMade = 0

    # Iterate through the permutations until you find one that is
    # in the wordList, or fail if there are no hits.
    found = False
    for p in Permutations.allPerms(word):
        permutationsChecked += 1
        permInList, comparisons = wordList.findWord(p)
        comparisonsMade += comparisons
        if permInList:
            print("Found word: " + p)
            found = True
            # With the break, stops after the first hit. Without it,
            # this tries all of the permutations.
            break

    if not found:
        print("Sorry. I can't solve this jumble! Try again.")

    end = time.time()
    # Print out the stats on this attempt.
    print("Solving this jumble took %2.5f seconds" % (end - start))
    print("Checked ", permutationsChecked, " permutations.")
    print("Made ", comparisonsMade, " comparisons.")


main()
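# -----------------------------------------------------------------------------
# Illustrative sketch, independent of the Wordlist and Permutations modules
# imported above (whose internals are not shown here): the count of *unique*
# permutations that the solver reports can be computed directly as a
# multinomial coefficient -- n! divided by the factorial of each letter's
# multiplicity. `unique_perm_count` is a hypothetical helper, not part of
# the solver itself.

from collections import Counter
from math import factorial

def unique_perm_count(word):
    total = factorial(len(word))           # permutations of all positions
    for repeats in Counter(word).values():
        total //= factorial(repeats)       # collapse identical letters
    return total

# 'jumble' has 6 distinct letters: 6! = 720 unique permutations.
# 'lllama' repeats letters (l x3, a x2): 6! / (3! * 2!) = 60.
print(unique_perm_count('jumble'))   # 720
print(unique_perm_count('lllama'))   # 60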
{ "content_hash": "bdbbef7a8c146d8b099adec58f2e29f1", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 90, "avg_line_length": 33.20454545454545, "alnum_prop": 0.6197809719370294, "repo_name": "Lothilius/python-jumble", "id": "305815020b40b8d242bd45d6782f097dee28046e", "size": "3072", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Solver.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13639" } ], "symlink_target": "" }
from datetime import datetime

from airflow.decorators import task
from airflow.models import DAG
from airflow.providers.microsoft.azure.hooks.fileshare import AzureFileShareHook

NAME = 'myfileshare'
DIRECTORY = "mydirectory"


@task
def create_fileshare():
    """Create a fileshare with directory"""
    hook = AzureFileShareHook()
    hook.create_share(NAME)
    hook.create_directory(share_name=NAME, directory_name=DIRECTORY)
    exists = hook.check_for_directory(share_name=NAME, directory_name=DIRECTORY)
    if not exists:
        raise Exception


@task
def delete_fileshare():
    """Delete a fileshare"""
    hook = AzureFileShareHook()
    hook.delete_share(NAME)


with DAG(
    "example_fileshare",
    schedule_interval="@once",
    start_date=datetime(2021, 1, 1),
    catchup=False,
) as dag:
    create_fileshare() >> delete_fileshare()
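# -----------------------------------------------------------------------------
# A minimal sketch of an explicit verification step between creation and
# deletion. The `check_fileshare` task and the three-step chain below are
# illustrative additions, not part of the original example; the sketch reuses
# only a hook method (check_for_directory) already exercised above.


@task
def check_fileshare():
    """Fail if the directory is missing from the fileshare"""
    hook = AzureFileShareHook()
    if not hook.check_for_directory(share_name=NAME, directory_name=DIRECTORY):
        raise Exception("directory %s not found in share %s" % (DIRECTORY, NAME))


with DAG(
    "example_fileshare_with_check",
    schedule_interval="@once",
    start_date=datetime(2021, 1, 1),
    catchup=False,
) as dag_with_check:
    create_fileshare() >> check_fileshare() >> delete_fileshare()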
{ "content_hash": "aec3c49dd01de1707ee8cd38af74ac11", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 80, "avg_line_length": 24.457142857142856, "alnum_prop": 0.7184579439252337, "repo_name": "lyft/incubator-airflow", "id": "d50db3cb040273936e7766f588d783ac4a61b2f4", "size": "1642", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "airflow/providers/microsoft/azure/example_dags/example_fileshare.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "13715" }, { "name": "Dockerfile", "bytes": "17280" }, { "name": "HTML", "bytes": "161328" }, { "name": "JavaScript", "bytes": "25360" }, { "name": "Jinja", "bytes": "8565" }, { "name": "Jupyter Notebook", "bytes": "2933" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "10019710" }, { "name": "Shell", "bytes": "220780" } ], "symlink_target": "" }
from django.contrib.admin import ModelAdmin, TabularInline from django.contrib.admin.helpers import InlineAdminForm from django.contrib.admin.tests import AdminSeleniumTestCase from django.contrib.auth.models import Permission, User from django.contrib.contenttypes.models import ContentType from django.test import RequestFactory, TestCase, override_settings from django.urls import reverse from .admin import InnerInline, site as admin_site from .models import ( Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2, Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent, ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection, Question, Sighting, SomeChildModel, SomeParentModel, Teacher, ) INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>' class TestDataMixin: @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret') @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInline(TestDataMixin, TestCase): factory = RequestFactory() @classmethod def setUpTestData(cls): super().setUpTestData() cls.holder = Holder.objects.create(dummy=13) Inner.objects.create(dummy=42, holder=cls.holder) def setUp(self): self.client.force_login(self.superuser) def test_can_delete(self): """ can_delete should be passed to inlineformset factory. """ response = self.client.get( reverse('admin:admin_inlines_holder_change', args=(self.holder.id,)) ) inner_formset = response.context['inline_admin_formsets'][0].formset expected = InnerInline.can_delete actual = inner_formset.can_delete self.assertEqual(expected, actual, 'can_delete must be equal') def test_readonly_stacked_inline_label(self): """Bug #13174.""" holder = Holder.objects.create(dummy=42) Inner.objects.create(holder=holder, dummy=42, readonly='') response = self.client.get( reverse('admin:admin_inlines_holder_change', args=(holder.id,)) ) self.assertContains(response, '<label>Inner readonly label:</label>') def test_many_to_many_inlines(self): "Autogenerated many-to-many inlines are displayed correctly (#13407)" response = self.client.get(reverse('admin:admin_inlines_author_add')) # The heading for the m2m inline block uses the right text self.assertContains(response, '<h2>Author-book relationships</h2>') # The "add another" label is correct self.assertContains(response, 'Add another Author-book relationship') # The '+' is dropped from the autogenerated form prefix (Author_books+) self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_primary(self): person = Person.objects.create(firstname='Imelda') item = OutfitItem.objects.create(name='Shoes') # Imelda likes shoes, but can't carry her own bags. data = { 'shoppingweakness_set-TOTAL_FORMS': 1, 'shoppingweakness_set-INITIAL_FORMS': 0, 'shoppingweakness_set-MAX_NUM_FORMS': 0, '_save': 'Save', 'person': person.id, 'max_weight': 0, 'shoppingweakness_set-0-item': item.id, } response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data) self.assertEqual(response.status_code, 302) self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1) def test_tabular_inline_column_css_class(self): """ Field names are included in the context to output a field-specific CSS class name in the column headers. 
""" response = self.client.get(reverse('admin:admin_inlines_poll_add')) text_field, call_me_field = list(response.context['inline_admin_formset'].fields()) # Editable field. self.assertEqual(text_field['name'], 'text') self.assertContains(response, '<th class="column-text required">') # Read-only field. self.assertEqual(call_me_field['name'], 'call_me') self.assertContains(response, '<th class="column-call_me">') def test_custom_form_tabular_inline_label(self): """ A model form with a form field specified (TitleForm.title1) should have its label rendered in the tabular inline. """ response = self.client.get(reverse('admin:admin_inlines_titlecollection_add')) self.assertContains(response, '<th class="column-title1 required">Title1</th>', html=True) def test_custom_form_tabular_inline_overridden_label(self): """ SomeChildModelForm.__init__() overrides the label of a form field. That label is displayed in the TabularInline. """ response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add')) field = list(response.context['inline_admin_formset'].fields())[0] self.assertEqual(field['label'], 'new label') self.assertContains(response, '<th class="column-name required">New label</th>', html=True) def test_tabular_non_field_errors(self): """ non_field_errors are displayed correctly, including the correct value for colspan. """ data = { 'title_set-TOTAL_FORMS': 1, 'title_set-INITIAL_FORMS': 0, 'title_set-MAX_NUM_FORMS': 0, '_save': 'Save', 'title_set-0-title1': 'a title', 'title_set-0-title2': 'a different title', } response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data) # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox. self.assertContains( response, '<tr><td colspan="4"><ul class="errorlist nonfield">' '<li>The two titles must be the same</li></ul></td></tr>' ) def test_no_parent_callable_lookup(self): """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable""" # Identically named callable isn't present in the parent ModelAdmin, # rendering of the add view shouldn't explode response = self.client.get(reverse('admin:admin_inlines_novel_add')) self.assertEqual(response.status_code, 200) # View should have the child inlines section self.assertContains( response, '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"' ) def test_callable_lookup(self): """Admin inline should invoke local callable when its name is listed in readonly_fields""" response = self.client.get(reverse('admin:admin_inlines_poll_add')) self.assertEqual(response.status_code, 200) # Add parent object view should have the child inlines section self.assertContains( response, '<div class="js-inline-admin-formset inline-group" id="question_set-group"' ) # The right callable should be used for the inline readonly_fields # column cells self.assertContains(response, '<p>Callable in QuestionInline</p>') def test_help_text(self): """ The inlines' model field help texts are displayed when using both the stacked and tabular layouts. 
""" response = self.client.get(reverse('admin:admin_inlines_holder4_add')) self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Awesome tabular help text is awesome.)" ' 'title="Awesome tabular help text is awesome.">', 1 ) # ReadOnly fields response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add')) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text for ReadOnlyInline)" ' 'title="Help text for ReadOnlyInline">', 1 ) def test_tabular_model_form_meta_readonly_field(self): """ Tabular inlines use ModelForm.Meta.help_texts and labels for read-only fields. """ response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add')) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text from ModelForm.Meta)" ' 'title="Help text from ModelForm.Meta">' ) self.assertContains(response, 'Label from ModelForm.Meta') def test_inline_hidden_field_no_column(self): """#18263 -- Make sure hidden fields don't get a column in tabular inlines""" parent = SomeParentModel.objects.create(name='a') SomeChildModel.objects.create(name='b', position='0', parent=parent) SomeChildModel.objects.create(name='c', position='1', parent=parent) response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,))) self.assertNotContains(response, '<td class="field-position">') self.assertInHTML( '<input id="id_somechildmodel_set-1-position" ' 'name="somechildmodel_set-1-position" type="hidden" value="1">', response.rendered_content, ) def test_non_related_name_inline(self): """ Multiple inlines with related_name='+' have correct form prefixes. """ response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add')) self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True) self.assertContains( response, '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia">', html=True ) self.assertContains( response, '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100">', html=True ) self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True) self.assertContains( response, '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia">', html=True ) self.assertContains( response, '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100">', html=True ) @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True) def test_localize_pk_shortcut(self): """ The "View on Site" link is correct for locales that use thousand separators. """ holder = Holder.objects.create(pk=123456789, dummy=42) inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='') response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,))) inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk) self.assertContains(response, inner_shortcut) def test_custom_pk_shortcut(self): """ The "View on Site" link is correct for models with a custom primary key field. 
""" parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',))) child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk) child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk) self.assertContains(response, child1_shortcut) self.assertContains(response, child2_shortcut) def test_create_inlines_on_inherited_model(self): """ An object can be created with inlines when it inherits another class. """ data = { 'name': 'Martian', 'sighting_set-TOTAL_FORMS': 1, 'sighting_set-INITIAL_FORMS': 0, 'sighting_set-MAX_NUM_FORMS': 0, 'sighting_set-0-place': 'Zone 51', '_save': 'Save', } response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data) self.assertEqual(response.status_code, 302) self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1) def test_custom_get_extra_form(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) # The maximum number of forms should respect 'get_max_num' on the # ModelAdmin max_forms_input = ( '<input id="id_binarytree_set-MAX_NUM_FORMS" ' 'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">' ) # The total number of forms will remain the same in either case total_forms_hidden = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">' ) response = self.client.get(reverse('admin:admin_inlines_binarytree_add')) self.assertInHTML(max_forms_input % 3, response.rendered_content) self.assertInHTML(total_forms_hidden, response.rendered_content) response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,))) self.assertInHTML(max_forms_input % 2, response.rendered_content) self.assertInHTML(total_forms_hidden, response.rendered_content) def test_min_num(self): """ min_num and extra determine number of forms. 
""" class MinNumInline(TabularInline): model = BinaryTree min_num = 2 extra = 3 modeladmin = ModelAdmin(BinaryTree, admin_site) modeladmin.inlines = [MinNumInline] min_forms = ( '<input id="id_binarytree_set-MIN_NUM_FORMS" ' 'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">' ) total_forms = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">' ) request = self.factory.get(reverse('admin:admin_inlines_binarytree_add')) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request) self.assertInHTML(min_forms, response.rendered_content) self.assertInHTML(total_forms, response.rendered_content) def test_custom_min_num(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) class MinNumInline(TabularInline): model = BinaryTree extra = 3 def get_min_num(self, request, obj=None, **kwargs): if obj: return 5 return 2 modeladmin = ModelAdmin(BinaryTree, admin_site) modeladmin.inlines = [MinNumInline] min_forms = ( '<input id="id_binarytree_set-MIN_NUM_FORMS" ' 'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">' ) total_forms = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">' ) request = self.factory.get(reverse('admin:admin_inlines_binarytree_add')) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request) self.assertInHTML(min_forms % 2, response.rendered_content) self.assertInHTML(total_forms % 5, response.rendered_content) request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(bt_head.id)) self.assertInHTML(min_forms % 5, response.rendered_content) self.assertInHTML(total_forms % 8, response.rendered_content) def test_inline_nonauto_noneditable_pk(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) self.assertContains( response, '<input id="id_nonautopkbook_set-0-rand_pk" ' 'name="nonautopkbook_set-0-rand_pk" type="hidden">', html=True ) self.assertContains( response, '<input id="id_nonautopkbook_set-2-0-rand_pk" ' 'name="nonautopkbook_set-2-0-rand_pk" type="hidden">', html=True ) def test_inline_nonauto_noneditable_inherited_pk(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) self.assertContains( response, '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" ' 'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">', html=True ) self.assertContains( response, '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" ' 'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">', html=True ) def test_inline_editable_pk(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) self.assertContains( response, '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" ' 'name="editablepkbook_set-0-manual_pk" type="number">', html=True, count=1 ) self.assertContains( response, '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" ' 'name="editablepkbook_set-2-0-manual_pk" type="number">', html=True, count=1 ) def test_stacked_inline_edit_form_contains_has_original_class(self): holder = Holder.objects.create(dummy=1) holder.inner_set.create(dummy=1) response = self.client.get(reverse('admin:admin_inlines_holder_change', 
args=(holder.pk,))) self.assertContains( response, '<div class="inline-related has_original" id="inner_set-0">', count=1 ) self.assertContains( response, '<div class="inline-related" id="inner_set-1">', count=1 ) def test_inlines_show_change_link_registered(self): "Inlines `show_change_link` for registered models when enabled." holder = Holder4.objects.create(dummy=1) item1 = Inner4Stacked.objects.create(dummy=1, holder=holder) item2 = Inner4Tabular.objects.create(dummy=1, holder=holder) items = ( ('inner4stacked', item1.pk), ('inner4tabular', item2.pk), ) response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,))) self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model) for model, pk in items: url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,)) self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML)) def test_inlines_show_change_link_unregistered(self): "Inlines `show_change_link` disabled for unregistered models." parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',))) self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model) self.assertNotContains(response, INLINE_CHANGELINK_HTML) def test_tabular_inline_show_change_link_false_registered(self): "Inlines `show_change_link` disabled by default." poll = Poll.objects.create(name="New poll") Question.objects.create(poll=poll) response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,))) self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model) self.assertNotContains(response, INLINE_CHANGELINK_HTML) def test_noneditable_inline_has_field_inputs(self): """Inlines without change permission shows field inputs on add form.""" response = self.client.get(reverse('admin:admin_inlines_novelreadonlychapter_add')) self.assertContains( response, '<input type="text" name="chapter_set-0-name" ' 'class="vTextField" maxlength="40" id="id_chapter_set-0-name">', html=True ) @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlineMedia(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_inline_media_only_base(self): holder = Holder(dummy=13) holder.save() Inner(dummy=42, holder=holder).save() change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,)) response = self.client.get(change_url) self.assertContains(response, 'my_awesome_admin_scripts.js') def test_inline_media_only_inline(self): holder = Holder3(dummy=13) holder.save() Inner3(dummy=42, holder=holder).save() change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,)) response = self.client.get(change_url) self.assertEqual( response.context['inline_admin_formsets'][0].media._js, [ 'admin/js/vendor/jquery/jquery.min.js', 'admin/js/jquery.init.js', 'admin/js/inlines.min.js', 'my_awesome_inline_scripts.js', 'custom_number.js', ] ) self.assertContains(response, 'my_awesome_inline_scripts.js') def test_all_inline_media(self): holder = Holder2(dummy=13) holder.save() Inner2(dummy=42, holder=holder).save() change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,)) response = self.client.get(change_url) self.assertContains(response, 'my_awesome_admin_scripts.js') 
self.assertContains(response, 'my_awesome_inline_scripts.js') @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlineAdminForm(TestCase): def test_immutable_content_type(self): """Regression for #9362 The problem depends only on InlineAdminForm and its "original" argument, so we can safely set the other arguments to None/{}. We just need to check that the content_type argument of Child isn't altered by the internals of the inline form.""" sally = Teacher.objects.create(name='Sally') john = Parent.objects.create(name='John') joe = Child.objects.create(name='Joe', teacher=sally, parent=john) iaf = InlineAdminForm(None, None, {}, {}, joe) parent_ct = ContentType.objects.get_for_model(Parent) self.assertEqual(iaf.original.content_type, parent_ct) @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlineProtectedOnDelete(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_deleting_inline_with_protected_delete_does_not_validate(self): lotr = Novel.objects.create(name='Lord of the rings') chapter = Chapter.objects.create(novel=lotr, name='Many Meetings') foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda') change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,)) response = self.client.get(change_url) data = { 'name': lotr.name, 'chapter_set-TOTAL_FORMS': 1, 'chapter_set-INITIAL_FORMS': 1, 'chapter_set-MAX_NUM_FORMS': 1000, '_save': 'Save', 'chapter_set-0-id': chapter.id, 'chapter_set-0-name': chapter.name, 'chapter_set-0-novel': lotr.id, 'chapter_set-0-DELETE': 'on' } response = self.client.post(change_url, data) self.assertContains(response, "Deleting chapter %s would require deleting " "the following protected related objects: foot note %s" % (chapter, foot_note)) @override_settings(ROOT_URLCONF='admin_inlines.urls') class TestInlinePermissions(TestCase): """ Make sure the admin respects permissions for objects that are edited inline. Refs #8060. """ @classmethod def setUpTestData(cls): cls.user = User(username='admin', is_staff=True, is_active=True) cls.user.set_password('secret') cls.user.save() cls.author_ct = ContentType.objects.get_for_model(Author) cls.holder_ct = ContentType.objects.get_for_model(Holder2) cls.book_ct = ContentType.objects.get_for_model(Book) cls.inner_ct = ContentType.objects.get_for_model(Inner2) # User always has permissions to add and change Authors, and Holders, # the main (parent) models of the inlines. Permissions on the inlines # vary per test. 
permission = Permission.objects.get(codename='add_author', content_type=cls.author_ct) cls.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_author', content_type=cls.author_ct) cls.user.user_permissions.add(permission) permission = Permission.objects.get(codename='add_holder2', content_type=cls.holder_ct) cls.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_holder2', content_type=cls.holder_ct) cls.user.user_permissions.add(permission) author = Author.objects.create(pk=1, name='The Author') book = author.books.create(name='The inline Book') cls.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,)) # Get the ID of the automatically created intermediate model for the Author-Book m2m author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book) cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk cls.holder = Holder2.objects.create(dummy=13) cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder) def setUp(self): self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(self.holder.id,)) self.client.force_login(self.user) def test_inline_add_m2m_noperm(self): response = self.client.get(reverse('admin:admin_inlines_author_add')) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_noperm(self): response = self.client.get(reverse('admin:admin_inlines_holder2_add')) # No permissions on Inner2s, so no inline self.assertNotContains(response, '<h2>Inner2s</h2>') self.assertNotContains(response, 'Add another Inner2') self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_change_m2m_noperm(self): response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_change_fk_noperm(self): response = self.client.get(self.holder_change_url) # No permissions on Inner2s, so no inline self.assertNotContains(response, '<h2>Inner2s</h2>') self.assertNotContains(response, 'Add another Inner2') self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_add_m2m_add_perm(self): permission = Permission.objects.get(codename='add_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(reverse('admin:admin_inlines_author_add')) # No change permission on Books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_add_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(reverse('admin:admin_inlines_holder2_add')) # Add permission on inner2s, so we get the inline self.assertContains(response, '<h2>Inner2s</h2>') self.assertContains(response, 'Add another Inner2') self.assertContains(response, '<input type="hidden" 
id="id_inner2_set-TOTAL_FORMS" ' 'value="3" name="inner2_set-TOTAL_FORMS">', html=True) def test_inline_change_m2m_add_perm(self): permission = Permission.objects.get(codename='add_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') self.assertNotContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_m2m_change_perm(self): permission = Permission.objects.get(codename='change_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # We have change perm on books, so we can add/change/delete inlines self.assertContains(response, '<h2>Author-book relationships</h2>') self.assertContains(response, 'Add another Author-book relationship') self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" ' 'value="4" name="Author_books-TOTAL_FORMS">', html=True) self.assertContains( response, '<input type="hidden" id="id_Author_books-0-id" value="%i" ' 'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id, html=True ) self.assertContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_fk_add_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add permission on inner2s, so we can add but not modify existing self.assertContains(response, '<h2>Inner2s</h2>') self.assertContains(response, 'Add another Inner2') # 3 extra forms only, not the existing instance form self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" ' 'name="inner2_set-TOTAL_FORMS">', html=True ) self.assertNotContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) def test_inline_change_fk_change_perm(self): permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change permission on inner2s, so we can change existing but not add new self.assertContains(response, '<h2>Inner2s</h2>', count=2) # Just the one form for existing instances self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">', html=True ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) # max-num 0 means we can't add new ones self.assertContains( response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS">', html=True ) # TabularInline self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True) self.assertContains( response, '<input type="number" name="inner2_set-2-0-dummy" value="%s" ' 'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy, html=True, ) def test_inline_change_fk_add_change_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = 
Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add/change perm, so we can add new and change existing self.assertContains(response, '<h2>Inner2s</h2>') # One form for existing instance and three extra for new self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">', html=True ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) def test_inline_change_fk_change_del_perm(self): permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change/delete perm on inner2s, so we can change/delete existing self.assertContains(response, '<h2>Inner2s</h2>') # One form for existing instance only, no new self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">', html=True ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) self.assertContains(response, 'id="id_inner2_set-0-DELETE"') def test_inline_change_fk_all_perms(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # All perms on inner2s, so we can add/change/delete self.assertContains(response, '<h2>Inner2s</h2>', count=2) # One form for existing instance only, three for new self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">', html=True ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id, html=True ) self.assertContains(response, 'id="id_inner2_set-0-DELETE"') # TabularInline self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True) self.assertContains( response, '<input type="number" name="inner2_set-2-0-dummy" value="%s" ' 'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy, html=True, ) @override_settings(ROOT_URLCONF='admin_inlines.urls') class SeleniumTests(AdminSeleniumTestCase): available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps def setUp(self): User.objects.create_superuser(username='super', password='secret', email='super@example.com') def test_add_stackeds(self): """ The "Add another XXX" link correctly adds items to the stacked formset. 
""" self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add')) inline_id = '#inner4stacked_set-group' def rows_length(): return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id)) self.assertEqual(rows_length(), 3) add_button = self.selenium.find_element_by_link_text( 'Add another Inner4 stacked') add_button.click() self.assertEqual(rows_length(), 4) def test_delete_stackeds(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add')) inline_id = '#inner4stacked_set-group' def rows_length(): return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id)) self.assertEqual(rows_length(), 3) add_button = self.selenium.find_element_by_link_text( 'Add another Inner4 stacked') add_button.click() add_button.click() self.assertEqual(rows_length(), 5, msg="sanity check") for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id): delete_link.click() self.assertEqual(rows_length(), 3) def test_add_inlines(self): """ The "Add another XXX" link correctly adds items to the inline form. """ self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add')) # There's only one inline to start with and it has the correct ID. self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')), 1) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[0].get_attribute('id'), 'profile_set-0') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1) # Add an inline self.selenium.find_element_by_link_text('Add another Profile').click() # The inline has been added, it has the right id, and it contains the # correct fields. 
self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1) # Let's add another one to be sure self.selenium.find_element_by_link_text('Add another Profile').click() self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1) # Enter some data and click 'Save' self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1') self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2') self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1') self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2') self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1') self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2') self.selenium.find_element_by_xpath('//input[@value="Save"]').click() self.wait_page_loaded() # The objects have been created in the database self.assertEqual(ProfileCollection.objects.all().count(), 1) self.assertEqual(Profile.objects.all().count(), 3) def test_delete_inlines(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add')) # Add a few inlines self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.assertEqual(len(self.selenium.find_elements_by_css_selector( '#profile_set-group table tr.dynamic-profile_set')), 5) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1) # Click on a few delete buttons self.selenium.find_element_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click() self.selenium.find_element_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete 
a').click() # The rows are gone and the IDs have been re-sequenced self.assertEqual(len(self.selenium.find_elements_by_css_selector( '#profile_set-group table tr.dynamic-profile_set')), 3) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1) def test_alternating_rows(self): self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add')) # Add a few inlines self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() row_selector = 'form#profilecollection_form tr.dynamic-profile_set' self.assertEqual(len(self.selenium.find_elements_by_css_selector( "%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows") self.assertEqual(len(self.selenium.find_elements_by_css_selector( "%s.row2" % row_selector)), 1, msg="Expect one row2 styled row") def test_collapsed_inlines(self): # Collapsed inlines have SHOW/HIDE links. self.admin_login(username='super', password='secret') self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add')) # One field is in a stacked inline, other in a tabular one. test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title'] show_links = self.selenium.find_elements_by_link_text('SHOW') self.assertEqual(len(show_links), 3) for show_index, field_name in enumerate(test_fields, 0): self.wait_until_invisible(field_name) show_links[show_index].click() self.wait_until_visible(field_name) hide_links = self.selenium.find_elements_by_link_text('HIDE') self.assertEqual(len(hide_links), 2) for hide_index, field_name in enumerate(test_fields, 0): self.wait_until_visible(field_name) hide_links[hide_index].click() self.wait_until_invisible(field_name)
{ "content_hash": "9cbe9e675b9752a9e88b5208e12a05ce", "timestamp": "", "source": "github", "line_count": 979, "max_line_length": 117, "avg_line_length": 48.36465781409601, "alnum_prop": 0.641238463325519, "repo_name": "timgraham/django", "id": "0a1ab5acbbb4f0a29fc4e9d28c362373d2806240", "size": "47349", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "tests/admin_inlines/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "84974" }, { "name": "HTML", "bytes": "224563" }, { "name": "JavaScript", "bytes": "257097" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "12931531" }, { "name": "Shell", "bytes": "809" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
default_path = "../src/"

import sys
sys.path.insert(0, default_path)

import unittest
from helpers.helper_evaluation_tuning_tester import GetActual_FromIndex

import logging
logging.basicConfig(format='%(message)s', level=logging.WARN)


class _Config:
    DEBUG = True
    SKIP_SLOW_TESTS = True
    SKIP_CONTROL_TESTS = False
    SKIP_MONTE_CARLO_TESTS = False

    @staticmethod
    def display(e):
        if _Config.DEBUG:
            sys.stderr.write(str(e) + " ... ")


class PLOSOneEXP_Controls(unittest.TestCase, GetActual_FromIndex):

    def __init__(self, *args, **kwords):
        unittest.TestCase.__init__(self, *args, **kwords)
        GetActual_FromIndex.__init__(self)
        self._test_output_path = "test/test_outputs/"
        self._file_path = "../results/Control_91-92_March2017/"

    @unittest.skipIf(_Config.SKIP_CONTROL_TESTS, "Skipping control test..")
    def test_control_VAcc(self):
        actual = self._get_actual(m=2, e=3, filename="l91.csv", n=91,
                                  indexes_are_important=True, support_access=True)
        expected = 0.00340693210971
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_CONTROL_TESTS, "Skipping control test..")
    def test_control_VNoAcc(self):
        actual = self._get_actual(m=2, e=3, filename="l92.csv", n=92,
                                  indexes_are_important=True, support_access=False)
        expected = 0.000708533321995
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_CONTROL_TESTS, "Skipping control test..")
    def test_control_OddNoAcc(self):
        actual = self._get_actual(m=2, e=3, filename="odds.csv", n=46,
                                  indexes_are_important=True, support_access=False)
        expected = 0.00429237121612
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_CONTROL_TESTS, "Skipping control test..")
    def test_control_OddAcc(self):
        actual = self._get_actual(m=2, e=3, filename="odds44.csv", n=44,
                                  indexes_are_important=True, support_access=False)
        expected = 0.00762615128411
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_CONTROL_TESTS, "Skipping control test..")
    def test_control_EvenNoAcc(self):
        actual = self._get_actual(m=2, e=3, filename="evens.csv", n=46,
                                  indexes_are_important=True, support_access=False)
        expected = 0.00429237121612
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_CONTROL_TESTS, "Skipping control test..")
    def test_control_EvenAcc(self):
        actual = self._get_actual(m=2, e=3, filename="evens44.csv", n=44,
                                  indexes_are_important=True, support_access=False)
        expected = 0.00762615128411
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_SLOW_TESTS, "Skipping slow test..")
    def test_control_E10Acc(self):
        actual = self._get_actual(m=2, e=4, filename="edges_l3893.csv", n=2476,
                                  indexes_are_important=False, support_access=True)
        expected = 0.00548091787895
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_SLOW_TESTS, "Skipping slow test..")
    def test_control_E10NoAcc(self):
        actual = self._get_actual(m=2, e=4, filename="edges_l3991.csv", n=2522,
                                  indexes_are_important=False, support_access=False)
        expected = 0.000228102472245
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual


class PLOSOneEXP_MonteCarlo(unittest.TestCase, GetActual_FromIndex):

    def __init__(self, *args, **kwords):
        unittest.TestCase.__init__(self, *args, **kwords)
        GetActual_FromIndex.__init__(self)
        self._file_path = "../results/installed_aos+rod_July2016/"

    @unittest.skipIf(_Config.SKIP_MONTE_CARLO_TESTS, "Skipping monte carlo test..")
    def test_monte_carlo_Inst(self):
        actual = self._get_actual(m=2, e=3, filename="installed_newlines_removed.csv", n=44,
                                  indexes_are_important=True, support_access=False)
        expected = 0.00591398315505
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual

    @unittest.skipIf(_Config.SKIP_MONTE_CARLO_TESTS, "Skipping monte carlo test..")
    def test_monte_carlo_Twkr(self):
        actual = self._get_actual(m=2, e=3, filename="tweaker.csv", n=44,
                                  indexes_are_important=True, support_access=False)
        expected = 0.00460301483137
        _Config.display(expected)
        self.assertTrue(self._is_similar(actual, expected))
        # self.assertTrue(actual == expected)
        return actual


if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
    unittest.TextTestRunner(verbosity=3).run(suite)
{ "content_hash": "a11354b924319c658641079300d99190", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 142, "avg_line_length": 46.80672268907563, "alnum_prop": 0.670197486535009, "repo_name": "LightStage-Aber/LightStage-Repo", "id": "98bbea94d8942600d58a59d8df4d156096373e34", "size": "5570", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/test_PLOS_ONE_Article_Control_MC_EXPS_May2017.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "12372" }, { "name": "Jupyter Notebook", "bytes": "1716600" }, { "name": "Makefile", "bytes": "90" }, { "name": "Python", "bytes": "408936" }, { "name": "Shell", "bytes": "19977" } ], "symlink_target": "" }
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.bricktools import create_coinbase, create_brick, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time

NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
VB_TOP_BITS = 0x20000000


def trueDummy(tx):
    # Replace the NULLDUMMY-compliant empty dummy push at the start of the
    # scriptSig with OP_TRUE (0x51), producing a non-compliant transaction.
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if len(newscript) == 0:
            assert(len(i) == 0)
            newscript.append(b'\x51')
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)
    tx.rehash()

'''
This test is meant to exercise NULLDUMMY softfork.
Connect to a single node.
Generate 2 bricks (save the coinbases for later).
Generate 427 more bricks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th brick.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st brick.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd brick.
'''


class NULLDUMMYTest(BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        self.setup_clean_wall = True

    def setup_network(self):
        # Must set the brickversion for this test
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-walletprematurewitness']])

    def run_test(self):
        self.address = self.nodes[0].getnewaddress()
        self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
        self.wit_address = self.nodes[0].addwitnessaddress(self.address)
        self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)

        NetworkThread().start()  # Start up network handling in another thread
        self.coinbase_bricks = self.nodes[0].generate(2)  # Brick 2
        coinbase_txid = []
        for i in self.coinbase_bricks:
            coinbase_txid.append(self.nodes[0].getbrick(i)['tx'][0])
        self.nodes[0].generate(427)  # Brick 429
        self.lastbrickhash = self.nodes[0].getbestbrickhash()
        self.tip = int("0x" + self.lastbrickhash, 0)
        self.lastbrickheight = 429
        self.lastbricktime = int(time.time()) + 429

        print("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
        test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
        txid1 = self.tx_submit(self.nodes[0], test1txs[0])
        test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
        txid2 = self.tx_submit(self.nodes[0], test1txs[1])
        test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
        txid3 = self.tx_submit(self.nodes[0], test1txs[2])
        self.brick_submit(self.nodes[0], test1txs, False, True)

        print("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
        test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48)
        trueDummy(test2tx)
        txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)

        print("Test 3: Non-NULLDUMMY base transactions should be accepted in a brick before activation [431]")
        self.brick_submit(self.nodes[0], [test2tx], False, True)

        print("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
        test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47)
        test6txs = [CTransaction(test4tx)]
        trueDummy(test4tx)
        self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
        self.brick_submit(self.nodes[0], [test4tx])

        print("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
        test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
        test6txs.append(CTransaction(test5tx))
        test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
        self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR)
        self.brick_submit(self.nodes[0], [test5tx], True)

        print("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in brick after activation [432]")
        for i in test6txs:
            self.tx_submit(self.nodes[0], i)
        self.brick_submit(self.nodes[0], test6txs, True, True, VB_TOP_BITS)

    def create_transaction(self, node, txid, to_address, amount):
        inputs = [{"txid": txid, "vout": 0}]
        outputs = {to_address: amount}
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(signresult['hex']))
        tx.deserialize(f)
        return tx

    def tx_submit(self, node, tx, msg=""):
        tx.rehash()
        try:
            node.sendrawtransaction(bytes_to_hex_str(tx.serialize_with_witness()), True)
        except JSONRPCException as exp:
            assert_equal(exp.error["message"], msg)
        else:
            assert_equal('', msg)
        return tx.hash

    def brick_submit(self, node, txs, witness=False, accept=False, version=4):
        brick = create_brick(self.tip, create_coinbase(self.lastbrickheight + 1), self.lastbricktime + 1)
        brick.nVersion = version
        for tx in txs:
            tx.rehash()
            brick.vtx.append(tx)
        brick.hashMerkleRoot = brick.calc_merkle_root()
        witness and add_witness_commitment(brick)
        brick.rehash()
        brick.solve()
        node.submitbrick(bytes_to_hex_str(brick.serialize(True)))
        if accept:
            assert_equal(node.getbestbrickhash(), brick.hash)
            self.tip = brick.sha256
            self.lastbrickhash = brick.hash
            self.lastbricktime += 1
            self.lastbrickheight += 1
        else:
            assert_equal(node.getbestbrickhash(), self.lastbrickhash)


if __name__ == '__main__':
    NULLDUMMYTest().main()
{ "content_hash": "3677f3f3c884eabe1f41f86935a2c0d3", "timestamp": "", "source": "github", "line_count": 144, "max_line_length": 137, "avg_line_length": 44.291666666666664, "alnum_prop": 0.6578864847914707, "repo_name": "magacoin/magacoin", "id": "2603fbf15cc6e1c34e3785ac4866543713185ca2", "size": "6588", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "qa/rpc-tests/nulldummy.py", "mode": "33261", "license": "mit", "language": [ { "name": "Assembly", "bytes": "28456" }, { "name": "C", "bytes": "696476" }, { "name": "C++", "bytes": "4589232" }, { "name": "CSS", "bytes": "1127" }, { "name": "HTML", "bytes": "50621" }, { "name": "Java", "bytes": "30290" }, { "name": "M4", "bytes": "185658" }, { "name": "Makefile", "bytes": "105693" }, { "name": "Objective-C", "bytes": "3892" }, { "name": "Objective-C++", "bytes": "7232" }, { "name": "Protocol Buffer", "bytes": "2328" }, { "name": "Python", "bytes": "1029872" }, { "name": "QMake", "bytes": "2020" }, { "name": "Roff", "bytes": "30536" }, { "name": "Shell", "bytes": "47182" } ], "symlink_target": "" }
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *

walltypes = UnwrapElement(IN[0])
booleans = list()

for item in walltypes:
    try:
        # In Revit's WallFunction enumeration, the value 1 corresponds to
        # an Exterior wall.
        wallfunction = item.get_Parameter(BuiltInParameter.FUNCTION_PARAM).AsInteger()
        if wallfunction == 1:
            booleans.append(True)
        else:
            booleans.append(False)
    except:
        # Element has no Function parameter (or isn't a wall type): report False.
        booleans.append(False)

OUT = booleans
{ "content_hash": "5ba6e350fd1b4051f02cc0676a691ee6", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 80, "avg_line_length": 21.529411764705884, "alnum_prop": 0.7459016393442623, "repo_name": "andydandy74/ClockworkForDynamo", "id": "85c8edec937458d1f7de48895a687b4a92ef86bc", "size": "366", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "nodes/0.7.x/python/WallType.FunctionIsExterior.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "717382" } ], "symlink_target": "" }
import sqlalchemy

import config, constants, util


def textclause_repr(self):
    return 'text(%r)' % self.text


def table_repr(self):
    data = {
        'name': self.name,
        'columns': constants.NLTAB.join([repr(cl) for cl in self.columns]),
        'constraints': constants.NLTAB.join(
            [repr(cn) for cn in self.constraints
             if not isinstance(cn, sqlalchemy.PrimaryKeyConstraint)]),
        'index': '',
        'schema': self.schema is not None and "schema='%s'" % self.schema or '',
    }
    if data['constraints']:
        data['constraints'] = data['constraints'] + ','

    return util.as_out_str(constants.TABLE % data)


def _repr_coltype_as(coltype, as_type):
    """repr a Type instance as a super type."""
    specimen = object.__new__(as_type)
    specimen.__dict__ = coltype.__dict__
    return repr(specimen)


def column_repr(self):
    kwarg = []
    if self.key != self.name:
        kwarg.append('key')
    if hasattr(self, 'primary_key'):
        kwarg.append('primary_key')
    if not self.nullable:
        kwarg.append('nullable')
    if self.onupdate:
        kwarg.append('onupdate')
    if self.default:
        kwarg.append('default')
    ks = ', '.join('%s=%r' % (k, getattr(self, k)) for k in kwarg)

    name = self.name

    # Use the exact type repr unless generic (cross-platform) types were
    # explicitly requested via the options.
    if not (hasattr(config, 'options') and config.options.generictypes):
        coltype = repr(self.type)
    elif type(self.type).__module__ == 'sqlalchemy.types':
        coltype = repr(self.type)
    else:
        # Try to 'cast' this column type to a cross-platform type
        # from sqlalchemy.types, dropping any database-specific type
        # arguments.
        for base in type(self.type).__mro__:
            if (base.__module__ == 'sqlalchemy.types' and
                    base.__name__ in sqlalchemy.__all__):
                coltype = _repr_coltype_as(self.type, base)
                break
        # FIXME: if a dialect has a non-standard type that does not
        # derive from an ANSI type, there's no choice but to ignore
        # generic-types and output the exact type. However, import
        # headers have already been output and lack the required
        # dialect import.
        else:
            coltype = repr(self.type)

    data = {
        'name': self.name,
        'type': coltype,
        'constraints': ', '.join([repr(cn) for cn in self.constraints]),
        'args': ks and ks or '',
    }

    if data['constraints']:
        data['constraints'] = ', ' + data['constraints']
    if data['args']:
        data['args'] = ', ' + data['args']

    return util.as_out_str(constants.COLUMN % data)


def foreignkeyconstraint_repr(self):
    data = {
        'name': repr(self.name),
        'names': repr([x.parent.name for x in self.elements]),
        'specs': repr([x._get_colspec() for x in self.elements]),
    }
    return util.as_out_str(constants.FOREIGN_KEY % data)


def index_repr(index):
    cols = []
    for column in index.columns:
        # FIXME: still punting on the issue of unicode table names
        if util.is_python_identifier(column.name):
            cols.append('%s.c.%s' % (column.table.name, column.name))
        else:
            cols.append('%s.c[%r]' % (column.table.name, column.name))

    data = {
        'name': repr(index.name),
        'columns': ', '.join(cols),
        'unique': repr(index.unique),
    }
    return util.as_out_str(constants.INDEX % data)


def monkey_patch_sa():
    sqlalchemy.sql.expression._TextClause.__repr__ = textclause_repr
    sqlalchemy.schema.Table.__repr__ = table_repr
    sqlalchemy.schema.Column.__repr__ = column_repr
    sqlalchemy.schema.ForeignKeyConstraint.__repr__ = foreignkeyconstraint_repr
    sqlalchemy.schema.Index.__repr__ = index_repr
{ "content_hash": "653b2aaf27057e2f8fd1f6e6ff002b90", "timestamp": "", "source": "github", "line_count": 109, "max_line_length": 80, "avg_line_length": 34.944954128440365, "alnum_prop": 0.5893935416119717, "repo_name": "DarioGT/SqlAutoCode-", "id": "54a27d129f360b782c668de6fbde1b14957d6b58", "size": "3809", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sqlautocode/formatter.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "55403" } ], "symlink_target": "" }
import sys
from difflib import SequenceMatcher
from functools import partial

if sys.version_info[0] == 2:
    # from six.py
    range = xrange
    text_type = unicode
    binary_type = str
else:
    binary_type = bytes
    text_type = str

# CORE

__version__ = '1'


def ratio(a, b):
    """ similarity ratio between two strings """
    return matcher(a, b).ratio()


def main_ratio(a, b):
    """
    find the longest match between the original strings
    pass the longest match and the shortest original string to ratio()
    """
    longest_match = matcher(a, b).find_longest_match(0, len(a), 0, len(b))
    longest_match = a[longest_match.a:longest_match.a + longest_match.size]

    if len(a) < len(b):
        shortest_original = a
    else:
        shortest_original = b

    return ratio(longest_match, shortest_original)


def partial_ratio(a, b):
    """
    using only non zero length matching blocks
    select everything from the first block to the last
    pass the results through ratio()
    """
    matches = matcher(a, b).get_matching_blocks()
    matches = tuple(filter(lambda m: m.size > 0, matches))

    if len(matches) == 0:
        return 0

    matches = sorted(matches, key=lambda m: m.a)
    part_a = a[matches[0].a:matches[-1].a + matches[-1].size]

    matches = sorted(matches, key=lambda m: m.b)
    part_b = b[matches[0].b:matches[-1].b + matches[-1].size]

    return ratio(part_a, part_b)


def token_ratio(a, b, sep=' '):
    """
    each part is split() with sep
    the parts are sorted
    joined back together
    the results are passed through ratio()
    """
    a = sep.join(sorted(set(a.split(sep))))
    b = sep.join(sorted(set(b.split(sep))))
    return ratio(a, b)


def iratio(a, b):
    """ case insensitive version of ratio() """
    return ratio(a.lower(), b.lower())


def main_iratio(a, b):
    """ case insensitive version of main_ratio() """
    return main_ratio(a.lower(), b.lower())


def partial_iratio(a, b):
    """ case insensitive version of partial_ratio() """
    return partial_ratio(a.lower(), b.lower())


def token_iratio(a, b):
    """ case insensitive version of token_ratio() """
    return token_ratio(a.lower(), b.lower())


matcher = partial(SequenceMatcher, None)

# AMENITIES

def all_text(i):
    """
    convert all items to text
    None becomes an empty string
    """
    return (text_type('') if e is None else text_type(e) for e in i)


def ratio_to_percentile(ratio):
    """ converts to percentile integer """
    return int(round(100 * ratio))
{ "content_hash": "daa40d1129a8a5fc116119ae01b54113", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 75, "avg_line_length": 22.982608695652175, "alnum_prop": 0.6027241770715096, "repo_name": "medecau/fuzzywuzzy", "id": "29d9a081cc6070ea6e2098e409311d3ea345984a", "size": "2666", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fuzzywuzzy.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "5982" } ], "symlink_target": "" }
from matplotlib import use, cm use('TkAgg') import numpy as np import scipy.io import scipy.misc import matplotlib.pyplot as plt from findClosestCentroids import findClosestCentroids from computeCentroids import computeCentroids from runkMeans import runkMeans from kMeansInitCentroids import kMeansInitCentroids from show import show print 'Finding closest centroids.' # Load an example dataset that we will be using data = scipy.io.loadmat('ex7data2.mat') X = data['X'] # Select an initial set of centroids K = 3 # 3 Centroids initial_centroids = np.array([[3, 3], [6, 2], [8, 5]]) # Find the closest centroids for the examples using the # initial_centroids val, idx = findClosestCentroids(X, initial_centroids) print 'Closest centroids for the first 3 examples:' print idx[0:3].tolist() print '(the closest centroids should be 0, 2, 1 respectively)' raw_input("Program paused. Press Enter to continue...") ## ===================== Part 2: Compute Means ========================= # After implementing the closest centroids function, you should now # complete the computeCentroids function. # print 'Computing centroids means.' # Compute means based on the closest centroids found in the previous part. centroids = computeCentroids(X, idx, K) print 'Centroids computed after initial finding of closest centroids:' for c in centroids: print c print '(the centroids should be' print ' [ 2.428301 3.157924 ]' print ' [ 5.813503 2.633656 ]' print ' [ 7.119387 3.616684 ]' raw_input("Program paused. Press Enter to continue...") ## =================== Part 3: K-Means Clustering ====================== # After you have completed the two functions computeCentroids and # findClosestCentroids, you have all the necessary pieces to run the # kMeans algorithm. In this part, you will run the K-Means algorithm on # the example dataset we have provided. # print 'Running K-Means clustering on example dataset.' # Load an example dataset data = scipy.io.loadmat('ex7data2.mat') X = data['X'] # Settings for running K-Means K = 3 max_iters = 10 # For consistency, here we set centroids to specific values # but in practice you want to generate them automatically, such as by # settings them to be random examples (as can be seen in # kMeansInitCentroids). initial_centroids = [[3, 3], [6, 2], [8, 5]] # Run K-Means algorithm. The 'true' at the end tells our function to plot # the progress of K-Means centroids, idx = runkMeans(X, initial_centroids, max_iters, True) print 'K-Means Done.' raw_input("Program paused. Press Enter to continue...") ## ============= Part 4: K-Means Clustering on Pixels =============== # In this exercise, you will use K-Means to compress an image. To do this, # you will first run K-Means on the colors of the pixels in the image and # then you will map each pixel on to it's closest centroid. # # You should now complete the code in kMeansInitCentroids.m # print 'Running K-Means clustering on pixels from an image.' # Load an image of a bird A = scipy.misc.imread('bird_small.png') # If imread does not work for you, you can try instead # load ('bird_small.mat') A = A / 255.0 # Divide by 255 so that all values are in the range 0 - 1 # Size of the image img_size = A.shape # Reshape the image into an Nx3 matrix where N = number of pixels. # Each row will contain the Red, Green and Blue pixel values # This gives us our dataset matrix X that we will use K-Means on. 
X = A.reshape(img_size[0] * img_size[1], 3)

# Run your K-Means algorithm on this data
# You should try different values of K and max_iters here
K = 16
max_iters = 10

# When using K-Means, it is important to initialize the centroids
# randomly.
# You should complete the code in kMeansInitCentroids.py before proceeding
initial_centroids = kMeansInitCentroids(X, K)

# Run K-Means
centroids, idx = runkMeans(X, initial_centroids, max_iters)

raw_input("Program paused. Press Enter to continue...")

## ================= Part 5: Image Compression ======================
# In this part of the exercise, you will use the clusters of K-Means to
# compress an image. To do this, we first find the closest clusters for
# each example. After that, we map each pixel to the value of its
# closest centroid.

print 'Applying K-Means to compress an image.'

# Find closest cluster members
_, idx = findClosestCentroids(X, centroids)

# Essentially, now we have represented the image X in terms of the
# indices in idx.

# We can now recover the image from the indices (idx) by mapping each pixel
# (specified by its index in idx) to the centroid value
X_recovered = np.array([centroids[e] for e in idx])

# Reshape the recovered image into proper dimensions
X_recovered = X_recovered.reshape(img_size[0], img_size[1], 3)

# Display the original image
plt.subplot(1, 2, 1)
plt.imshow(A)
plt.title('Original')
show()

# Display compressed image side by side
plt.subplot(1, 2, 2)
plt.imshow(X_recovered)
plt.title('Compressed, with %d colors.' % K)
show()

raw_input("Program paused. Press Enter to continue...")
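The exercise functions imported at the top (findClosestCentroids, computeCentroids) are left for the student to implement and are not part of this file. For orientation only, not the course's reference solution, vectorized NumPy versions matching the call sites above (including the (val, idx) return pair) could look like this:

import numpy as np

def findClosestCentroids(X, centroids):
    # Distance from every example to every centroid: an (m, K) matrix.
    dists = np.linalg.norm(X[:, np.newaxis, :] - centroids, axis=2)
    idx = np.argmin(dists, axis=1)           # index of the nearest centroid
    val = dists[np.arange(X.shape[0]), idx]  # distance to that centroid
    return val, idx

def computeCentroids(X, idx, K):
    # New centroid k = mean of all examples currently assigned to cluster k.
    return np.array([X[idx == k].mean(axis=0) for k in range(K)])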
{ "content_hash": "8b94be39527c6548adff7b39d85e79bc", "timestamp": "", "source": "github", "line_count": 158, "max_line_length": 75, "avg_line_length": 31.670886075949365, "alnum_prop": 0.7142286171063149, "repo_name": "jrbadiabo/Coursera-Stanford-ML-Class", "id": "803f45ec24b67e8b96bd46a03591a307b7a032ea", "size": "5838", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Python_Version/Ex7.K-Means_PCA_-_Clustering/ex7.py", "mode": "33188", "license": "mit", "language": [ { "name": "Matlab", "bytes": "255316" } ], "symlink_target": "" }
from __future__ import unicode_literals import json from abc import ABCMeta from collections import OrderedDict from functools import wraps import sys from os.path import dirname, abspath # just until ransom becomes its own package sys.path.append(dirname(dirname(abspath(__file__)))) import ransom from params import SingleParam, StaticParam from models import get_unique_func, get_priority_func from utils import (PriorityQueue, MaxInt, chunked_iter, make_type_wrapper, OperationExample) # TODO: handle automatic redirecting better # TODO: support batching and optimization limits # TODO: concurrency. get_current_task() -> get_current_tasks() # TODO: wrap exceptions # TODO: separate structure for saving completed subops (for debugging?) # TODO: WebRequestOperation: accepts URL, action (default: GET) # TODO: Model links (url attribute) # TODO: support field param_type (for cases with ints and strs) # TODO: use source descriptor instead of api_url? (for op.source) # TODO: check that subop_chain types match up # TODO: check that priority attribute exists on output_type where applicable """ - what if operations were iterable over their results and process() returned the operation itself? (more expensive to iterate and find non-dupe results, would set ops help?) - client -> root_owner. parent operation (client if no parent op) -> owner. - pregenerate MediawikiCalls/URLs on QueryOperations Operation modifiers: - Prioritized - Recursive - Buffered fun metadata: - operations executed - suboperations skipped (from dedupe/prioritization/laziness) - web requests executed, kb downloaded retry strategies: - absolute number of failures - streaks/runs of failures - fail if first operation fails - reduce batch size/query limit on timeouts prioritization/batching/concurrency implementation thoughts: - hands-off implementation via multiplexing? - separate priority queues for params and suboperations? - fancy new datastructure with dedupe + priority queueing built-in - buffering: do 3/5/10 GetCategoryInfos before fetching member pages - early subop production based on next parameter priority sinking below a certain threshold? 
(e.g., next param's subcats=5 -> fetch more category infos) """ DEFAULT_API_URL = 'http://en.wikipedia.org/w/api.php' DEFAULT_BASE_URL = 'http://en.wikipedia.org/wiki/' DEFAULT_HEADERS = {'User-Agent': ('Wapiti/0.0.0 Mahmoud Hashemi' ' mahmoudrhashemi@gmail.com') } ALL = MaxInt('ALL') DEFAULT_MIN = 50 class WapitiException(Exception): pass class NoMoreResults(Exception): pass DEFAULT_WEB_CLIENT = ransom.Client({'headers': DEFAULT_HEADERS}) class MockClient(object): def __init__(self, is_bot=False, **kwargs): self.debug = kwargs.pop('debug', False) self.web_client = DEFAULT_WEB_CLIENT self.api_url = DEFAULT_API_URL self.is_bot = is_bot DEFAULT_CLIENT = MockClient() Tune = make_type_wrapper('Tune', [('priority', None), ('buffer', None)]) Recursive = make_type_wrapper('Recursive', [('is_recursive', True)]) def get_unwrapped_options(wr_type): try: return dict(wr_type._wrapped_dict), wr_type._wrapped except AttributeError: return {}, wr_type class LimitSpec(object): def __init__(self, _max, bot_max=None): self.max = int(_max) self.bot_max = bot_max or (self.max * 10) def get_limit(self, is_bot=False): if is_bot: return self.bot_max return self.max def __int__(self): return self.max class ParamLimit(LimitSpec): pass class QueryLimit(LimitSpec): def __init__(self, _max, bot_max=None, mw_default=None, _min=None): super(QueryLimit, self).__init__(_max, bot_max) self.mw_default = mw_default if _min is None: _min = DEFAULT_MIN self.min = min(self.max, _min) PL_50_500 = ParamLimit(50, 500) QL_50_500 = QueryLimit(50, 500, 10) DEFAULT_QUERY_LIMIT = QL_500_5000 = QueryLimit(500, 5000, 10) def get_inputless_init(old_init): """ Used for Operations like get_random() which don't take an input parameter. """ if getattr(old_init, '_is_inputless', None): return old_init @wraps(old_init) def inputless_init(self, limit=None, **kw): kw['input_param'] = None return old_init(self, limit=limit, **kw) inputless_init._is_inputless = True return inputless_init def get_field_str(field): out_str = field.key mods = [] if field.required: mods.append('required') if field.multi: mods.append('multi') if mods: out_str += ' (%s)' % ', '.join(mods) return out_str def operation_signature_doc(operation): if operation.input_field is None: doc_input = 'None' else: doc_input = operation.input_field.key doc_output = operation.singular_output_type.__name__ doc_template = 'Input: %s\n' if operation.is_bijective: doc_template += 'Output: %s\n' else: doc_template += 'Output: List of %s\n' print_fields = [f for f in getattr(operation, 'fields', []) if not isinstance(f, StaticParam)] if print_fields: doc_template += 'Options: ' doc_template += ','.join([get_field_str(f) for f in print_fields]) + '\n' if hasattr(operation, 'examples'): doc_template += 'Examples: \n\t' doc_template += '\n\t'.join([repr(x) for x in operation.examples]) + '\n' return doc_template % (doc_input, doc_output) class OperationMeta(ABCMeta): _all_ops = [] def __new__(cls, name, bases, attrs): ret = super(OperationMeta, cls).__new__(cls, name, bases, attrs) if name == 'Operation' or name == 'QueryOperation': return ret # TODO: add elegance? 
subop_chain = getattr(ret, 'subop_chain', []) try: input_field = ret.input_field except AttributeError: input_field = subop_chain[0].input_field ret.input_field = input_field if input_field is None: ret.__init__ = get_inputless_init(ret.__init__) else: input_field.required = True # TODO: run through subop_chain, checking the outputs match up try: output_type = ret.output_type except AttributeError: output_type = subop_chain[-1].singular_output_type for st in subop_chain: if not st.is_bijective: output_type = [output_type] break ret.output_type = output_type try: ret.singular_output_type = ret.output_type[0] except (TypeError, IndexError): ret.singular_output_type = ret.output_type # TODO: support manual overrides for the following? ret.is_multiargument = getattr(input_field, 'multi', False) ret.is_bijective = True if type(output_type) is list and output_type: ret.is_bijective = False for ex in getattr(ret, 'examples', []): ex.bind_op_type(ret) ret.__doc__ = (ret.__doc__ and ret.__doc__ + '\n') or '' ret.__doc__ += operation_signature_doc(ret) cls._all_ops.append(ret) return ret @property def help_str(self): ret = '\n\t'.join([self.__name__] + self.__doc__.strip().split('\n')) # TODO move options and examples to the __doc__ ret += '\n' return ret class OperationQueue(object): # TODO: chunking/batching should probably happen here # with the assistance of another queue for prioritized params # (i.e., don't create subops so eagerly) def __init__(self, qid, op_type, default_limit=ALL): self.qid = qid options, unwrapped = get_unwrapped_options(op_type) self.op_type = op_type self.unwrapped_type = unwrapped self.options = options self.unique_key = options.get('unique_key', 'unique_key') self.unique_func = get_unique_func(self.unique_key) self.priority = options.get('priority', 0) self.priority_func = get_priority_func(self.priority) self.default_limit = default_limit self.param_set = set() self.op_queue = PriorityQueue() self._dup_params = [] def enqueue(self, param, **kw): unique_key = self.unique_func(param) if unique_key in self.param_set: self._dup_params.append(unique_key) return priority = self.priority_func(param) kwargs = {'limit': self.default_limit} kwargs.update(kw) new_subop = self.op_type(param, **kwargs) new_subop._origin_queue = self.qid self.op_queue.add(new_subop, priority) self.param_set.add(unique_key) def enqueue_many(self, param_list, **kw): for param in param_list: self.enqueue(param, **kw) return def __len__(self): return len(self.op_queue) def peek(self, *a, **kw): return self.op_queue.peek(*a, **kw) def pop(self, *a, **kw): return self.op_queue.pop(*a, **kw) class Operation(object): """ An abstract class connoting some semblance of statefulness and introspection (e.g., progress monitoring). 
""" __metaclass__ = OperationMeta subop_chain = [] def __init__(self, input_param, limit=None, **kw): self.client = kw.pop('client', None) if self.client is None: self.client = DEFAULT_CLIENT self.api_url = self.client.api_url self.is_bot_op = self.client.is_bot self.set_input_param(input_param) self.set_limit(limit) self.kwargs = kw self.started = False self.results = OrderedDict() subop_queues = [OperationQueue(0, type(self))] if self.subop_chain: subop_queues.extend([OperationQueue(i + 1, st) for i, st in enumerate(self.subop_chain)]) subop_queues[1].enqueue_many(self.input_param_list, client=self.client) self.subop_queues = subop_queues def get_progress(self): return len(self.results) def get_relative_progress(self): if self.limit and self.limit is not ALL: return len(self.results) / float(self.limit) return 0.0 def set_input_param(self, param): self._orig_input_param = self._input_param = param if self.input_field: self._input_param = self.input_field.get_value(param) self._input_param_list = self.input_field.get_value_list(param) else: self._input_param = None self._input_param_list = [] # TODO: necessary? @property def input_param(self): return self._input_param @property def input_param_list(self): return self._input_param_list @property def source(self): return self.api_url def set_limit(self, limit): # TODO: add support for callable limit getters? self._orig_limit = limit if isinstance(limit, Operation): self.parent = limit if self.is_bijective and self.input_field: limit = len(self.input_param_list) self._limit = limit @property def limit(self): if isinstance(self._limit, Operation): return self._limit.remaining return self._limit @property def remaining(self): limit = self.limit if limit is None: limit = ALL return max(0, limit - len(self.results)) def process(self): self.started = True task = self.get_current_task() if self.client.debug: print self.__class__.__name__, self.remaining if task is None: raise NoMoreResults() elif isinstance(task, Operation): results = task.process() elif callable(task): # not actually used results = task() else: msg = 'task expected as Operation or callable, not: %r' % task raise TypeError(msg) # TODO: check resp for api errors/warnings # TODO: check for unrecognized parameter values new_results = self.store_results(task, results) return new_results def get_current_task(self): if not self.remaining: return None for subop_queue in reversed(self.subop_queues): while subop_queue: subop = subop_queue.peek() if subop.remaining: return subop else: subop_queue.pop() return None def store_results(self, task, results): new_res = [] oqi = getattr(task, '_origin_queue', None) if oqi is None: return self._update_results(results) dqi = oqi + 1 origin_queue = self.subop_queues[oqi] is_recursive = origin_queue.options.get('is_recursive') if is_recursive: origin_queue.enqueue_many(results, client=self.client) if dqi < len(self.subop_queues): dest_queue = self.subop_queues[dqi] dest_queue.enqueue_many(results, client=self.client) else: new_res = self._update_results(results) return new_res def _update_results(self, results): ret = [] filt_exists = self.kwargs.get('exists') filt_exists = filt_exists if filt_exists is None else bool(filt_exists) for res in results: if not self.remaining: break if filt_exists is not None and res.exists is not filt_exists: continue unique_key = getattr(res, 'unique_key', res) if unique_key in self.results: continue self.results[unique_key] = res ret.append(res) return ret def process_all(self): while 1: # TODO: +retry behavior try: 
self.process() except NoMoreResults: break return self.results.values() __call__ = process_all def __repr__(self): cn = self.__class__.__name__ if self.input_field is None: return '%s(limit=%r)' % (cn, self.limit) tmpl = '%s(%s, limit=%r)' # add dynamic-limity stuff try: ip_disp = repr(self.input_param) except: ip_disp = "'(unprintable param)'" return tmpl % (cn, ip_disp, self.limit) class QueryOperation(Operation): api_action = 'query' field_prefix = None # e.g., 'gcm' cont_str_key = None per_query_limit = DEFAULT_QUERY_LIMIT default_limit = ALL def __init__(self, input_param, limit=None, **kw): if limit is None: limit = self.default_limit super(QueryOperation, self).__init__(input_param, limit, **kw) self.cont_strs = [] self._set_params() if self.is_bijective and self.input_param and \ len(self.input_param_list) > self.per_query_param_limit: self.is_multiplexing = True self._setup_multiplexing() else: self.is_multiplexing = False def _set_params(self): is_bot_op = self.is_bot_op params = {} for field in self.fields: pref_key = field.get_key(self.field_prefix) kw_val = self.kwargs.get(field.key) params[pref_key] = field.get_value(kw_val) if self.input_field: qp_key_pref = self.input_field.get_key(self.field_prefix) qp_val = self.input_field.get_value(self.input_param) params[qp_key_pref] = qp_val field_limit = self.input_field.limit or PL_50_500 try: pq_pl = field_limit.get_limit(is_bot_op) except AttributeError: pq_pl = int(field_limit) self.per_query_param_limit = pq_pl self.params = params try: per_query_limit = self.per_query_limit.get_limit(is_bot_op) except AttributeError: per_query_limit = int(self.per_query_limit) self.per_query_limit = per_query_limit return def _setup_multiplexing(self): subop_queue = self.subop_queues[0] chunk_size = self.per_query_param_limit for chunk in chunked_iter(self.input_param_list, chunk_size): subop_queue.enqueue(tuple(chunk), client=self.client) # TODO return @property def current_limit(self): ret = self.remaining if not self.is_bijective: ret = max(DEFAULT_MIN, ret) ret = min(ret, self.per_query_limit) return ret @property def remaining(self): if self.is_depleted: return 0 return super(QueryOperation, self).remaining @property def last_cont_str(self): if not self.cont_strs: return None return self.cont_strs[-1] @property def is_depleted(self): if self.cont_strs and self.last_cont_str is None: return True return False @classmethod def get_field_dict(cls): ret = dict([(f.get_key(cls.field_prefix), f) for f in cls.fields]) if cls.input_field: query_key = cls.input_field.get_key(cls.field_prefix) ret[query_key] = cls.input_field return ret def get_current_task(self): if self.is_multiplexing: return super(QueryOperation, self).get_current_task() if not self.remaining: return None params = self.prepare_params(**self.kwargs) mw_call = MediaWikiCall(params, client=self.client) return mw_call def prepare_params(self, **kw): params = dict(self.params) if not self.is_bijective: params[self.field_prefix + 'limit'] = self.current_limit if self.last_cont_str: params[self.cont_str_key] = self.last_cont_str params['action'] = self.api_action return params def post_process_response(self, response): """ Used to rectify inconsistencies in API responses (looking at you, Feedback API) """ return response.results.get(self.api_action) def extract_results(self, resp): raise NotImplementedError('inheriting classes should return' ' a list of results from the response') def get_cont_str(self, resp): qc_val = resp.results.get(self.api_action + '-continue') if qc_val is None: 
            return None
        for key in ('generator', 'prop', 'list'):
            if key in self.params:
                next_key = self.params[key]
                break
        else:
            raise KeyError("couldn't find contstr")
        if not self.cont_str_key:
            self.cont_str_key = qc_val[next_key].keys()[0]
        return qc_val[next_key][self.cont_str_key]

    def store_results(self, task, resp):
        if self.is_multiplexing:
            return super(QueryOperation, self).store_results(task, resp)
        if resp.notices:  # TODO: lift this
            self._notices = list(resp.notices)
            self._url = resp.url
            print "may have an error: %r (%r)" % (resp.notices, resp.url)
        processed_resp = self.post_process_response(resp)
        if processed_resp is None:
            new_cont_str = self.get_cont_str(resp)  # TODO: DRY this.
            self.cont_strs.append(new_cont_str)
            return []  # TODO: keep an eye on this
        try:
            new_results = self.extract_results(processed_resp)
        except Exception:
            raise
        super(QueryOperation, self).store_results(task, new_results)
        new_cont_str = self.get_cont_str(resp)
        self.cont_strs.append(new_cont_str)
        return new_results


BASE_API_PARAMS = {'format': 'json',
                   'servedby': 'true'}


class MediaWikiCall(Operation):
    """
    Sets up actual API HTTP request, makes the request, encapsulates
    error handling, and stores results.
    """
    input_field = SingleParam('url_params')  # param_type=dict)
    output_type = Operation

    _limit = 1

    def __init__(self, params, **kw):
        # These settings will all go on the WapitiClient
        self.raise_exc = kw.pop('raise_exc', True)
        self.raise_err = kw.pop('raise_err', True)
        self.raise_warn = kw.pop('raise_warn', False)
        self.client = kw.pop('client')
        self.web_client = getattr(self.client, 'web_client', DEFAULT_WEB_CLIENT)
        if kw:
            raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
        self.api_url = self.client.api_url
        params = params or {}
        self.params = dict(BASE_API_PARAMS)
        self.params.update(params)
        self.action = params['action']

        self.url = ''
        self.results = None
        self.servedby = None
        self.exception = None
        self.error = None
        self.error_code = None
        self.warnings = []
        self.done = False  # set True once process() completes; read by remaining

        self._input_param = params

    def process(self):
        # TODO: add URL to all exceptions
        resp = None
        try:
            resp = self.web_client.get(self.api_url, self.params)
        except Exception as e:
            # TODO: log
            self.exception = e  # TODO: wrap
            if self.raise_exc:
                raise
            return self
        finally:
            self.url = getattr(resp, 'url', '')

        try:
            self.results = json.loads(resp.text)
        except Exception as e:
            self.exception = e  # TODO: wrap
            if self.raise_exc:
                raise
            return self
        self.servedby = self.results.get('servedby')

        error = self.results.get('error')
        if error:
            self.error = error.get('info')
            self.error_code = error.get('code')

        warnings = self.results.get('warnings', {})
        for mod_name, warn_dict in warnings.items():
            warn_str = '%s: %s' % (mod_name, warn_dict.get('*', warn_dict))
            self.warnings.append(warn_str)

        if self.error and self.raise_err:
            raise WapitiException(self.error_code)
        if self.warnings and self.raise_warn:
            raise WapitiException('warnings: %r' % self.warnings)
        # done was never set anywhere, which left remaining stuck at 1
        self.done = True
        return self

    @property
    def notices(self):
        ret = []
        if self.exception:
            ret.append(self.exception)
        if self.error:
            ret.append(self.error)
        if self.warnings:
            ret.extend(self.warnings)
        return ret

    @property
    def remaining(self):
        if self.done:
            return 0
        return 1


class WebRequestOperation(Operation):
    input_field = SingleParam('url')
    output_type = Operation
    _limit = 1

    def __init__(self, input_param, **kw):
        self.client = kw.pop('client', None)
        self.web_client = getattr(self.client, 'web_client', DEFAULT_WEB_CLIENT)
        self.action = kw.pop('action', 'get')
        self.raise_exc = kw.pop('raise_exc', True)
        if kw:
            raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
        self.set_input_param(input_param)
        self.url = self._input_param
        self.kwargs = kw
        self.results = {}

    def process(self):
        resp = None
        try:
            resp = self.web_client.req(self.action, self.url)
        except Exception as e:
            self.exception = e
            if self.raise_exc:
                raise
            return self
        self.results[self.url] = resp.text
        raise NoMoreResults()
        #return self


class GetPageHTML(Operation):
    input_field = SingleParam('title')
    examples = [OperationExample('Africa', limit=1)]
    output_type = Operation
    _limit = 1

    def __init__(self, *a, **kw):
        super(GetPageHTML, self).__init__(*a, **kw)
        self.web_client = getattr(self.client, 'web_client', DEFAULT_WEB_CLIENT)
        self.raise_exc = kw.pop('raise_exc', True)
        source_info = getattr(self.client, 'source_info', None)
        if source_info:
            main_title = source_info.mainpage
            main_url = source_info.base
            self.base_url = main_url[:-len(main_title)]
        else:
            self.base_url = DEFAULT_BASE_URL
        self.url = self.base_url + self.input_param
        self.results = {}

    def process(self):
        try:
            resp = self.web_client.get(self.url)
        except Exception as e:
            self.exception = e
            if self.raise_exc:
                raise
            return self
        self.results[self.url] = resp.text
        raise NoMoreResults()
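For orientation, here is what a concrete QueryOperation subclass looks like from the outside. This is a hypothetical sketch: the key names and Param signatures are illustrative, not copied from wapiti's real operation modules; only the hooks (field_prefix, fields, input_field, extract_results) are pinned down by the base class above.

class GetExampleCategoryMembers(QueryOperation):
    # Hypothetical: field keys and Param arguments are illustrative only.
    field_prefix = 'gcm'
    input_field = SingleParam('title')
    fields = [StaticParam('list', 'categorymembers')]
    output_type = [str]  # a list output_type marks the operation non-bijective

    def extract_results(self, query_resp):
        # post_process_response() has already unwrapped resp.results['query']
        return [m['title'] for m in query_resp.get('categorymembers', [])]

With that in place, GetExampleCategoryMembers('Category:Physics', limit=100).process_all() would drive prepare_params(), MediaWikiCall, and the continuation bookkeeping above until the limit is satisfied.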
{ "content_hash": "0ac17bb1637625615dd7229d538a08bc", "timestamp": "", "source": "github", "line_count": 791, "max_line_length": 81, "avg_line_length": 31.852085967130215, "alnum_prop": 0.580115102202818, "repo_name": "mahmoud/wapiti", "id": "9915939597a35331e613b5540234bb19d9c0a6b6", "size": "25219", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wapiti/operations/base.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "146918" } ], "symlink_target": "" }
def activity_post_save_doc_template_values(url_root): """ Show documentation about activityPostSave """ required_query_parameter_list = [ { 'name': 'voter_device_id', 'value': 'string', # boolean, integer, long, string 'description': 'An 88 character unique identifier linked to a voter record on the server', }, { 'name': 'api_key', 'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string 'description': 'The unique key provided to any organization using the WeVoteServer APIs', }, ] optional_query_parameter_list = [ { 'name': 'statement_text', 'value': 'string', # boolean, integer, long, string 'description': 'A text comment.', }, { 'name': 'visibility_setting', 'value': 'string', # boolean, integer, long, string 'description': 'Two values are currently supported: \'FRIENDS_ONLY\' or \'SHOW_PUBLIC\'.', }, ] potential_status_codes_list = [ { 'code': 'VALID_VOTER_DEVICE_ID_MISSING', 'description': 'Cannot proceed. A valid voter_device_id parameter was not included.', }, { 'code': 'VALID_VOTER_ID_MISSING', 'description': 'Cannot proceed. A valid voter_id was not found.', }, ] try_now_link_variables_dict = { # 'organization_we_vote_id': 'wv85org1', } api_response = '{\n' \ ' "status": string,\n' \ ' "success": boolean,\n' \ ' "date_created": string,\n' \ ' "date_last_changed": string,\n' \ ' "date_of_notice": string,\n' \ ' "id": integer,\n' \ ' "activity_post_id": integer,\n' \ ' "kind_of_activity": string,\n' \ ' "kind_of_seed": string,\n' \ ' "new_positions_entered_count": integer,\n' \ ' "position_we_vote_id_list": list,\n' \ ' "speaker_name": string,\n' \ ' "speaker_organization_we_vote_id": string,\n' \ ' "speaker_voter_we_vote_id": string,\n' \ ' "speaker_profile_image_url_medium": string,\n' \ ' "speaker_profile_image_url_tiny": string,\n' \ ' "speaker_twitter_handle": string,\n' \ ' "speaker_twitter_followers_count": number,\n' \ ' "statement_text": string,\n' \ ' "visibility_is_public": boolean,\n' \ '}' template_values = { 'api_name': 'activityPostSave', 'api_slug': 'activityPostSave', 'api_introduction': "Save a new comment posted to the news feed.", 'try_now_link': 'apis_v1:activityPostSaveView', 'try_now_link_variables_dict': try_now_link_variables_dict, 'url_root': url_root, 'get_or_post': 'GET', 'required_query_parameter_list': required_query_parameter_list, 'optional_query_parameter_list': optional_query_parameter_list, 'api_response': api_response, 'api_response_notes': "", 'potential_status_codes_list': potential_status_codes_list, } return template_values
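For reference, exercising the endpoint this page documents would look roughly like the following; the host and URL prefix are placeholders, since they depend on the WeVoteServer deployment:

import requests

# Hypothetical deployment root; substitute the real WeVoteServer host.
url = "https://api.example.org/apis/v1/activityPostSave/"
params = {
    "voter_device_id": "<88-character voter device id>",
    "api_key": "<organization api key>",
    "statement_text": "Hello from the news feed.",
    "visibility_setting": "FRIENDS_ONLY",
}
response = requests.get(url, params=params)
print(response.json().get("status"))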
{ "content_hash": "7ef26c5d29f0fb634f7ac3689a621268", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 115, "avg_line_length": 42.11904761904762, "alnum_prop": 0.4937817976257773, "repo_name": "wevote/WeVoteServer", "id": "5d9504a433e6c9856397920b81219be894f21ee5", "size": "3659", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "apis_v1/documentation_source/activity_post_save_doc.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3612" }, { "name": "HTML", "bytes": "1559624" }, { "name": "JavaScript", "bytes": "26822" }, { "name": "Procfile", "bytes": "51" }, { "name": "Python", "bytes": "11943600" }, { "name": "Shell", "bytes": "587" } ], "symlink_target": "" }
import os
import re
import sys

from setuptools import find_packages, setup
from cx_Freeze import setup, Executable  # noqa re-import setup

from wsgidav import __version__

# Check for Windows MSI Setup
if "bdist_msi" not in sys.argv:  # or len(sys.argv) != 2:
    raise RuntimeError(
        "This setup.py variant is only for creating 'bdist_msi' targets: {}\n"
        "Example `{} bdist_msi`".format(sys.argv, sys.argv[0])
    )

org_version = __version__

# 'setup.py upload' fails on Vista, because .pypirc is searched on 'HOME' path
# if "HOME" not in os.environ and "HOMEPATH" in os.environ:
#     os.environ.setdefault("HOME", os.environ.get("HOMEPATH", ""))
#     print("Initializing HOME environment variable to '{}'".format(os.environ["HOME"]))

# Since we included pywin32 extensions, cx_Freeze tries to create a
# version resource. This only supports the 'a.b.c[.d]' format.
# Our version has either the form '1.2.3' or '1.2.3-a1'
unsafe_version = False
major, minor, patch = org_version.split(".", 3)
major = int(major)
minor = int(minor)
if "-" in patch:
    # We have a pre-release version, e.g. '1.2.3-a1'.
    # This is presumably a post-release increment after '1.2.2' release.
    # It must NOT be converted to '1.2.3.1', since that would be *greater*
    # than '1.2.3', which is not even released yet.
    # Approach 1:
    #     We cannot guarantee that '1.2.2.1' is correct either, so for
    #     pre-releases we assume '0.0.0.0':
    #     major = minor = patch = alpha = 0
    # Approach 2:
    #     '1.2.3-a1' was presumably a post-release increment after '1.2.2',
    #     so assume '1.2.2.1':
    patch, alpha = patch.split("-", 1)
    patch = int(patch)
    # Remove leading letters
    alpha = re.sub("^[a-zA-Z]+", "", alpha)
    alpha = int(alpha)

    if unsafe_version and patch >= 1:
        patch -= 1  # 1.2.3-a1 => 1.2.2.1
    else:
        # may be 1.2.0-a1 or 2.0.0-a1: we don't know what the previous release was
        major = minor = patch = alpha = 0
else:
    patch = int(patch)
    alpha = 0

version = "{}.{}.{}.{}".format(major, minor, patch, alpha)
print("Version {}, using {}".format(org_version, version))

try:
    readme = open("README.md", "rt").read()
except IOError:
    readme = "(readme not found. Running from tox/setup.py test?)"

# These dependencies are for plain WsgiDAV:
# NOTE: Only need to list requirements that are not discoverable by scanning
#       the main package. For example due to dynamic or optional imports.
# Also, cx_Freeze may have difficulties with packages listed here, e.g. PyYAML:
# https://github.com/marcelotduarte/cx_Freeze/issues/1541
install_requires = []
# ... The Windows MSI Setup should include lxml and CherryPy
install_requires.extend(
    [
        "cheroot",
        "lxml",
    ]
)
setup_requires = install_requires
tests_require = []

executables = [
    Executable(
        script="wsgidav/server/server_cli.py",
        base=None,
        # base="Win32GUI",
        target_name="wsgidav.exe",
        icon="docs/source/logo.ico",
        shortcut_name="WsgiDAV",
        copyright="(c) 2009-2022 Martin Wendt",
        # trademarks="...",
    )
]

# See https://cx-freeze.readthedocs.io/en/latest/distutils.html#build-exe
build_exe_options = {
    "includes": install_requires,
    # "include_files": [],
    "packages": [
        "asyncio",  # https://stackoverflow.com/a/41881598/19166
        "cheroot",
        "dbm",
        "wsgidav.dir_browser",
        "wsgidav.dc.nt_dc",
    ],
    "excludes": [
        "tkinter",
    ],
    "constants": "BUILD_COPYRIGHT='(c) 2009-2022 Martin Wendt'",
    # "init_script": "Console",
    "include_msvcr": True,
}

# See https://cx-freeze.readthedocs.io/en/latest/distutils.html#bdist-msi
bdist_msi_options = {
    "upgrade_code": "{92F74137-38D1-48F6-9730-D5128C8B611E}",
    "add_to_path": True,
    # "all_users": True,
    "install_icon": "docs/source/logo.ico",
    # "summary_data": {"author": "Martin Wendt"},
}

setup(
    name="WsgiDAV",
    version=version,
    author="Martin Wendt",
    author_email="wsgidav@wwwendt.de",
    maintainer="Martin Wendt",
    maintainer_email="wsgidav@wwwendt.de",
    url="https://github.com/mar10/wsgidav/",
    description="Generic and extendable WebDAV server based on WSGI",
    long_description=readme,
    long_description_content_type="text/markdown",
    classifiers=[],  # not required for this build-only setup config
    keywords="web wsgi webdav application server",
    license="MIT",
    packages=find_packages(exclude=["tests"]),
    package_data={
        # If any package contains *.txt files, include them:
        # "": ["*.css", "*.html", "*.ico", "*.js"],
        "wsgidav.dir_browser": ["htdocs/*.*"]
    },
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    py_modules=[],
    zip_safe=False,
    extras_require={},
    # cmdclass={"test": ToxCommand, "sphinx": SphinxCommand},
    entry_points={"console_scripts": ["wsgidav = wsgidav.server.server_cli:run"]},
    options={
        "build_exe": build_exe_options,
        "bdist_msi": bdist_msi_options,
    },
    # Used by cx_Freeze:
    executables=executables,
)
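To make the version-mangling logic above concrete, here is the same transformation distilled into a standalone function with the three interesting cases asserted. This is a restatement of the code above for illustration, not part of the original file:

import re

def msi_version(org, unsafe=False):
    # Convert '1.2.3' / '1.2.3-a1' into an MSI-safe 'a.b.c.d' string.
    major, minor, patch = org.split(".", 3)
    major, minor = int(major), int(minor)
    if "-" in patch:
        patch, alpha = patch.split("-", 1)
        patch = int(patch)
        alpha = int(re.sub("^[a-zA-Z]+", "", alpha))
        if unsafe and patch >= 1:
            patch -= 1  # assume a post-release increment of the prior release
        else:
            major = minor = patch = alpha = 0  # prior release unknown
    else:
        patch, alpha = int(patch), 0
    return "{}.{}.{}.{}".format(major, minor, patch, alpha)

assert msi_version("1.2.3") == "1.2.3.0"
assert msi_version("1.2.3-a1") == "0.0.0.0"
assert msi_version("1.2.3-a1", unsafe=True) == "1.2.2.1"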
{ "content_hash": "f5e0471aa460b332899cbdb2acf79481", "timestamp": "", "source": "github", "line_count": 156, "max_line_length": 88, "avg_line_length": 33.30769230769231, "alnum_prop": 0.6310623556581986, "repo_name": "mar10/wsgidav", "id": "1e0baeb198fe2b968e973aed5d0e9de2b45dd2e9", "size": "5287", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup_bdist_msi.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "942" }, { "name": "Dockerfile", "bytes": "918" }, { "name": "HTML", "bytes": "3213" }, { "name": "JavaScript", "bytes": "2663" }, { "name": "Python", "bytes": "659781" }, { "name": "Shell", "bytes": "111" } ], "symlink_target": "" }
from __future__ import unicode_literals import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import user.models class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('nickname', models.CharField(max_length=50, unique=True)), ('bio', models.CharField(blank=True, max_length=120)), ('url', models.URLField(blank=True, max_length=100)), ('location', models.CharField(blank=True, max_length=100)), ('avatar', models.ImageField(upload_to=user.models.user_avatar_path)), ('last_login_ip', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True)), ('ip_joined', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True)), ('client_mark', models.CharField(blank=True, default='weixin', max_length=10, null=True)), ('weixin_nickName', models.CharField(blank=True, max_length=50, null=True)), ('weixin_avatarUrl', models.URLField(blank=True, null=True)), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': 'user', 'verbose_name_plural': 'users', 'abstract': False, }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
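The migration above is auto-generated; the model it encodes would look roughly like the following, reconstructed from the migration's fields rather than copied from the app's actual models.py (the upload-path helper is a placeholder for the real user.models.user_avatar_path):

from django.contrib.auth.models import AbstractUser
from django.db import models

def user_avatar_path(instance, filename):
    # Placeholder: the real upload-path logic lives in user/models.py.
    return 'avatars/%s/%s' % (instance.pk, filename)

class User(AbstractUser):
    # Username, names, email, staff/active flags etc. come from AbstractUser.
    nickname = models.CharField(max_length=50, unique=True)
    bio = models.CharField(max_length=120, blank=True)
    url = models.URLField(max_length=100, blank=True)
    location = models.CharField(max_length=100, blank=True)
    avatar = models.ImageField(upload_to=user_avatar_path)
    last_login_ip = models.GenericIPAddressField(null=True, blank=True, unpack_ipv4=True)
    ip_joined = models.GenericIPAddressField(null=True, blank=True, unpack_ipv4=True)
    client_mark = models.CharField(max_length=10, blank=True, null=True, default='weixin')
    weixin_nickName = models.CharField(max_length=50, blank=True, null=True)
    weixin_avatarUrl = models.URLField(blank=True, null=True)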
{ "content_hash": "d3e17aac01ba83112bcba5a6f8bc1645", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 329, "avg_line_length": 68.05454545454545, "alnum_prop": 0.6369222548757681, "repo_name": "lsdlab/awesome_coffice", "id": "a567fa1fc80e0c6fea5a798b3ddb673d87258d22", "size": "3814", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "user/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "10513" }, { "name": "HTML", "bytes": "101299" }, { "name": "JavaScript", "bytes": "11565" }, { "name": "Python", "bytes": "46001" } ], "symlink_target": "" }
""" Payload Voice ARI """ import functools from ari import client from ari import event from fysom import Fysom from oslo.config import cfg from payloadvoice.openstack.common import log from payloadvoice.openstack.common import uuidutils ASTERISK_OPTS = [ cfg.StrOpt( 'uri', default='http://127.0.0.1:8088/ari', help='Complete Asterisk REST interface endpoint.'), cfg.StrOpt( 'username', default='payload', help='Asterisk REST interface username.'), cfg.StrOpt( 'password', default=None, help='Asterisk REST interface password.'), ] ASTERISK_GROUP = cfg.OptGroup( name='asterisk', title='Options for Asterisk integration.') CONF = cfg.CONF CONF.register_group(ASTERISK_GROUP) CONF.register_opts(ASTERISK_OPTS, ASTERISK_GROUP) LOG = log.getLogger(__name__) class Connection(object): def __init__(self, fsm): self._bridges = dict() self._channels = dict() self.client = client.get_client( '1', endpoint=CONF.asterisk.uri, username=CONF.asterisk.username, password=CONF.asterisk.password) self.events = event.Event( url=CONF.asterisk.uri, username=CONF.asterisk.username, password=CONF.asterisk.password, app='demo') self.events.register_event('StasisStart', self._handle_stasis_start) self.events.register_event('StasisEnd', self._handle_stasis_stop) self.events.register_event( 'ChannelStateChange', self._handle_channel_state_change) self.events.register_event( 'ChannelEnteredBridge', self._handle_channel_entered_bridge) self.fsm = fsm def _handle_channel_entered_bridge(self, data): bridge = data['bridge']['id'] channel = data['channel']['id'] self._channels[channel].queue(channel=channel, bridge=bridge) def _handle_channel_state_change(self, data): channel = data['channel']['id'] if data['channel']['state'] == 'Up': self._channels[channel].channel_up(channel=channel) def _handle_stasis_start(self, data): channel = data['channel']['id'] self._channels[channel] = Fysom(self.fsm) self._channels[channel].start(channel=channel) def _handle_stasis_stop(self, data): channel = data['channel']['id'] self._channels[channel].end(channel=channel) del self._channels[channel] def answer(self, channel): self.client.channels.answer(channel) def bridge_create(self, channel): uuid = uuidutils.generate_uuid() self._bridges[channel] = uuid func = functools.partial( self._bridge_create_callback, channel=channel) self.client.bridges.create( uuid=uuid, type='mixing', callback=func) def bridge_delete(self, channel): self.client.bridges.delete(self._bridges[channel]) del self._bridges[channel] def _bridge_create_callback(self, bridge, channel): self.client.bridges.add_music( uuid=bridge.id) self.client.bridges.add( bridge.id, channel=channel)
{ "content_hash": "be29e848a4aabbf50b06bf8201447885", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 76, "avg_line_length": 32.275510204081634, "alnum_prop": 0.6380018969332911, "repo_name": "kickstandproject/payload-voice", "id": "c02b45b67b7629d7d62a0731792d168da595a0a0", "size": "3792", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "payloadvoice/asterisk.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "106434" } ], "symlink_target": "" }
"""Support for RESTful binary sensors.""" import voluptuous as vol from homeassistant.components.binary_sensor import ( DOMAIN as BINARY_SENSOR_DOMAIN, PLATFORM_SCHEMA, BinarySensorEntity, ) from homeassistant.const import ( CONF_DEVICE_CLASS, CONF_FORCE_UPDATE, CONF_NAME, CONF_RESOURCE, CONF_RESOURCE_TEMPLATE, CONF_VALUE_TEMPLATE, ) from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from . import async_get_config_and_coordinator, create_rest_data_from_config from .entity import RestEntity from .schema import BINARY_SENSOR_SCHEMA, RESOURCE_SCHEMA PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({**RESOURCE_SCHEMA, **BINARY_SENSOR_SCHEMA}) PLATFORM_SCHEMA = vol.All( cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the REST binary sensor.""" # Must update the sensor now (including fetching the rest resource) to # ensure it's updating its state. if discovery_info is not None: conf, coordinator, rest = await async_get_config_and_coordinator( hass, BINARY_SENSOR_DOMAIN, discovery_info ) else: conf = config coordinator = None rest = create_rest_data_from_config(hass, conf) await rest.async_update(log_errors=False) if rest.data is None: if rest.last_exception: raise PlatformNotReady from rest.last_exception raise PlatformNotReady name = conf.get(CONF_NAME) device_class = conf.get(CONF_DEVICE_CLASS) value_template = conf.get(CONF_VALUE_TEMPLATE) force_update = conf.get(CONF_FORCE_UPDATE) resource_template = conf.get(CONF_RESOURCE_TEMPLATE) if value_template is not None: value_template.hass = hass async_add_entities( [ RestBinarySensor( coordinator, rest, name, device_class, value_template, force_update, resource_template, ) ], ) class RestBinarySensor(RestEntity, BinarySensorEntity): """Representation of a REST binary sensor.""" def __init__( self, coordinator, rest, name, device_class, value_template, force_update, resource_template, ): """Initialize a REST binary sensor.""" super().__init__( coordinator, rest, name, device_class, resource_template, force_update ) self._state = False self._previous_data = None self._value_template = value_template self._is_on = None @property def is_on(self): """Return true if the binary sensor is on.""" return self._is_on def _update_from_rest_data(self): """Update state from the rest data.""" if self.rest.data is None: self._is_on = False response = self.rest.data if self._value_template is not None: response = self._value_template.async_render_with_possible_json_value( self.rest.data, False ) try: self._is_on = bool(int(response)) except ValueError: self._is_on = {"true": True, "on": True, "open": True, "yes": True}.get( response.lower(), False )
{ "content_hash": "8003e7102e4296d41791674d93947da5", "timestamp": "", "source": "github", "line_count": 118, "max_line_length": 86, "avg_line_length": 29.550847457627118, "alnum_prop": 0.6154287353025524, "repo_name": "sander76/home-assistant", "id": "a90c5bd7c770600cf6cee5787c2a4b2d6b12029c", "size": "3487", "binary": false, "copies": "5", "ref": "refs/heads/dev", "path": "homeassistant/components/rest/binary_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1795" }, { "name": "Python", "bytes": "36548768" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
import pycompiler
import unittest
import re

# Copied from deps. TODO: Refactor to avoid copy+paste
def striptags(x, v):
    tags = v.split(",")
    subbed = x
    for tag in tags:
        subbed = re.sub(r'<%s.*?>.*?</%s>' % (tag, tag), '', subbed)
        subbed = re.sub(r'<%s.*?/>' % tag, '', subbed)
    return subbed

class TestFunctions(unittest.TestCase):
    def setUp(self):
        pass

    def verify_true_filter(self, code, indata):
        self.assertTrue(eval("lambda x:" + code)(indata))

    def verify_false_filter(self, code, indata):
        self.assertFalse(eval("lambda x:" + code)(indata))

    def verify_fine_filter(self, code, indata, expected_outdata):
        self.assertEquals(expected_outdata, eval("lambda x:" + code)(indata))

    def test_starts_filter(self):
        code = pycompiler.compile_starts_filter("'hel'")
        self.verify_true_filter(code, "hello world")
        self.verify_true_filter(code, "helium")
        self.verify_false_filter(code, "HELLO")
        self.verify_false_filter(code, "foobar")

    def test_ends_filter(self):
        code = pycompiler.compile_ends_filter("'rld'")
        self.verify_true_filter(code, "hello world")
        self.verify_true_filter(code, "world")
        self.verify_false_filter(code, "WORLD")
        self.verify_false_filter(code, "foobar")

    def test_contains_filter(self):
        code = pycompiler.compile_contains_filter("'llo'")
        self.verify_true_filter(code, "hello world")
        self.verify_true_filter(code, "lollollol")
        self.verify_false_filter(code, "HELLO")
        self.verify_false_filter(code, "foobar")

    def test_length_filter(self):
        code = pycompiler.compile_length_filter("'>5'")
        self.verify_true_filter(code, "hello world")
        self.verify_true_filter(code, "lollollol")
        self.verify_false_filter(code, "HELLO")
        self.verify_false_filter(code, "foo")
        code = pycompiler.compile_length_filter("'<5'")
        self.verify_false_filter(code, "hello world")
        self.verify_false_filter(code, "lollollol")
        self.verify_false_filter(code, "HELLO")
        self.verify_true_filter(code, "foo")
        code = pycompiler.compile_length_filter("'==5'")
        self.verify_false_filter(code, "hello world")
        self.verify_false_filter(code, "lollollol")
        self.verify_true_filter(code, "HELLO")
        self.verify_false_filter(code, "foo")

    def test_matches_filter(self):
        code = pycompiler.compile_matches_filter("'[A-Z]+100[ABC]'")
        self.verify_true_filter(code, "HELLO100B")
        self.verify_true_filter(code, "YO100A")
        self.verify_false_filter(code, "hello100b")
        self.verify_false_filter(code, "foobar")

    def test_after_filter(self):
        code = pycompiler.compile_after_filter("'ello'")
        self.verify_fine_filter(code, "hello world", " world")
        self.verify_fine_filter(code, "hello hi, hello hey", " hi, hello hey")
        self.verify_fine_filter(code, "foobar", "")

    def test_before_filter(self):
        code = pycompiler.compile_before_filter("'llo'")
        self.verify_fine_filter(code, "hello world", "he")
        self.verify_fine_filter(code, "hello hi, hello hey", "he")
        self.verify_fine_filter(code, "foobar", "foobar")

    def test_afterpos_filter(self):
        code = pycompiler.compile_afterpos_filter("'0'")
        self.verify_fine_filter(code, "hello world", "hello world")
        code = pycompiler.compile_afterpos_filter("'2'")
        self.verify_fine_filter(code, "hello world", "llo world")
        code = pycompiler.compile_afterpos_filter("'200'")
        self.verify_fine_filter(code, "hello world", "")

    def test_beforepos_filter(self):
        code = pycompiler.compile_beforepos_filter("'0'")
        self.verify_fine_filter(code, "hello world", "")
        code = pycompiler.compile_beforepos_filter("'2'")
        self.verify_fine_filter(code, "hello world", "he")
        code = pycompiler.compile_beforepos_filter("'200'")
        self.verify_fine_filter(code, "hello world", "hello world")

    def test_exclude_filter(self):
        code = pycompiler.compile_exclude_filter("'lo'")
        self.verify_fine_filter(code, "hello world", "hel world")
        self.verify_fine_filter(code, "hello hello", "hel hel")
        self.verify_fine_filter(code, "foobar", "foobar")

    def test_striptags_filter(self):
        code = pycompiler.compile_striptags_filter("'p'")
        self.verify_fine_filter(code, "hello <p>foobar</p> world", "hello world")
        code = pycompiler.compile_striptags_filter("'img'")
        self.verify_fine_filter(code, "hello <img src='pic.jpg'/> world", "hello world")
        self.verify_fine_filter(code, "hello <img src='pic.jpg'/><img src='pic.jpg'/> world", "hello world")
        code = pycompiler.compile_striptags_filter("'p,img'")
        self.verify_fine_filter(code, "hello <p>hello</p><img src='pic.jpg'/>world", "hello world")

if __name__ == '__main__':
    unittest.main()
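Since each test evals the compiled string as the body of "lambda x:" + code, pycompiler's compile_*_filter functions evidently return Python expression strings. pycompiler's source is not shown here; the following is just one plausible implementation of two of the compilers, consistent with the assertions above:

def compile_contains_filter(arg):
    # arg arrives pre-quoted, e.g. "'llo'", so this yields "'llo' in x"
    return "%s in x" % arg

def compile_after_filter(arg):
    # Everything after the first match, or '' when absent:
    # "hello world".partition('ello') -> ('h', 'ello', ' world')
    return "x.partition(%s)[2]" % arg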
{ "content_hash": "dc2b0ddf8715b08186657b58ef64b178", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 87, "avg_line_length": 43.53333333333333, "alnum_prop": 0.6045176110260337, "repo_name": "buffis/fetch", "id": "84c1ee650fe16c81a6c07e7f1d59b07562207f23", "size": "5224", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "boxofshame/test_pycompiler.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "67059" } ], "symlink_target": "" }
"""Provides device automations for control of device.""" from __future__ import annotations from typing import Any import voluptuous as vol from homeassistant.components.automation import ( AutomationActionType, AutomationTriggerInfo, ) from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE from homeassistant.core import CALLBACK_TYPE, HomeAssistant from homeassistant.helpers.device_registry import DeviceRegistry, async_get_registry from homeassistant.helpers.typing import ConfigType from . import PhilipsTVDataUpdateCoordinator from .const import DOMAIN TRIGGER_TYPE_TURN_ON = "turn_on" TRIGGER_TYPES = {TRIGGER_TYPE_TURN_ON} TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES), } ) async def async_get_triggers( hass: HomeAssistant, device_id: str ) -> list[dict[str, Any]]: """List device triggers for device.""" triggers = [] triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_TYPE: TRIGGER_TYPE_TURN_ON, } ) return triggers async def async_attach_trigger( hass: HomeAssistant, config: ConfigType, action: AutomationActionType, automation_info: AutomationTriggerInfo, ) -> CALLBACK_TYPE | None: """Attach a trigger.""" trigger_data = automation_info["trigger_data"] registry: DeviceRegistry = await async_get_registry(hass) if config[CONF_TYPE] == TRIGGER_TYPE_TURN_ON: variables = { "trigger": { **trigger_data, "platform": "device", "domain": DOMAIN, "device_id": config[CONF_DEVICE_ID], "description": f"philips_js '{config[CONF_TYPE]}' event", } } device = registry.async_get(config[CONF_DEVICE_ID]) for config_entry_id in device.config_entries: coordinator: PhilipsTVDataUpdateCoordinator = hass.data[DOMAIN].get( config_entry_id ) if coordinator: return coordinator.turn_on.async_attach(action, variables) return None
{ "content_hash": "fab0d36b0c5375dd8b66c84da97402b3", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 85, "avg_line_length": 30.342105263157894, "alnum_prop": 0.6556808326105811, "repo_name": "Danielhiversen/home-assistant", "id": "09784dae63fed03383e785f32fe755977a234e15", "size": "2306", "binary": false, "copies": "7", "ref": "refs/heads/dev", "path": "homeassistant/components/philips_js/device_trigger.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2443" }, { "name": "Python", "bytes": "36870185" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
from __future__ import print_function from twisted.spread import pb from twisted.internet import reactor class Two(pb.Referenceable): def remote_print(self, arg): print("Two.print() called with", arg) def main(): two = Two() factory = pb.PBClientFactory() reactor.connectTCP("localhost", 8800, factory) def1 = factory.getRootObject() def1.addCallback(got_obj, two) # hands our 'two' to the callback reactor.run() def got_obj(obj, two): print("got One:", obj) print("giving it our two") obj.callRemote("takeTwo", two) main()
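This is the client half of a classic Twisted Perspective Broker docs pair. Its counterpart, pb3server.py, publishes a root object whose remote_takeTwo receives the Two reference and calls back into it; reproduced approximately from the companion listing, possibly not verbatim:

from twisted.spread import pb
from twisted.internet import reactor

class One(pb.Root):
    def remote_takeTwo(self, two):
        print("received a Two called", two)
        print("telling it to print(12)")
        two.callRemote("print", 12)  # invokes Two.remote_print on the client

reactor.listenTCP(8800, pb.PBServerFactory(One()))
reactor.run()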
{ "content_hash": "7dece4682ce03636a50769c6bf2a6878", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 68, "avg_line_length": 25.08695652173913, "alnum_prop": 0.6707105719237435, "repo_name": "EricMuller/mywebmarks-backend", "id": "f4104268989f93581520a739f604f2d659cb2124", "size": "673", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "requirements/twisted/Twisted-17.1.0/docs/core/howto/listings/pb/pb3client.py", "mode": "33261", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "23736" }, { "name": "Batchfile", "bytes": "3516" }, { "name": "C", "bytes": "37168" }, { "name": "CSS", "bytes": "66211" }, { "name": "DIGITAL Command Language", "bytes": "1032" }, { "name": "GAP", "bytes": "36244" }, { "name": "HTML", "bytes": "1087560" }, { "name": "Makefile", "bytes": "6766" }, { "name": "Nginx", "bytes": "998" }, { "name": "Objective-C", "bytes": "2584" }, { "name": "Python", "bytes": "23014526" }, { "name": "Roff", "bytes": "160293" }, { "name": "Shell", "bytes": "15482" }, { "name": "Smarty", "bytes": "1366" } ], "symlink_target": "" }
import unittest
import multiprocessing
import time

import server, client

class MainTest (unittest.TestCase):
    """
    This test runs the server in a separate process and after a second it
    runs the client in another process. Afterwards, it terminates the
    server process (the client exits on its own after sending 'exit') and
    runs assertions on the client's log output. The testing is a little
    bit brutal.
    """
    def test_run(self):
        sp = multiprocessing.Process(target=server.run, args=())
        cp = multiprocessing.Process(target=client.run, args=())
        sp.start()
        time.sleep(1)
        cp.start()
        time.sleep(1)
        sp.terminate()
        f = open('client.log')
        output = [line.rstrip() for line in f.readlines()]
        f.close()
        print output
        self.assertEqual(output, ['OUT:hello', 'IN:hello', 'OUT:from', 'IN:from', 'OUT:twisted', 'IN:twisted', 'OUT:client', 'IN:client', 'OUT:exit', 'IN:exit'])
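The server and client modules under test are not included here. Given the argument-free run() entry points and the OUT:/IN: log format the assertions expect, a compatible server.run could be a plain Twisted echo factory; this is a sketch under those assumptions (the port number is a guess and must match whatever client.run connects to), not the repository's actual code:

from twisted.internet import reactor, protocol

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)  # echo each message straight back

def run():
    factory = protocol.ServerFactory()
    factory.protocol = Echo
    reactor.listenTCP(8000, factory)  # hypothetical port
    reactor.run()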
{ "content_hash": "c054884ad84d72f5732d24f69a271a1a", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 161, "avg_line_length": 35.15384615384615, "alnum_prop": 0.6334792122538293, "repo_name": "ducin/twisted-echo-server", "id": "22b8f3d56051cc671c0e9d69604e81591a15c9ba", "size": "914", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/main_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2096" } ], "symlink_target": "" }