blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54c3b0e260c86c076df393e5c072e11bffa98d3b | cfa7034c6bdaaa45c541666ca27b50c75caf707f | /inference.py | 4afbde7190489a5ba2044cb6c3bca5282eee4a03 | [] | no_license | AChengy/Artificial_Intelligence_Pacaman_Tracking | a472dfa440d49729813cc47b3a921e7a0f0f1415 | fe69ffa1cb1494b1722a1e9ad5bd54691707edad | refs/heads/master | 2021-01-01T03:59:36.509201 | 2016-04-23T22:56:18 | 2016-04-23T22:56:18 | 56,904,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,983 | py | # inference.py
# ------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import itertools
import util
import random
import busters
import game
class InferenceModule:
    """
    An inference module tracks a belief distribution over a ghost's location.
    This is an abstract class, which you should not modify.
    """
    ############################################
    # Useful methods for all inference modules #
    ############################################
    def __init__(self, ghostAgent):
        "Sets the ghost agent for later access"
        self.ghostAgent = ghostAgent
        # Agent index of the tracked ghost (1-based; index 0 is Pacman).
        self.index = ghostAgent.index
        self.obs = [] # most recent observation position
    def getJailPosition(self):
        # Prison cell for this ghost; location depends only on the ghost index.
        return (2 * self.ghostAgent.index - 1, 1)
    def getPositionDistribution(self, gameState):
        """
        Returns a distribution over successor positions of the ghost from the
        given gameState.
        You must first place the ghost in the gameState, using setGhostPosition
        below.
        """
        ghostPosition = gameState.getGhostPosition(self.index) # The position you set
        actionDist = self.ghostAgent.getDistribution(gameState)
        dist = util.Counter()
        for action, prob in actionDist.items():
            # Each legal action maps to one successor square with the
            # probability the ghost agent assigns to that action.
            successorPosition = game.Actions.getSuccessor(ghostPosition, action)
            dist[successorPosition] = prob
        return dist
    def setGhostPosition(self, gameState, ghostPosition):
        """
        Sets the position of the ghost for this inference module to the
        specified position in the supplied gameState.
        Note that calling setGhostPosition does not change the position of the
        ghost in the GameState object used for tracking the true progression of
        the game. The code in inference.py only ever receives a deep copy of
        the GameState object which is responsible for maintaining game state,
        not a reference to the original object. Note also that the ghost
        distance observations are stored at the time the GameState object is
        created, so changing the position of the ghost will not affect the
        functioning of observeState.
        """
        conf = game.Configuration(ghostPosition, game.Directions.STOP)
        gameState.data.agentStates[self.index] = game.AgentState(conf, False)
        return gameState
    def observeState(self, gameState):
        "Collects the relevant noisy distance observation and pass it along."
        distances = gameState.getNoisyGhostDistances()
        if len(distances) >= self.index: # Check for missing observations
            # Distances are listed per ghost; this module's ghost is at
            # index - 1 because Pacman occupies agent slot 0.
            obs = distances[self.index - 1]
            self.obs = obs
            self.observe(obs, gameState)
    def initialize(self, gameState):
        "Initializes beliefs to a uniform distribution over all positions."
        # The legal positions do not include the ghost prison cells in the bottom left.
        self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]
        self.initializeUniformly(gameState)
    ######################################
    # Methods that need to be overridden #
    ######################################
    def initializeUniformly(self, gameState):
        "Sets the belief state to a uniform prior belief over all positions."
        pass
    def observe(self, observation, gameState):
        "Updates beliefs based on the given distance observation and gameState."
        pass
    def elapseTime(self, gameState):
        "Updates beliefs for a time step elapsing from a gameState."
        pass
    def getBeliefDistribution(self):
        """
        Returns the agent's current belief state, a distribution over ghost
        locations conditioned on all evidence so far.
        """
        pass
class ExactInference(InferenceModule):
    """
    The exact dynamic inference module uses forward-algorithm updates to
    compute the exact belief function at each time step.

    Beliefs live in ``self.beliefs``, a util.Counter mapping each legal
    board position to P(ghost at position | all evidence so far).
    """
    def initializeUniformly(self, gameState):
        "Begin with a uniform distribution over ghost positions."
        self.beliefs = util.Counter()
        for p in self.legalPositions:
            self.beliefs[p] = 1.0
        self.beliefs.normalize()
    def observe(self, observation, gameState):
        """
        Update beliefs from a noisy Manhattan-distance observation and
        Pacman's position (the HMM evidence update).

        observation is the noisy distance to the tracked ghost, or None
        if the ghost has been captured.  The emission model stores
        P(noisyDistance | trueDistance) for any true distance.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        allPossible = util.Counter()
        # Use `is None` for the captured-ghost sentinel (identity, not equality).
        if noisyDistance is None:
            # Captured ghost: all belief mass collapses onto its jail cell.
            allPossible[self.getJailPosition()] = 1.0
        else:
            # P(pos | obs) is proportional to P(obs | pos) * prior(pos).
            for location in self.legalPositions:
                distance = util.manhattanDistance(location, pacmanPosition)
                allPossible[location] = emissionModel[distance] * self.beliefs[location]
        allPossible.normalize()
        self.beliefs = allPossible
    def elapseTime(self, gameState):
        """
        Update self.beliefs for one elapsed time step (the HMM transition
        update):

            P(p at t+1) = sum_old P(p | old, pacman) * P(old at t)

        The transition model may depend on Pacman's current position
        (e.g. DirectionalGhost), which is known, so the per-position
        distribution is obtained by temporarily placing the ghost at
        each old position.
        """
        allPossible = util.Counter()
        for oldPosition in self.legalPositions:
            newPositionDist = self.getPositionDistribution(
                self.setGhostPosition(gameState, oldPosition))
            for newPosition, probability in newPositionDist.items():
                allPossible[newPosition] += probability * self.beliefs[oldPosition]
        self.beliefs = allPossible
    def getBeliefDistribution(self):
        "Return the current belief Counter over ghost positions."
        return self.beliefs
class ParticleFilter(InferenceModule):
    """
    A particle filter for approximately tracking a single ghost.

    The belief state is ``self.particles``, a plain list of board
    positions; each particle is one unweighted sample of the ghost's
    position.
    """
    def __init__(self, ghostAgent, numParticles=300):
        InferenceModule.__init__(self, ghostAgent)
        self.setNumParticles(numParticles)
    def setNumParticles(self, numParticles):
        self.numParticles = numParticles
    def initializeUniformly(self, gameState):
        """
        Initialize self.numParticles particles spread evenly (not
        randomly) across self.legalPositions so the prior is uniform.
        """
        # Cycle through the legal positions until we have enough particles;
        # this places them as evenly as possible across the board.
        self.particles = list(itertools.islice(
            itertools.cycle(self.legalPositions), self.numParticles))
    def observe(self, observation, gameState):
        """
        Reweight and resample the particles given a noisy distance
        observation.

        Special cases:
          * observation is None -> the ghost was captured; every particle
            moves to the jail position.
          * all particles receive zero weight -> reinitialize uniformly
            from the prior.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        if noisyDistance is None:
            self.particles = [self.getJailPosition()] * self.numParticles
            return
        # Weight each legal position: P(obs | trueDistance) * current belief.
        allPossible = util.Counter()
        oldBelief = self.getBeliefDistribution()
        for location in self.legalPositions:
            distance = util.manhattanDistance(location, pacmanPosition)
            allPossible[location] += emissionModel[distance] * oldBelief[location]
        if not any(allPossible.values()):
            # Every particle is inconsistent with the evidence; restart
            # from the uniform prior.
            self.initializeUniformly(gameState)
        else:
            # Resample the full particle population from the weighted belief.
            self.particles = [util.sample(allPossible)
                              for _ in range(self.numParticles)]
    def elapseTime(self, gameState):
        """
        Advance each particle one time step by sampling from the ghost's
        transition model conditioned on that particle's position.
        """
        self.particles = [
            util.sample(self.getPositionDistribution(
                self.setGhostPosition(gameState, oldPosition)))
            for oldPosition in self.particles
        ]
    def getBeliefDistribution(self):
        """
        Convert the particle list into a normalized belief Counter over
        ghost positions.
        """
        beliefDistribution = util.Counter()
        for particle in self.particles:
            beliefDistribution[particle] += 1
        beliefDistribution.normalize()
        return beliefDistribution
class MarginalInference(InferenceModule):
    """
    A wrapper around the JointInference module that returns marginal beliefs
    about ghosts.
    """
    def initializeUniformly(self, gameState):
        "Set the belief state to an initial, prior value."
        # Only the first ghost's module initializes the shared joint filter;
        # every module then registers its own ghost agent with it.
        if self.index == 1:
            jointInference.initialize(gameState, self.legalPositions)
        jointInference.addGhostAgent(self.ghostAgent)
    def observeState(self, gameState):
        "Update beliefs based on the given distance observation and gameState."
        # Delegate to the shared joint filter once per time step.
        if self.index == 1:
            jointInference.observeState(gameState)
    def elapseTime(self, gameState):
        "Update beliefs for a time step elapsing from a gameState."
        if self.index == 1:
            jointInference.elapseTime(gameState)
    def getBeliefDistribution(self):
        "Returns the marginal belief over a particular ghost by summing out the others."
        jointDistribution = jointInference.getBeliefDistribution()
        dist = util.Counter()
        for t, prob in jointDistribution.items():
            # t is a tuple of all ghost positions; pick this ghost's slot.
            dist[t[self.index - 1]] += prob
        return dist
class JointParticleFilter:
    """
    JointParticleFilter tracks a joint distribution over tuples of all
    ghost positions.  Each particle is a tuple with one position per ghost.
    """
    def __init__(self, numParticles=600):
        self.setNumParticles(numParticles)
    def setNumParticles(self, numParticles):
        self.numParticles = numParticles
    def initialize(self, gameState, legalPositions):
        "Stores information about the game, then initializes particles."
        self.numGhosts = gameState.getNumAgents() - 1
        self.ghostAgents = []
        self.legalPositions = legalPositions
        self.initializeParticles()
    def initializeParticles(self):
        """
        Initialize particles consistent with a uniform prior over joint
        ghost positions (ghosts may occupy the same square).

        itertools.product enumerates all joint positions in a fixed
        order, so the list is shuffled before use to ensure even random
        placement, then cycled until numParticles particles exist.
        """
        possiblePositions = list(itertools.product(self.legalPositions,
                                                   repeat=self.numGhosts))
        random.shuffle(possiblePositions)
        self.particles = list(itertools.islice(
            itertools.cycle(possiblePositions), self.numParticles))
    def addGhostAgent(self, agent):
        """
        Each ghost agent is registered separately and stored (in case they are
        different).
        """
        self.ghostAgents.append(agent)
    def getJailPosition(self, i):
        "Jail cell coordinates for the i-th ghost (0-indexed here)."
        return (2 * i + 1, 1)
    def observeState(self, gameState):
        """
        Resample the particle set using the likelihood of the noisy
        distance observations.

        Special cases:
          1) A ghost whose noisyDistance is None has been captured; its
             entry in every particle is forced to its jail position.
          2) If every particle receives zero weight, the particles are
             recreated from the prior and captured ghosts re-jailed.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist)
                          for dist in noisyDistances]
        # Weight each particle by the product of per-ghost emission
        # probabilities, re-jailing captured ghosts first so the weight
        # accumulates on the corrected particle.
        allPossible = util.Counter()
        for particle in self.particles:
            weight = 1.0
            for i in range(self.numGhosts):
                if noisyDistances[i] is None:
                    particle = self.getParticleWithGhostInJail(particle, i)
                else:
                    distance = util.manhattanDistance(particle[i], pacmanPosition)
                    weight *= emissionModels[i][distance]
            allPossible[particle] += weight
        if not any(allPossible.values()):
            # All particles inconsistent with the evidence: restart from
            # the prior, then re-apply the jail constraint.  (Bug fix: the
            # original computed the jailed particle but never stored it
            # back into self.particles.)
            self.initializeParticles()
            for index in range(len(self.particles)):
                for i in range(self.numGhosts):
                    if noisyDistances[i] is None:
                        self.particles[index] = self.getParticleWithGhostInJail(
                            self.particles[index], i)
        else:
            allPossible.normalize()
            self.particles = [util.sample(allPossible)
                              for _ in range(self.numParticles)]
    def getParticleWithGhostInJail(self, particle, ghostIndex):
        """
        Takes a particle (as a tuple of ghost positions) and returns a particle
        with the ghostIndex'th ghost in jail.
        """
        particle = list(particle)
        particle[ghostIndex] = self.getJailPosition(ghostIndex)
        return tuple(particle)
    def elapseTime(self, gameState):
        """
        Sample each particle's next state from the ghosts' transition
        models, given the particle's current joint position.

        Ghosts are updated in place within the particle, so later ghosts
        see the already-advanced positions of earlier ones (matching the
        project's reference semantics).
        """
        newParticles = []
        for oldParticle in self.particles:
            newParticle = list(oldParticle)  # A list of ghost positions
            for i in range(self.numGhosts):
                newPosDist = getPositionDistributionForGhost(
                    setGhostPositions(gameState, newParticle), i,
                    self.ghostAgents[i])
                newParticle[i] = util.sample(newPosDist)
            newParticles.append(tuple(newParticle))
        self.particles = newParticles
    def getBeliefDistribution(self):
        "Convert the particle list into a normalized joint belief Counter."
        beliefDistribution = util.Counter()
        for particle in self.particles:
            beliefDistribution[particle] += 1.0
        beliefDistribution.normalize()
        return beliefDistribution
# One JointInference module is shared globally across instances of MarginalInference
# (module-level singleton; see MarginalInference, which delegates to it).
jointInference = JointParticleFilter()
def getPositionDistributionForGhost(gameState, ghostIndex, agent):
    """
    Returns the distribution over positions for a ghost, using the supplied
    gameState.
    """
    # index 0 is pacman, but the students think that index 0 is the first ghost.
    currentPos = gameState.getGhostPosition(ghostIndex + 1)
    successorDist = util.Counter()
    for move, likelihood in agent.getDistribution(gameState).items():
        landing = game.Actions.getSuccessor(currentPos, move)
        successorDist[landing] = likelihood
    return successorDist
def setGhostPositions(gameState, ghostPositions):
    "Sets the position of all ghosts to the values in ghostPositionTuple."
    # Agent slot 0 is Pacman, so ghost i occupies slot i + 1.
    for ghostSlot, ghostPos in enumerate(ghostPositions, start=1):
        placement = game.Configuration(ghostPos, game.Directions.STOP)
        gameState.data.agentStates[ghostSlot] = game.AgentState(placement, False)
    return gameState
| [
"alex.chengelis@gmail.com"
] | alex.chengelis@gmail.com |
2e35b80c7caaa11de6e7a6c8ec18671878f4edcd | c20e0c8b68e0c42afae42d3fc9afda7fdde41744 | /NumberGuess.py | f7135fdc13bffd1c84447e4f7ec7f215293110e5 | [] | no_license | MattaAtakan/NumberGuess | 65f79c09df26194ef355f8ce1a63b0b48992d33f | 477d5b972c05209b97919720cb1866e66cecf264 | refs/heads/master | 2022-07-10T09:28:13.202828 | 2020-05-16T11:51:36 | 2020-05-16T11:51:36 | 264,425,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | from random import randint
from time import sleep
def get_user_guess():
    # Prompt the player and return their guess as an int.
    # NOTE(review): int() raises ValueError on non-numeric input -- confirm
    # whether that crash is acceptable for this game before hardening.
    guess = int(input("Guess the number: "))
    return guess
def roll_dice(number_of_sides):
    """
    Roll two dice with the given number of sides, ask the player to guess
    the combined total, and announce whether the guess was correct.

    number_of_sides: sides per die; the maximum possible total is twice this.
    """
    first_roll = randint(1, number_of_sides)
    second_roll = randint(1, number_of_sides)
    max_val = number_of_sides * 2
    print("The maximum possible value is: ", max_val)
    guess = get_user_guess()
    if guess > max_val:
        # Guess is impossible; abort without rolling.
        print("Error!")
        return
    else:
        print("Rolling...")
        sleep(2)
        print("First roll :", first_roll)
        sleep(1)
        print("Second roll : ", second_roll)
        sleep(1)
        total_roll = first_roll + second_roll
        # Fixed user-facing typo from the original ("tatal" -> "total").
        print("The total value is : ", total_roll)
        print("Result...")
        sleep(2)
        if guess == total_roll:
            print("You have won !")
            sleep(8)
        else:
            print("You have lost !")
            sleep(8)
roll_dice(6)
| [
"noreply@github.com"
] | MattaAtakan.noreply@github.com |
5cbe16584b54d3540a89ca8f46bb85656526da34 | 0152b53f7f1a47997d99e6b87402efc3ba7a799e | /day8.py | b9018daee532e0a37d61c16b568df1c78e546cba | [] | no_license | KVooys/AdventOfCode2017 | 8fba09039aa26aa18ddafeb83be6879de171f5fa | 245aa7d4a6c3ca0eeaf6f59ca7085f0c2448af67 | refs/heads/master | 2021-05-06T22:42:48.502213 | 2017-12-17T16:16:26 | 2017-12-17T16:16:26 | 112,851,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | """
Input is a set of instructions related to named registers
if the conditional is true, perform the change
Find what the largest register is at the end
"""
from collections import defaultdict
import operator as op

# Advent of Code 2017, day 8: execute register instructions of the form
# "b inc 5 if a > 1" and report the largest register value at the end
# (part 1) and the largest value ever held during execution (part 2).

# Comparison operators supported by the instruction set, as a dispatch
# table instead of a long if/elif chain.
COMPARISONS = {
    "<": op.lt,
    "<=": op.le,
    ">": op.gt,
    ">=": op.ge,
    "==": op.eq,
    "!=": op.ne,
}

# store register values here; unknown registers default to 0
registers = defaultdict(int)
# part 2: highest value held by any register at any point in time
highestvalue = 0
with open('inputday8.txt', 'r') as f:
    for line in f:
        (
            change_register,
            change,
            change_value,
            _if,
            check_register,
            op_symbol,
            check_value
        ) = line.split()
        change_value, check_value = int(change_value), int(check_value)
        print(check_register, op_symbol, check_value)
        # Evaluate the conditional with a table lookup; only then apply
        # the inc/dec to the target register.
        if COMPARISONS[op_symbol](registers[check_register], check_value):
            if change == "inc":
                registers[change_register] += change_value
            if change == "dec":
                registers[change_register] -= change_value
        # part 2: keep track of highest value seen so far
        highestvalue = max(highestvalue, max(registers.values()))
print(registers)
maximum = max(registers, key=registers.get)
print(maximum, registers[maximum])
print(highestvalue)
"k.vooys@hotmail.com"
] | k.vooys@hotmail.com |
2d56d902771278a4dbfd5ae09f7f13cf70c78b82 | 1ace2c0a2022024cf987f8087e4f6d5fc3865b1d | /lib/dataset/transforms/__init__.py | 7dd8c7cd0e1bcc0d12d4874ac745c7729c9b13eb | [] | no_license | ZJULiHongxin/HRNet-Hand-Pose-Estimation | b6b31f34d55067f853d6458240c5e59909670ddd | 9ce02e7161051e7a25573a7791117e1e6b92abd3 | refs/heads/main | 2023-05-05T10:34:28.263160 | 2021-05-20T01:43:50 | 2021-05-20T01:43:50 | 369,047,460 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from .transforms import Compose
from .transforms import RandomAffineTransform
from .transforms import ToTensor
from .transforms import Normalize
from .transforms import RandomHorizontalFlip
from . import Mytransforms
from .build import build_transforms
from .build import FLIP_CONFIG
| [
"hongxin_li@zju.edu.cn"
] | hongxin_li@zju.edu.cn |
284a051b4199ebc1e0859e2bc7ce26faacac59c5 | b7a97c2919807983cd418d9262a1246fff9d95a1 | /apps/feeder/models/order.py | 77d0d82a6596d396f812baa2efc04c2fd78f327f | [] | no_license | PUYUP/kirimsaran | da2f439c70979ab88ef2e62e3b2a73c2278ce077 | 250dddddc3d22429c26eed6bfeaf054666f0c110 | refs/heads/main | 2023-08-04T10:11:23.016982 | 2021-09-29T00:59:11 | 2021-09-29T00:59:11 | 397,851,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,508 | py | from django.core.validators import RegexValidator
from django.db import models, transaction
from django.conf import settings
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from .abstract import AbstractCommonField
from ..utils import save_random_identifier
class AbstractOrder(AbstractCommonField):
    """
    Abstract base model for an order a user places against a broadcast
    and/or fragment.

    A 7-character random alphanumeric ``identifier`` is generated on
    first save via ``save_random_identifier``.
    """
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='orders'
    )
    broadcast = models.ForeignKey(
        'feeder.Broadcast',
        on_delete=models.SET_NULL,
        related_name='orders',
        null=True,
        blank=True
    )
    fragment = models.ForeignKey(
        'feeder.Fragment',
        on_delete=models.SET_NULL,
        related_name='orders',
        null=True,
        blank=True
    )
    identifier = models.CharField(
        max_length=7,
        editable=False,
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9]*$',
                message=_("Can only contain the letters a-Z and 0-9."),
                code='invalid_identifier'
            ),
        ]
    )
    class Meta:
        abstract = True
        app_label = 'feeder'
        ordering = ['-create_at']
    def __str__(self) -> str:
        # Bug fix: broadcast is nullable (on_delete=SET_NULL), so the
        # original unconditional self.broadcast.label raised
        # AttributeError for orders without a broadcast.
        return self.broadcast.label if self.broadcast else self.identifier
    @transaction.atomic
    def save(self, *args, **kwargs):
        # Generate random identifier on first save only
        if not self.pk and not self.identifier:
            # We pass the model instance that is being saved
            self.identifier = save_random_identifier(self)
        return super().save(*args, **kwargs)
    @transaction.atomic
    def insert_meta(self, meta_dict):
        """Bulk-create OrderMeta rows for this order from a list of dicts."""
        OrderMeta = apps.get_registered_model('feeder', 'OrderMeta')
        bulk_meta = [OrderMeta(order=self, **meta) for meta in meta_dict]
        if bulk_meta:
            try:
                OrderMeta.objects.bulk_create(
                    bulk_meta,
                    ignore_conflicts=False
                )
            except Exception as e:
                # NOTE(review): errors are swallowed and only printed
                # (matching the original); consider logging or re-raising.
                print(e)
    @transaction.atomic
    def insert_order_item(self, item_dict):
        """Bulk-create OrderItem rows; entries without a truthy 'target' are skipped."""
        OrderItem = apps.get_registered_model('feeder', 'OrderItem')
        bulk_item = [
            OrderItem(order=self, target=item['target'])
            for item in item_dict
            if item.get('target', None)
        ]
        if bulk_item:
            try:
                OrderItem.objects.bulk_create(
                    bulk_item,
                    ignore_conflicts=False
                )
            except Exception as e:
                # NOTE(review): same swallow-and-print behavior as insert_meta.
                print(e)
class AbstractOrderMeta(AbstractCommonField):
    # Arbitrary key/value metadata attached to an order.
    order = models.ForeignKey(
        'feeder.Order',
        on_delete=models.CASCADE,
        related_name='metas'
    )
    meta_key = models.CharField(max_length=255)
    meta_value = models.TextField()
    class Meta:
        abstract = True
        app_label = 'feeder'
        ordering = ['-create_at']
    def __str__(self) -> str:
        return self.meta_key
class OrderItemManager(models.Manager):
    @transaction.atomic
    def bulk_create(self, objs, **kwargs):
        # Snapshot price/method/value from each item's target before the
        # bulk insert, since bulk_create does not call OrderItem.save()
        # (which performs the same copy for single saves).
        for obj in objs:
            target = getattr(obj, 'target', None)
            if target:
                setattr(obj, 'price', target.price)
                setattr(obj, 'method', target.method)
                setattr(obj, 'value', target.value)
        return super().bulk_create(objs, **kwargs)
class AbstractOrderItem(AbstractCommonField):
    # A single line item in an order.  price/method/value are denormalized
    # copies of the linked target's fields, filled in on first save (see
    # save() below and OrderItemManager.bulk_create).
    order = models.ForeignKey(
        'feeder.Order',
        on_delete=models.CASCADE,
        related_name='items'
    )
    target = models.ForeignKey(
        'feeder.Target',
        on_delete=models.SET_NULL,
        related_name='items',
        null=True,
        blank=True
    )
    price = models.IntegerField(default=0)
    method = models.CharField(max_length=255)
    value = models.CharField(max_length=255)
    objects = OrderItemManager()
    class Meta:
        abstract = True
        app_label = 'feeder'
        ordering = ['-create_at']
    def __str__(self) -> str:
        return str(self.price)
    @transaction.atomic
    def save(self, *args, **kwargs):
        # Copy pricing details from the target on first save only.
        # NOTE(review): target is nullable; this dereferences self.target
        # unconditionally on creation -- confirm callers always set it.
        if not self.pk:
            self.price = self.target.price
            self.method = self.target.method
            self.value = self.target.value
        return super().save(*args, **kwargs)
| [
"hellopuyup@gmail.com"
] | hellopuyup@gmail.com |
b8eb80f7b390fefa87b95d483d818254e4c5eef7 | 2e9b81b3b33504069c9861587fb7ef7e14631b04 | /Script/Search_By_Router_ARP.py | 786048c538a0596d98dd53f6d04f56e9a898cd41 | [
"MIT"
] | permissive | omarthe95/Cisco-MAC-Tracer | d4ff998dda1a7e9e210d6d991fde669a6bcfcb55 | 8b8083232c2b748e42245fb13f4b776a2371ce61 | refs/heads/master | 2020-06-04T16:49:12.122279 | 2020-04-25T23:24:53 | 2020-04-25T23:24:53 | 192,110,250 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,969 | py | #!/usr/bin/python3
import netmiko, glob ,os ,sys, re, socket, json
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException
from paramiko.ssh_exception import SSHException
from netmiko.ssh_exception import AuthenticationException
# Hard-coded device credentials.  NOTE(review): credentials embedded in
# source are a security risk -- consider environment variables or a prompt.
USER = 'cisco'
PASSWORD = 'Cisco123'
IPacc=""
cisco_ios_ACC = {'device_type': 'cisco_ios','ip': IPacc,'username': USER,'password': PASSWORD,'timeout':15}

#Read Routers IPs
Routers=[]
with open('IPaddress_Routers.txt') as f:
    for line in f:
        line = line.strip()
        try:
            # inet_aton raises socket.error on a malformed IPv4 address
            socket.inet_aton(line)
            Routers.append(line)
        except socket.error:
            print ("Invalid IP address " + line)
            sys.exit(1)

print ("This is Routers IPs: ")
print (str(Routers) + "\n")

# Input IP Address to Look for
IP = input ("Enter Host IP address : ")
try:
    socket.inet_aton(IP)
except socket.error:
    print ("Invalid IP address \nexecpt ")
    sys.exit(1)

# output/ holds pre-collected per-switch command outputs, and dict.json maps
# each output file name to that device's management IP.
# NOTE(review): inferred from the usage below -- confirm how output/ is built.
os.chdir("output/")
with open('dict.json') as json_file:
    HOST_IP = json.load(json_file)

# Start Gathring ARP table info
for IPacc in Routers:
    cisco_ios_ACC = {'device_type': 'cisco_ios','ip': IPacc,'username': USER,'password': PASSWORD}
    try:
        ssh_session = ConnectHandler(**cisco_ios_ACC)
    except (AuthenticationException):
        print("Wrong Authentication >>> "+ (cisco_ios_ACC['ip']) + "\n")
        pass
    except (NetMikoTimeoutException):
        print("Timeout >>> "+ (cisco_ios_ACC['ip']) + "\n")
        pass
    except (EOFError):
        print("EOF Error >>> "+ (cisco_ios_ACC['ip']) + "\n")
        pass
    except (SSHException):
        print("Error SSH Exception >>> "+ (cisco_ios_ACC['ip']) + "\n")
        pass
    except Exception as unknown_error:
        print("Unkown Error >>> "+ (cisco_ios_ACC['ip']) + "\n")
        pass
    else:
        # Get the device Hostanme
        hostname = ssh_session.send_command('show run | inc host')
        hostname = hostname[9:]
        hostname.split(" ")  # NOTE(review): result discarded -- dead statement
        # Extract MAC address from arp tabel (Cisco dotted-quad MAC format)
        output = ssh_session.send_command('show ip arp | inc ' + (IP))
        MAC = (re.findall(r'((?:[0-9a-f]{4}\.){2}[0-9a-f]{4})', output))
        MAC = ''.join(MAC)
        if MAC == "":
            print ("MAC Not Found")
            sys.exit(1)
        else:
            print("Found MAC \n" )

# Start Searching for the MAC in output files
# NOTE(review): the original indentation was lost; this search loop is placed
# at top level and relies on MAC being bound by the router loop above -- confirm.
x=0
for file in glob.glob('*'):
    with open(file) as f:
        contents = f.read()
        for line in contents.split("\n"):
            if MAC in line:
                print ("#"*75)
                print ("Found on >>>> " + file + " <<<< ")
                print (line)
                print ("#"*75)
                # Interface/port number (last whitespace-separated token)
                INT = line.rsplit(' ', 1)[1]
                IntCheck = input ("\nDo you want to SSH into " + file + " with (show run int " + INT + ") ?\n (Yes or No) : ")
                if IntCheck == "" or IntCheck == "Yes" or IntCheck == "yes" or IntCheck == "Y" or IntCheck == "y":
                    cisco_ios_Router = {'device_type': 'cisco_ios','ip': HOST_IP[file],'username': USER,'password': PASSWORD}
                    ssh_session = ConnectHandler(**cisco_ios_Router)
                    output = ssh_session.send_command('show run int ' + (INT))
                    print (output)
                elif IntCheck == "No" or IntCheck == "no" or IntCheck == "N" or IntCheck == "n":
                    print ("Tahnks Bye \n")
                    pass
                else:
                    print("Invalid entry, Please enter yes or no.")
                    sys.exit(1)
                x+=1
            if line is None:
                # NOTE(review): str.split never yields None, so this branch
                # is unreachable dead code.
                print ("MAC Dont exist"+"\n")
    if x>1:
        print (">>>>>>> Duplication <<<<<<<<"+"\n")
| [
"noreply@github.com"
] | omarthe95.noreply@github.com |
cc7ddd7c61ab177c7b5fccf25af20b075b8a99d1 | a072a312eae030c471d19f6a67735337a146d396 | /dataSetFromDb/test_agent.py | 6cd0a3a6d50d8c6de94c325947123469a423c908 | [] | no_license | allnightlight/LearningWithTemporalPointProcess | a26ea224fa7cd3750292ea5989d50a1d03a205df | 2c2565c76878c23dba74d1447e3f0c4a50edb95b | refs/heads/master | 2022-11-09T17:47:28.979752 | 2020-06-23T07:59:48 | 2020-06-23T07:59:48 | 261,947,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | '''
Created on 2020/06/15
@author: ukai
'''
import unittest
from agentGru import AgentGru
import torch
import numpy as np
import itertools
class Test(unittest.TestCase):
    """Shape and probability-range smoke tests for AgentGru."""

    def test001(self):
        # Bug fix: the original split the for-statement across lines without
        # a continuation or colon (SyntaxError), and referenced Nbatch without
        # defining it.  NOTE(review): Nbatch = 2 is an assumed batch size --
        # confirm the intended value.
        Nbatch = 2
        for Npv, Nseq, Ndelta, Nh in itertools.product(
                (10, 0),
                (1, 2**3),
                (1, 3),
                (2**5,)):
            args = dict(
                Ndelta=Ndelta,
                Npv=Npv,
                Nh=Nh)
            agent = AgentGru(**args)

            _E = torch.rand(Nseq, Nbatch, Ndelta)  # (Nseq, *, Ndelta)
            _Pv = torch.randn(Nseq, Nbatch, Npv)  # (Nseq, *, Npv)
            _Phat, _Yhat = agent(_E, _Pv)

            # outputs carry one extra time step and the event dimensionality
            assert _Phat.shape == (Nseq+1, Nbatch, Ndelta)
            assert _Yhat.shape == (Nseq+1, Nbatch, Ndelta)

            # probabilities must lie in [0, 1] (tolerating fp rounding)
            Phat = _Phat.data.numpy()
            assert np.all((Phat >= -1e-16) & (Phat <= 1+1e-16))


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.test001']
    unittest.main()
"shouta.ukai@gmail.com"
] | shouta.ukai@gmail.com |
80330976f796cd31bd492341d4011ad69f3d0738 | 806652855eff597797b923c89d62745993551444 | /docs/conf.py | 0abf2beec0723b742af3ff2ca2ad02bf5ae14b3d | [
"MIT"
] | permissive | bellini666/graphene-django-plus | 56248f352ffbd96b6c9654201da062a8e87b9387 | 945e8ff293ffe11cff04928c54ee67deeba4df28 | refs/heads/master | 2021-07-20T18:14:58.419890 | 2020-07-29T12:57:08 | 2020-07-29T12:57:08 | 200,929,655 | 0 | 0 | null | 2019-08-06T21:55:57 | 2019-08-06T21:55:56 | null | UTF-8 | Python | false | false | 2,121 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# Make the project root importable so autodoc can import the package.
sys.path.insert(0, os.path.abspath('../'))
sys.setrecursionlimit(1500)

# Django settings are required because autodoc imports project modules.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')

# -- Project information -----------------------------------------------------

project = 'graphene-django-plus'
copyright = '2019-2020, Zerosoft Tecnologia LTDA'
author = 'Zerosoft Tecnologia LTDA'

# The full version, including alpha/beta/rc tags
release = '2.3.2'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
"thiago@bellini.dev"
] | thiago@bellini.dev |
da8ed081dab3d99f9f34ef1edfe060e92520f36b | 637ea32963b3ba0964d9dbdaee60c158a307273d | /Dynamic Programming/0-1 Knapsack/top-down 0-1 knapsack.py | 3b63c46830260a38a685ecc18f1643a6c349abc9 | [] | no_license | prachi1807/Data-Structures-and-Algorithms | 0283aa6c3f9794d942819355512aa99e65ab3143 | 7029c75863a65dc85000e4eb4d02456c60c7daa8 | refs/heads/main | 2023-08-10T10:24:45.108313 | 2021-08-29T16:53:33 | 2021-08-29T16:53:33 | 341,242,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | # In the top down approach, we completely omit the recursive calls
# Every value is stored in the DP matrix and the answer is the last value of the DP Matrix
# We make a DP Matrix with one additional row and column for initialization
# Rows ---> len(val)
# Columns ---> W
def knapsack(wts, val, W, length):
    """Solve the 0/1 knapsack problem bottom-up.

    Fixes over the original: the DP table was a module-level 4x3 matrix that
    only worked for the hard-coded demo input, the fill loops were duplicated
    verbatim, and a memoization check ran against a table that was just
    zero-initialized.  The table is now sized from the arguments, so the
    function works for any input.

    Args:
        wts: item weights.
        val: item values (profits), parallel to wts.
        W: knapsack capacity (non-negative int).
        length: number of items to consider (normally len(wts)).

    Returns:
        Maximum total value achievable within capacity W.
    """
    # t[i][j] = best value using the first i items with capacity j.
    # Row 0 / column 0 are the base case: no items or no capacity -> profit 0.
    t = [[0] * (W + 1) for _ in range(length + 1)]
    for i in range(1, length + 1):
        for j in range(1, W + 1):
            if wts[i - 1] <= j:
                # Item fits: either take it (its value plus the best of the
                # remaining capacity) or skip it -- keep the better choice.
                t[i][j] = max(val[i - 1] + t[i - 1][j - wts[i - 1]],
                              t[i - 1][j])
            else:
                # Item does not fit: profit is the same as without it.
                t[i][j] = t[i - 1][j]
    # Answer sits at the intersection of the last row and column.
    return t[length][W]


wts = [1, 1, 1]
val = [10, 20, 30]
W = 2  # This is capacity of the knapsack
# function should return maximum profit
print(knapsack(wts, val, W, len(wts)))
| [
"noreply@github.com"
] | prachi1807.noreply@github.com |
3eddeb775d3577dd33c2264ec122e8e82da69d1e | 2974da49bf6bbc9509563356b311ad062f8352f0 | /thumt/layers/nn.py | 261f2b88aefc43fde929126f5b5964a8054f1d53 | [
"BSD-3-Clause"
] | permissive | WeerayutBu/GCDT | 057bdc5e6a716a860ea1b2a2cb2533bf9eeb53d0 | 5991044307f59598ea224b64f1f3b915fa00ebcc | refs/heads/master | 2022-02-27T08:16:47.081494 | 2019-09-20T08:48:52 | 2019-09-20T08:48:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,837 | py | # coding=utf-8
# Copyright 2018 The THUMT Authors
import tensorflow as tf
def linear(inputs, output_size, bias, concat=True, dtype=None, scope=None):
    """
    Linear layer

    :param inputs: A Tensor or a list of Tensors with shape [batch, input_size]
    :param output_size: An integer specify the output size
    :param bias: a boolean value indicate whether to use bias term
    :param concat: a boolean value indicate whether to concatenate all inputs
    :param dtype: an instance of tf.DType, the default value is ``tf.float32''
    :param scope: the scope of this layer, the default value is ``linear''
    :returns: a Tensor with shape [batch, output_size]
    :raises RuntimeError: raises ``RuntimeError'' when input sizes do not
                          compatible with each other
    """

    with tf.variable_scope(scope, default_name="linear", values=[inputs]):
        # Normalize a single tensor argument to a one-element list.
        if not isinstance(inputs, (list, tuple)):
            inputs = [inputs]

        input_size = [item.get_shape()[-1].value for item in inputs]

        if len(inputs) != len(input_size):
            raise RuntimeError("inputs and input_size unmatched!")

        # Output keeps all leading dims, replacing the last with output_size.
        output_shape = tf.concat([tf.shape(inputs[0])[:-1], [output_size]],
                                 axis=0)
        # Flatten to 2D
        inputs = [tf.reshape(inp, [-1, inp.shape[-1].value]) for inp in inputs]

        results = []

        if concat:
            # Single matmul over the concatenated inputs.
            input_size = sum(input_size)
            inputs = tf.concat(inputs, 1)

            shape = [input_size, output_size]
            matrix = tf.get_variable("matrix", shape, dtype=dtype)
            results.append(tf.matmul(inputs, matrix))
        else:
            # One weight matrix per input; partial products are summed below.
            for i in range(len(input_size)):
                shape = [input_size[i], output_size]
                name = "matrix_%d" % i
                matrix = tf.get_variable(name, shape, dtype=dtype)
                results.append(tf.matmul(inputs[i], matrix))

        output = tf.add_n(results)

        if bias:
            shape = [output_size]
            bias = tf.get_variable("bias", shape, dtype=dtype)
            output = tf.nn.bias_add(output, bias)

        # Restore the original leading dimensions.
        output = tf.reshape(output, output_shape)

        return output
def maxout(inputs, output_size, maxpart=2, use_bias=True, concat=True,
           dtype=None, scope=None):
    """
    Maxout layer

    :param inputs: see the corresponding description of ``linear''
    :param output_size: see the corresponding description of ``linear''
    :param maxpart: an integer, the default value is 2
    :param use_bias: a boolean value indicate whether to use bias term
    :param concat: concat all tensors if inputs is a list of tensors
    :param dtype: an optional instance of tf.Dtype
    :param scope: the scope of this layer, the default value is ``maxout''
    :returns: a Tensor with shape [batch, output_size]
    :raises RuntimeError: see the corresponding description of ``linear''
    """

    # Produce maxpart candidate activations per output unit ...
    candidate = linear(inputs, output_size * maxpart, use_bias, concat,
                       dtype=dtype, scope=scope or "maxout")
    # ... reshape so the candidates occupy their own trailing axis ...
    shape = tf.concat([tf.shape(candidate)[:-1], [output_size, maxpart]],
                      axis=0)
    value = tf.reshape(candidate, shape)
    # ... and keep only the maximum candidate per unit.
    output = tf.reduce_max(value, -1)
    return output
def layer_norm(inputs, epsilon=1e-6, dtype=None, scope=None):
    """
    Layer Normalization

    :param inputs: A Tensor of shape [..., channel_size]
    :param epsilon: A floating number (numerical-stability term added to the
        variance before the reciprocal square root)
    :param dtype: An optional instance of tf.DType
    :param scope: An optional string
    :returns: A Tensor with the same shape as inputs
    """
    with tf.variable_scope(scope, default_name="layer_norm", values=[inputs],
                           dtype=dtype):
        channel_size = inputs.get_shape().as_list()[-1]

        # Learnable per-channel affine parameters (gamma/beta).
        scale = tf.get_variable("scale", shape=[channel_size],
                                initializer=tf.ones_initializer())

        offset = tf.get_variable("offset", shape=[channel_size],
                                 initializer=tf.zeros_initializer())

        # Statistics are computed over the last (channel) axis only.
        mean = tf.reduce_mean(inputs, axis=-1, keepdims=True)
        variance = tf.reduce_mean(tf.square(inputs - mean), axis=-1, keepdims=True)

        norm_inputs = (inputs - mean) * tf.rsqrt(variance + epsilon)

        return norm_inputs * scale + offset
def smoothed_softmax_cross_entropy_with_logits(**kwargs):
    """Softmax cross entropy with optional label smoothing.

    Keyword Args:
        logits: Tensor of shape [batch, vocab_size].
        labels: integer Tensor of target ids (flattened internally).
        smoothing: label-smoothing factor; any falsy value disables smoothing.
        normalize: if exactly False, the smoothing normalizing constant is
            NOT subtracted (any other value, including the default None,
            subtracts it).
        scope: optional name scope.

    :returns: per-example cross-entropy Tensor
    :raises ValueError: if logits or labels is missing
    """
    logits = kwargs.get("logits")
    labels = kwargs.get("labels")
    smoothing = kwargs.get("smoothing") or 0.0
    normalize = kwargs.get("normalize")
    scope = kwargs.get("scope")

    if logits is None or labels is None:
        raise ValueError("Both logits and labels must be provided")

    with tf.name_scope(scope or "smoothed_softmax_cross_entropy_with_logits",
                       values=[logits, labels]):

        labels = tf.reshape(labels, [-1])

        if not smoothing:
            # Plain (unsmoothed) sparse cross entropy.
            ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits,
                labels=labels
            )
            return ce

        # label smoothing: the true class gets probability p, while the
        # remaining n classes share the smoothing mass q each
        vocab_size = tf.shape(logits)[1]
        n = tf.to_float(vocab_size - 1)
        p = 1.0 - smoothing
        q = smoothing / n

        soft_targets = tf.one_hot(tf.cast(labels, tf.int32), depth=vocab_size,
                                  on_value=p, off_value=q)
        xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                           labels=soft_targets)

        if normalize is False:
            return xentropy

        # Normalizing constant is the best cross-entropy value with soft
        # targets. We subtract it just for readability, makes no difference on
        # learning
        normalizing = -(p * tf.log(p) + n * q * tf.log(q + 1e-20))

        return xentropy - normalizing
| [
"noreply@github.com"
] | WeerayutBu.noreply@github.com |
cfaa8651b5992022597531e96d3fb64ac7824898 | ee16d40e1786fbdacb4dd16795845b18c367fa7e | /Server/Server.py | 8a4b2f15f0f8c67ed050e8ba4b495802ceed22e2 | [] | no_license | saiswaroop5225/Chat-APP | 0915361fba4b0ed35f50dbf42e8be55989ae9060 | c01c076e85a6e756b39473aa31b6e77ea7b8d82d | refs/heads/main | 2023-03-17T19:04:07.405164 | 2021-03-10T05:32:20 | 2021-03-10T05:32:20 | 345,738,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import time
from person import Person
# GLOBAL CONSTANTS
HOST = 'localhost'
PORT = 999
ADDR = (HOST, PORT)
MAX_CONNECTIONS = 10  # NOTE(review): defined but never passed to listen() -- confirm intent
BUFSIZ =1024  # receive buffer size in bytes

#GLOBAL VARIABLES
persons = []  # currently connected clients (Person objects)

# INTIALIZE SERVER
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR) #set up server
def broadcast(msg, name=""):
    """
    Send a message to every connected client.

    ``name`` defaults to "" so the existing one-argument call sites
    (``broadcast(msg)``) keep working.

    :param msg: str or bytes payload to relay
    :param name: str sender name prefixed to the payload
    :return: None
    """
    # Bug fix: the original evaluated bytes(name + " : ", "utf-8" + msg),
    # passing "utf-8" + msg as the *encoding* argument of bytes(), which
    # raises TypeError on every call.  Encode the prefix alone, then append
    # the payload (encoding it first when it arrives as str).
    payload = msg if isinstance(msg, bytes) else bytes(msg, "utf-8")
    for person in persons:
        client = person.client
        client.send(bytes(name + " : ", "utf-8") + payload)
def client_communication(person):
    """
    Per-client thread: announce the new user, then relay messages until the
    client sends "{quit}".

    :param person: Person wrapping the client socket and its address
    :return: None
    """
    run = True
    client = person.client
    addr = person.addr  # kept for parity with the original (currently unused)

    # get persons name
    # Bug fix: the original called BUFSIZ.decode('utf-8 ') -- ints have no
    # .decode(); the *received bytes* must be decoded instead.
    name = client.recv(BUFSIZ).decode('utf-8')
    msg = f"{name} has joined the chat"
    broadcast(msg, name)

    while run:
        msg = client.recv(BUFSIZ)
        # Bug fix: the original condition was inverted -- it disconnected the
        # client on every ordinary message and echoed "{quit}" back instead.
        if msg == bytes("{quit}", 'utf-8'):
            client.send(bytes("{quit}", "utf-8"))
            client.close()
            persons.remove(person)
            run = False  # stop this handler thread once the client left
        else:
            # Relay ordinary messages to all clients (broadcast's documented
            # purpose), rather than echoing them back to the sender only.
            broadcast(msg, name)
def wait_for_connection(SERVER):
    """
    Accept new clients in a loop, spawning one handler thread per client.

    :param SERVER: listening server socket
    :return: None
    """
    run = True
    while run:
        try:
            client, addr = SERVER.accept()
            person = Person(addr, client)
            persons.append(person)
            print(f"[CONNECTION] {addr} connected to the server at {time.time()}")
            Thread(target=client_communication, args=(person,)).start()
        except Exception:
            # Bug fix: the original set run = False *outside* the except
            # handler, so the accept loop exited after a single connection.
            # Only stop (and report the crash) when accept() actually fails.
            print("Faliure")
            run = False
            print("SERVER CRASHED")
if __name__ == '__main__':
    SERVER.listen(10)  # LISTEN FOR CONNECTIONS
    print("[STARTED]Waiting for connections.......")
    # Bug fix: wait_for_connection(SERVER) requires the socket argument;
    # the original started the thread with no args, raising TypeError.
    ACCEPT_THREAD = Thread(target=wait_for_connection, args=(SERVER,))
    ACCEPT_THREAD.start()
    ACCEPT_THREAD.join()  # block until the accept loop terminates
    SERVER.close()
"42674406+saiswaroop5225@users.noreply.github.com"
] | 42674406+saiswaroop5225@users.noreply.github.com |
0e962f90bfe02d167857c860dda5543415903dcc | 07c4798e133b3190c7a4351d6b40fdd2f0a40756 | /Challenge_139.py | 220f548c139b2e03b5515237db6f11ee740b37b6 | [] | no_license | hardhary/PythonByExample | 95f05ded7ea96e824cd0ee453ead97004d069dcd | 15c2c496060b6058fe604f3f0389976ffe9293a2 | refs/heads/master | 2023-03-06T19:05:21.018556 | 2021-02-06T19:55:15 | 2021-02-06T19:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import sqlite3
# Seed the PhoneBook database.  INSERT OR REPLACE keeps the script
# idempotent: re-running it no longer raises IntegrityError on the duplicate
# primary keys the way the original plain INSERTs did.
contacts = [
    (1, "Simon", "Howels", "01223 349752"),
    (2, "Karen", "Phillips", "01954 295773"),
    (3, "Darren", "Smith", "01583 749012"),
    (4, "Anne", "Jones", "01323 567322"),
    (5, "Mark", "Smith", "01223 855534"),
]

with sqlite3.connect("PhoneBook.db") as db:
    cursor = db.cursor()
    cursor.execute(""" CREATE TABLE IF NOT EXISTS Names(
    id integer PRIMARY KEY,
    firstname text,
    surname text,
    phonenumber text);""")
    # executemany replaces five near-identical execute/commit pairs (the
    # original also forgot the commit after the third insert).
    cursor.executemany(
        """INSERT OR REPLACE INTO Names (id,firstname,surname,phonenumber)
           VALUES (?,?,?,?)""", contacts)
    db.commit()

# Bug fix: the original wrote `db.close` without parentheses, which never
# actually closed the connection.
db.close()
| [
"jonathanvillordon@gmail.com"
] | jonathanvillordon@gmail.com |
a5f5caa88096ea3da71c4e973a2095f500fa7cae | 5417c8af1883ee47688b9027cbcc194dd87d643c | /flask/fameforecaster/model.py | ccde72335e90459fd4664775685e7138c9b6cf99 | [] | no_license | chocochun/FameForecaster | 29c44863f84d5bdac12865982415040420b4f7f2 | 2ba4ec90f235e5317fa079b0976b0776201c391e | refs/heads/master | 2022-12-11T01:54:10.602910 | 2018-02-05T15:01:09 | 2018-02-05T15:01:09 | 119,705,836 | 2 | 1 | null | 2022-12-01T22:41:51 | 2018-01-31T15:32:12 | HTML | UTF-8 | Python | false | false | 558 | py | from wtforms import Form, FloatField, validators
from math import pi
class InputForm(Form):
    """Input form for the four model parameters; every field is required.

    Defaults describe an undamped unit-amplitude oscillation (w = 2*pi)
    observed for 18 seconds.
    """
    A = FloatField(
        label='amplitude (m)', default=1.0,
        validators=[validators.InputRequired()])
    b = FloatField(
        label='damping factor (kg/s)', default=0,
        validators=[validators.InputRequired()])
    w = FloatField(
        label='frequency (1/s)', default=2*pi,
        validators=[validators.InputRequired()])
    T = FloatField(
        label='time interval (s)', default=18,
        validators=[validators.InputRequired()])
| [
"chocochun@gmail.com"
] | chocochun@gmail.com |
316e7365092c96ced14f54d6d4595c49ec57a2ca | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/392/usersdata/314/71167/submittedfiles/formula.py | f70b595a824c5bd58d3737692eea90af30944fcf | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | # -*- coding: utf-8 -*-
# Bug fix: the original lines placed the "\n" escape *outside* the closing
# quote (e.g. print('digite P: '\n)), which is a SyntaxError; the escape
# belongs inside the string literal.
print('digite P: \n')
print('digite i: \n')
print('digite n: \n')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9cbf73b313ceadb9a84a8983a41d3478ed5d80c4 | bd4144e919786b4aded4345a2a69ed79e0922946 | /1월 3주차/공통조상.py | 2f554303324d304c1362d167d0514050064e797d | [] | no_license | 2020-ASW/kwoneyng-Park | 670ee027a77c1559f808a51aaf58f27ab3bb85b9 | 3ef556889bbf3f2762c01fdfd10b59869d5e912f | refs/heads/master | 2023-05-14T16:14:04.227511 | 2021-06-11T08:00:37 | 2021-06-11T08:00:37 | 321,286,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | from math import log2
def makeTree(cur, parent):
    # DFS from the root: fill depth[], the binary-lifting table dp[][] and
    # the subtree sizes cnt[].  Returns the size of cur's subtree.
    depth[cur] = depth[parent] + 1  # child depth is parent depth + 1
    dp[cur][0] = parent
    for i in range(1,mxL):
        upper = dp[cur][i-1]  # ancestor 2^(i-1) levels above cur
        if upper == 0:
            break  # past the root -- no higher ancestors to record
        dp[cur][i] = dp[upper][i-1]
        # e.g. dp[13][2] = dp[6][1]: a 2^i jump is two 2^(i-1) jumps
    for child in narr[cur]:
        cnt[cur] += makeTree(child, cur)
    return cnt[cur]
def find(a,b):
    # Lowest common ancestor of a and b via the binary-lifting table dp.
    if depth[a] == depth[b]:
        # Same depth: locate the smallest jump where the ancestors meet,
        # then recurse just below that meeting point.
        for i in range(mxL):
            if dp[a][i] == dp[b][i]:
                if i == 0:
                    return dp[a][0]  # parents already coincide -> that is the LCA
                return find(dp[a][i-1], dp[b][i-1])
    # Otherwise make a the deeper node and lift it toward b's depth.
    if depth[a] < depth[b]:
        a,b = b,a
    for i in range(mxL):
        if depth[b] > depth[dp[a][i]]:
            # a 2^i jump overshoots b's depth: lift a by 2^(i-1) and retry
            return find(dp[a][i-1],b)
for T in range(1,int(input())+1):
    # v nodes, e edges, query nodes st/ed; node 1 is the root
    v,e,st,ed = map(int,input().split())
    data = list(map(int,input().split()))
    narr = [[] for _ in range(v+1)]  # children adjacency lists
    mxL = int(log2(v))+1  # maximum number of binary-lifting jumps
    for i in range(e):
        # edges arrive flattened as (parent, child) pairs
        narr[data[i*2]].append(data[i*2+1])
    depth = [0]*(v+1)
    depth[0] = -1  # virtual parent of the root, so the root gets depth 0
    dp = [[0]*mxL for _ in range(v+1)]  # dp[node][i] = ancestor 2^i levels up
    cnt = [1]*(v+1)  # subtree sizes; every node counts itself
    makeTree(1,0)
    ans = find(st,ed)  # lowest common ancestor of st and ed
    rs = cnt[ans]  # size of the LCA's subtree
    print(ans, rs)
| [
"nan308@naver.com"
] | nan308@naver.com |
bbf936f4182cf543264b0bb7e67b827688cd1be8 | 231682b5285cb6e93ce8ca8f42e8a577b929dd18 | /play/benchmark-test-copy-paste-from-erik-hallstrom.py | 8b8465e2bb2a3decb30d927ab650b750d696d4b4 | [
"MIT"
] | permissive | anthony-sarkis/self-driving-car-nanodegree-nd013 | aca8faf05605b0071895be286bcceaab222015a3 | 910565c71ed789a8da079ba5f6e60a4fce0d65b4 | refs/heads/master | 2021-06-23T13:23:52.218671 | 2017-08-15T19:49:47 | 2017-08-15T19:49:47 | 71,311,641 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | #### 100% of CREDIT TO ERIK HALLSTORM used for benchmarking
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import time
def get_times(maximum_time):
    """Time square-matrix multiplications of growing size on GPU and CPU
    until a single run exceeds maximum_time seconds.

    Returns (device_times, matrix_sizes): per-device lists of elapsed
    seconds and the range of sizes attempted.
    """
    device_times = {
        "/gpu:0":[],
        "/cpu:0":[]
    }
    matrix_sizes = range(500,50000,50)

    for size in matrix_sizes:
        for device_name in device_times.keys():
            print("####### Calculating on the " + device_name + " #######")

            shape = (size,size)
            data_type = tf.float16
            # Pin the random inputs and the matmul onto the device under test.
            with tf.device(device_name):
                r1 = tf.random_uniform(shape=shape, minval=0, maxval=1, dtype=data_type)
                r2 = tf.random_uniform(shape=shape, minval=0, maxval=1, dtype=data_type)
                dot_operation = tf.matmul(r2, r1)

            with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as session:
                # Only the session.run of the matmul graph is timed.
                start_time = time.time()
                result = session.run(dot_operation)
                time_taken = time.time() - start_time
                print(result)

            device_times[device_name].append(time_taken)
            print(device_times)

            # Stop once any single run exceeds the allowed budget.
            if time_taken > maximum_time:
                return device_times, matrix_sizes
# Benchmark until a single multiply takes longer than 1.5 s, then plot
# time-vs-size curves for both devices.
device_times, matrix_sizes = get_times(1.5)
gpu_times = device_times["/gpu:0"]
cpu_times = device_times["/cpu:0"]

# Each size list is truncated to however many runs that device completed.
plt.plot(matrix_sizes[:len(gpu_times)], gpu_times, 'o-')
plt.plot(matrix_sizes[:len(cpu_times)], cpu_times, 'o-')
plt.ylabel('Time')
plt.xlabel('Matrix size')
plt.show()
"anthonysarkis@gmail.com"
] | anthonysarkis@gmail.com |
96f31bfeb86c80ba89858cec03aa42169c5c1f39 | 9f98ed0db445cd69e22eea9e6cfefa929111fe7f | /setup.py | 8afab05cdee21e2c40619e9211f70e7c7243323a | [] | no_license | zhuyoucai168/talospider | 670c34fc75e709814c1dd9f9f72e0a21e07dee47 | da4f0bdc6f6046c306be5c36d9016b74794823b0 | refs/heads/master | 2020-08-29T05:39:57.661905 | 2019-02-22T06:55:48 | 2019-02-22T06:55:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | #!/usr/bin/env python
from setuptools import find_packages, setup
# Packaging metadata for the talospider distribution.
setup(
    name='talospider',
    version='0.0.6',
    author='Howie Hu',
    description="A simple,lightweight scraping micro-framework",
    author_email='xiaozizayang@gmail.com',
    # runtime dependencies installed alongside the package
    install_requires=['lxml', 'requests', 'cchardet', 'cssselect'],
    url="https://github.com/howie6879/talospider/blob/master/README.md",
    packages=find_packages(),
    # ship the bundled data files under talospider/utils/ with the package
    package_data={'talospider': ['utils/*.txt']})
| [
"xiaozizayang@gmail.com"
] | xiaozizayang@gmail.com |
f5ffa547d398fd7b63d8eda40d9c5e9d6396f71d | 0e12572e29aba5e2f10a23ab34d32573ce94da01 | /wesleyVenters/chat.py | df169eeb11286b753e91a8aec882ff046574b13b | [] | no_license | Andre-Aguillard/Siren_2020CyberStorm | 6e39a54b78bcbb29838e667c554d7f532b9b9f09 | b7dbd9d1b1f1c390e73a77d2de98a8c22c393adb | refs/heads/master | 2022-09-18T04:54:09.404336 | 2020-05-17T21:17:21 | 2020-05-17T21:17:21 | 264,212,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | #Wesley Venters
#program 4
#for my program i have made it to where you only have to actually edit the program variable is if
#the zero timing needs to be bigger than the one by making the timing arguments 3 and for
#so when you run the code it should be (python2.7 i believe):
#python chat.py "ip" "port" "zero timing" "one timing"
import socket
from sys import stdout, argv
from time import time
from binascii import unhexlify
# enables debugging output
DEBUG = False
# decode binary
def decode(binary, n):
    """Decode a bit string into text, taking n bits per character.

    A decoded value of 8 (the ASCII backspace) deletes the previously
    decoded character instead of being appended.
    """
    chars = []
    for start in range(0, len(binary), n):
        code = int(binary[start:start + n], 2)
        if code == 8:
            # backspace: drop the last decoded character (no-op when empty)
            if chars:
                chars.pop()
        else:
            chars.append(chr(code))
    return "".join(chars)
# set the server's IP address and port
ip = argv[1]
port = argv[2]
port = int(port)

#Zero and One timing (seconds): thresholds that classify inter-message delays
ZERO = argv[3]
ZERO = float(ZERO)
ONE = argv[4]
ONE = float(ONE)

# create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# connect to the server
s.connect((ip, port))

# receive data until EOF
data = s.recv(4096)
covertbin = ""
counter=0
while (data.rstrip("\n") != "EOF"):
    # output the data
    stdout.write(data)
    stdout.flush()

    # start the "timer", get more data, and end the "timer"
    t0 = time()
    data = s.recv(4096)
    t1 = time()

    # calculate the time delta (and output if debugging)
    delta = round(t1 - t0, 3)
    # delta >= ONE encodes a covert "1"; delta <= ZERO is treated as
    # non-signal and only counted; anything in between encodes a "0".
    if (delta >= ONE):
        covertbin += "1"
    elif (delta <= ZERO):
        counter+=1
    else:
        covertbin += "0"
    if (DEBUG):
        stdout.write(" {}\n".format(delta))
        stdout.flush()

#decode and print message
covert = ""
#uncomment for 7 bit
#covert = decode(covertbin, 7)
#uncomment for 8 bit
covert = decode(covertbin,8)
print covert
# NOTE(review): both widths are decoded and printed below regardless of the
# "uncomment" comments above -- confirm which output is authoritative.
covert = decode(covertbin,7)
print covert
print covertbin

# close the connection to the server
s.close()
| [
"noreply@github.com"
] | Andre-Aguillard.noreply@github.com |
541ccb08f012dd53867e6a59108a9f660bba337a | 56b2e1bee45b32a0fba2e5400d1449364dbac1ec | /AlgorithmsPart1/medianWithHeap.py | 2db06b7f04975ee316a1d7d12b962e3ee0e8aba5 | [] | no_license | krammandrea/Coursera | c294c44e04efe26d0b82d62f320ba0f4d2d759e2 | c3617c3d5a81a624495d625cf9a726de4477db8c | refs/heads/master | 2021-01-10T07:59:50.269415 | 2014-04-06T03:35:17 | 2014-04-06T03:36:38 | 8,444,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,149 | py | #Input: a stream of numbers added over time
#Output: median of all numbers so far, during every time
#Idea: maintain two heaps of equal size, median is either at the root of
#max-heap or min-heap
class MinHeap(object):
    """
    Binary min-heap stored in a flat list: the element at index 0 is the
    minimum; children of index i live at 2*i+1 and 2*i+2.  Supports adding
    an element and popping the root.
    """

    def __init__(self):
        self.elements = []

    def __len__(self):
        return len(self.elements)

    def parent(self, i):
        """Return the parent index of node i, or None for the root."""
        if i == 0:
            return None
        else:
            return ((i + 1) // 2) - 1

    def smallestChild(self, i):
        """Return the index of node i's smaller child, or None for a leaf."""
        left = 2 * i + 1
        right = 2 * i + 2
        if left >= len(self):
            # no children exist
            return None
        elif right >= len(self):
            # only one child exists
            return left
        elif self.elements[left] < self.elements[right]:
            return left
        else:
            return right

    def addElement(self, element):
        """Append element and sift it up to restore the heap property."""
        self.elements.append(element)
        self.bubbleUp(toBubbleUp=len(self.elements) - 1)

    def bubbleUp(self, toBubbleUp):
        # Sift up while the node is smaller than its parent.
        parent = self.parent(toBubbleUp)
        while parent is not None and \
                self.elements[toBubbleUp] < self.elements[parent]:
            self.swap(toBubbleUp, parent)
            toBubbleUp = parent
            parent = self.parent(toBubbleUp)

    def bubbleDown(self, toBubbleDown):
        # Sift down while the node is larger than its smallest child.
        smallestChild = self.smallestChild(toBubbleDown)
        while smallestChild is not None and \
                self.elements[toBubbleDown] > self.elements[smallestChild]:
            self.swap(toBubbleDown, smallestChild)
            toBubbleDown = smallestChild
            smallestChild = self.smallestChild(toBubbleDown)

    def swap(self, i, j):
        self.elements[i], self.elements[j] = self.elements[j], self.elements[i]

    def __str__(self):
        """Render the heap one tree level per line.

        Bug fix: the original iterated a hard-coded range(0, 62) over
        self.elements and raised IndexError for any heap with fewer than
        62 elements; this version adapts to the actual heap size.
        """
        representation = "Heap:"
        width = 1  # number of nodes on the current tree level
        i = 0
        while i < len(self.elements):
            representation += "\n"
            for e in self.elements[i:i + width]:
                representation += str(e) + " "
            i += width
            width *= 2  # each level holds twice as many nodes as the last
        return representation

    def popRoot(self):
        """Remove and return the smallest element.

        Raises IndexError on an empty heap (same as the original).
        """
        self.swap(0, -1)
        # remove root from heap before rearranging
        heapRoot = self.elements.pop(-1)
        self.bubbleDown(toBubbleDown=0)
        return heapRoot
class MaxHeap(MinHeap):
    """
    Binary max-heap stored in a flat list: the element at index 0 is the
    maximum.  Storage, swapping, parent lookup, addElement and popRoot are
    inherited from MinHeap; only the ordering comparisons are flipped here.
    """

    def __init__(self):
        self.elements = []

    def bubbleUp(self, toBubbleUp):
        # Sift the new element up while it is larger than its parent.
        while toBubbleUp > 0:
            above = self.parent(toBubbleUp)
            if not (self.elements[toBubbleUp] > self.elements[above]):
                break
            self.swap(toBubbleUp, above)
            toBubbleUp = above

    def bubbleDown(self, toBubbleDown):
        # Sift the element down while a larger child exists beneath it.
        below = self.largerChild(toBubbleDown)
        while below is not None and \
                self.elements[below] > self.elements[toBubbleDown]:
            self.swap(toBubbleDown, below)
            toBubbleDown = below
            below = self.largerChild(toBubbleDown)

    def largerChild(self, i):
        """Return the index of node i's larger child, or None for a leaf."""
        first = 2 * i + 1
        second = 2 * i + 2
        if first >= len(self):
            return None
        if second >= len(self):
            return first
        if self.elements[first] > self.elements[second]:
            return first
        return second
# Running-median driver: smallerHalf (max-heap) holds the lower half of the
# numbers seen so far, largerHalf (min-heap) the upper half; the median is
# always at one of the two roots.  (This file is Python 2 -- note the print
# statements below.)
largerHalf = MinHeap()
smallerHalf = MaxHeap()
accumMedian = 0
inputFile = open("MedianInput.txt","r")
for line in inputFile.readlines():
    newElement = int(line.rstrip())
    #keep the heapsize equal, and determine the median from the root
    #add in the right heap
    if len(largerHalf) == 0:
        #only largerHalf is checked, this assures sorting the first 2 elements in the right order
        smallerHalf.addElement(newElement)
    elif newElement > largerHalf.elements[0]:
        largerHalf.addElement(newElement)
    else:
        smallerHalf.addElement(newElement)
    #balance out the heapsizes again
    if len(largerHalf) > len(smallerHalf):
        smallerHalf.addElement(largerHalf.popRoot())
    elif len(largerHalf) < len(smallerHalf):
        largerHalf.addElement(smallerHalf.popRoot())
    else:
        pass
    #find the median
    #So, if k is odd, then mk is ((k+1)/2)th smallest number among x1,...,xk;
    #if k is even, then mk is the (k/2)th smallest number among x1,...,xk.
    if len(largerHalf) > len(smallerHalf):
        median = largerHalf.elements[0]
        accumMedian += largerHalf.elements[0]
    elif len(largerHalf) <= len(smallerHalf):
        median = smallerHalf.elements[0]
        accumMedian += smallerHalf.elements[0]
    # Debug trace of both heap sizes/roots around the current median.
    # NOTE(review): original indentation was lost; these prints are assumed
    # to sit inside the per-line loop -- confirm against the original file.
    print str(len(smallerHalf)),
    print "-",
    if len(smallerHalf)>0:
        print str(smallerHalf.elements[0]),
    print "-",
    print median,
    print "-",
    print str(largerHalf.elements[0]),
    print "-",
    print str(len(largerHalf)),
    print "\n",
print "The summarized median is:"+str(accumMedian)
"""
#test heap functionality, expect ordered numbers from smallest to largest
print "heap from smallest to largest:"
for e in range(len(smallerHalf)):
    print str(smallerHalf.popRoot()),
"""
| [
"krammandrea@gmail.com"
] | krammandrea@gmail.com |
0121af025b75095b667e0d0416853d7206c880a4 | ac83d1ddb84ecc904c73bdf779f458bd77efc98c | /src/programy/config/brain/binaries.py | 730ef746b85832414db26d3fdd3828a61fc3a8a2 | [
"MIT"
] | permissive | secrecy27/chatbot | 77829f32a15e17563f038663aebebdb71e52c5a7 | e65a753cf665a4d6d97b57703431cba5331e4f0b | refs/heads/master | 2022-07-24T08:39:57.788009 | 2020-07-16T03:55:21 | 2020-07-16T03:55:21 | 130,678,143 | 4 | 4 | NOASSERTION | 2022-07-06T19:49:14 | 2018-04-23T10:12:01 | Python | UTF-8 | Python | false | false | 2,747 | py | """
Copyright (c) 2016-2018 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.config.section import BaseSectionConfigurationData
class BrainBinariesConfiguration(BaseSectionConfigurationData):
    """Configuration for the ``binaries`` section of the brain config.

    Controls whether the brain is persisted to / restored from a binary
    (pickled) image, the path of that image, and whether AIML sources are
    parsed as a fallback when the binary load fails.
    """

    def __init__(self):
        BaseSectionConfigurationData.__init__(self, "binaries")
        # Defaults: do not save/load a binary and do not fall back to AIML.
        self._save_binary = False
        self._load_binary = False
        self._binary_filename = None
        self._load_aiml_on_binary_fail = False

    @property
    def save_binary(self):
        """True if the brain should be written out as a binary after loading."""
        return self._save_binary

    @property
    def load_binary(self):
        """True if the brain should be restored from a binary image."""
        return self._load_binary

    @property
    def binary_filename(self):
        """Path of the binary brain file (bot root substituted), or None."""
        return self._binary_filename

    @property
    def load_aiml_on_binary_fail(self):
        """True if AIML sources should be parsed when the binary load fails."""
        return self._load_aiml_on_binary_fail

    def load_config_section(self, configuration_file, configuration, bot_root):
        """Populate this object from the ``binaries`` sub-section, if present.

        NOTE(review): a missing individual option overwrites the False
        defaults set in __init__ with None (missing_value=None) — confirm
        this is intended.
        """
        binaries = configuration_file.get_section("binaries", configuration)
        if binaries is not None:
            self._save_binary = configuration_file.get_option(binaries, "save_binary", missing_value=None)
            self._load_binary = configuration_file.get_option(binaries, "load_binary", missing_value=None)
            binary_filename = configuration_file.get_option(binaries, "binary_filename", missing_value=None)
            if binary_filename is not None:
                self._binary_filename = self.sub_bot_root(binary_filename, bot_root)
            self._load_aiml_on_binary_fail = configuration_file.get_option(binaries, "load_aiml_on_binary_fail", missing_value=None)
        else:
            # Fixed warning-message grammar ("using to defaults" -> "using defaults").
            YLogger.warning(self, "'binaries' section missing from bot config, using defaults")
| [
"secrecy418@naver.com"
] | secrecy418@naver.com |
cc43cfea35b76a03d917ff020c3d23f68dc559c9 | dd0a180ce381184de3f13c097fb1f75fbaae68e4 | /FormTest.py | 87e429501b4254409f350e3ea718efc014e48ba9 | [] | no_license | Nitro5176/ExperimentingWithTkinter | 9f9d39c9cf87acd56fdc1cd6c8f7b8d07ee7dcbc | aa4ec6ac18d4604683e3cf82b484c4fbc8d9c2e2 | refs/heads/main | 2023-07-21T02:51:23.057625 | 2021-09-03T01:15:32 | 2021-09-03T01:15:32 | 387,303,549 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | import tkinter as tk
# Labels for the eight rows of the address entry form.
listOfEntries = ["First Name:", "Last Name:", "Address Line 1:",
                 "Address Line 2:", "City:", "State/Province:",
                 "Postal Code:", "Country:"]
mainWindow = tk.Tk()
mainWindow.title("Address Entry Form")
# Build an 8x2 grid: label in column 0, text entry in column 1.
for i in range(8):
    for j in range(2):
        frame = tk.Frame(master=mainWindow)
        if j == 1:
            frame.grid(row=i, column=j)
            textfield = tk.Entry(master=frame)
            textfield.pack()
        else:
            frame.grid(row=i, column=j)
            label = tk.Label(master=frame, text=listOfEntries[i])
            label.pack()
# Bottom row holds the action buttons.
frame = tk.Frame(master=mainWindow, relief=tk.RAISED)
frame.grid(row=8, column=0, sticky="e")
# NOTE(review): re-gridding the same frame moves it, so the column=0 call above
# is overridden by this one — confirm whether two separate frames were intended.
frame.grid(row=8, column=1, sticky="e")
cleanButton = tk.Button(master=frame, text="Clean")
submitButton = tk.Button(master=frame, text="Submit")
cleanButton.pack(side=tk.RIGHT)
submitButton.pack(side=tk.TOP)
mainWindow.mainloop()
| [
"st.98.30.03@gmail.com"
] | st.98.30.03@gmail.com |
d3b06bb9eabac99e4515b615ec43151d1574dfd5 | b687a7e0a9a7166f8c3bb30a28d275b126a4d3b7 | /blog/migrations/0001_initial.py | c5520bef1baae0be5acde2c0d3f1764b85a3587b | [] | no_license | IVYTONG0813/my-first-blog | b4b361216515717c90a46dab790547359d0e4fad | e618b8660bc45de1f516227c50b58dc4da7b498b | refs/heads/master | 2021-01-01T19:49:31.567960 | 2017-07-29T06:39:41 | 2017-07-29T06:39:41 | 98,700,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-29 00:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated initial migration: creates the blog Post model with an
# author foreign key to the project's user model. Do not hand-edit logic here.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"ivytong0813@gmail.com"
] | ivytong0813@gmail.com |
e49263d6772c3f3b42aca20ff6cb78fbfd338ca0 | dd514dfcd2b2f981effeea45d9b03fa72f8878ed | /Targil3.4.2.py | 5a67774191b7e1fe222ab8e1ef026db7d8fcf9ad | [] | no_license | ShalomVanunu/SelfPy | 556e7237d468bc1b45a98085d7c55833fa602fca | 11421378372178e3b945e4dea5887abbf66e704b | refs/heads/master | 2022-11-11T12:51:09.533203 | 2020-07-05T15:00:22 | 2020-07-05T15:00:22 | 277,325,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | sentence = input("Please enter a string: ")
# Letter to be replaced throughout the sentence.
LETTER = "e"
# Use the LETTER constant instead of repeating the literal "e"
# (the constant was previously defined but unused); dead commented-out
# code removed.
new_sentence = sentence.replace(LETTER, "d")
print(new_sentence)
"shalomvanunu@gmail.com"
] | shalomvanunu@gmail.com |
a512b8037a74b6513f954b443d63b952a3fc0907 | d39334881a1ff2633eea2874c5a4e6702dd2480b | /project_learning_5.py | 34de6ca1fc91ec44c2d5ff8a930cb879afd201ae | [] | no_license | Phigaro/Experimental-Comparison-to-Improve-Performance-of-Spam-Filter-System | 28364a3d759933d2c34a3872bdf62dd70b251561 | 6e820d6ec2a949d1c46e70be21250e26a9567324 | refs/heads/master | 2021-05-06T15:36:14.509384 | 2017-12-11T14:37:18 | 2017-12-11T14:37:18 | 113,620,705 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,511 | py | #-*- coding: utf-8 -*-
# Python 2 / TensorFlow 1.x script: trains an RNN spam classifier on the UCI
# SMS Spam Collection and logs per-epoch F1 scores to an output file.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import re
import io
import requests
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile
from tensorflow.python.framework import ops
ops.reset_default_graph()
TrialCount = 20
# Start a graph
sess = tf.Session()
# Set RNN parameters
# Set different parameters [epochs, rnn_size]
epochsNum = 20
epochs = epochsNum
batch_size = 250
max_sequence_length = 25
rnn_size = 10
embedding_size = 50
min_word_frequency = 10
learning_rate = 0.5
dropout_keep_prob = tf.placeholder(tf.float32)
# Results file: one comma-separated row of F1 scores per trial.
output_dir = "output"
output_name = "output_learning_5.txt"
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
f = open(output_dir + "/" +output_name, 'w')
# Download or open data
data_dir = 'temp'
data_file = 'text_data.txt'
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
if not os.path.isfile(os.path.join(data_dir, data_file)):
    # First run: download the dataset and cache it locally.
    zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
    r = requests.get(zip_url)
    z = ZipFile(io.BytesIO(r.content))
    file = z.read('SMSSpamCollection')
    # Format Data
    text_data = file.decode()
    text_data = text_data.encode('ascii',errors='ignore')
    text_data = text_data.decode().split('\n')
    # Save data to text file
    with open(os.path.join(data_dir, data_file), 'w') as file_conn:
        for text in text_data:
            file_conn.write("{}\n".format(text))
else:
    # Open data from text file
    text_data = []
    with open(os.path.join(data_dir, data_file), 'r') as file_conn:
        for row in file_conn:
            text_data.append(row)
# Each line is "<label>\t<message>"; split into parallel label/text lists.
text_data = text_data[:-1]
text_data = [x.split('\t') for x in text_data if len(x)>=1]
[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]
# Create a text cleaning function
def clean_text(text_string):
    """Normalize raw SMS text: drop punctuation, underscores and digits,
    collapse runs of whitespace to single spaces, and lowercase."""
    stripped = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
    collapsed = " ".join(stripped.split())
    return collapsed.lower()
# Clean texts
text_data_train = [clean_text(x) for x in text_data_train]
# Change texts into numeric vectors
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,
                                                                     min_frequency=min_word_frequency)
text_processed = np.array(list(vocab_processor.fit_transform(text_data_train)))
# Shuffle and split data
text_processed = np.array(text_processed)
# Label encoding: ham -> 1, spam -> 0.
text_data_target = np.array([1 if x=='ham' else 0 for x in text_data_target])
shuffled_ix = np.random.permutation(np.arange(len(text_data_target)))
x_shuffled = text_processed[shuffled_ix]
y_shuffled = text_data_target[shuffled_ix]
# Split train/test set
ix_cutoff = int(len(y_shuffled)*0.80)
x_train, x_test = x_shuffled[:ix_cutoff], x_shuffled[ix_cutoff:]
y_train, y_test = y_shuffled[:ix_cutoff], y_shuffled[ix_cutoff:]
vocab_size = len(vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(vocab_size))
print("80-20 Train Test split: {:d} -- {:d}".format(len(y_train), len(y_test)))
# Create placeholders
x_data = tf.placeholder(tf.int32, [None, max_sequence_length])
y_output = tf.placeholder(tf.int32, [None])
# Create embedding
embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data)
#embedding_output_expanded = tf.expand_dims(embedding_output, -1)
# Define the RNN cell
#tensorflow change >= 1.0, rnn is put into tensorflow.contrib directory. Prior version not test.
if tf.__version__[0]>='1':
    cell=tf.contrib.rnn.BasicRNNCell(num_units = rnn_size)
else:
    cell = tf.nn.rnn_cell.BasicRNNCell(num_units = rnn_size)
output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
output = tf.nn.dropout(output, dropout_keep_prob)
# Get output of RNN sequence
# Take the RNN output at the last time step only.
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
bias = tf.Variable(tf.constant(0.1, shape=[2]))
logits_out = tf.matmul(last, weight) + bias
# Loss function
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_out, labels=y_output) # logits=float32, labels=int32
loss = tf.reduce_mean(losses)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits_out, 1), tf.cast(y_output, tf.int64)), tf.float32))
# NOTE(review): this "precision" tensor is identical to the accuracy tensor
# above; real precision is computed from tp/fp below — confirm it is unused.
precision = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits_out, 1), tf.cast(y_output, tf.int64)), tf.float32))
# presision = tf.metrics.precision(labels=x_data, predictions=y_output)
# Confusion-matrix counts built from elementwise comparisons.
actuals = tf.cast(y_output, tf.int64)
predictions = tf.argmax(logits_out, 1)
ones_like_actuals = tf.ones_like(actuals)
zeros_like_actuals = tf.zeros_like(actuals)
ones_like_predictions = tf.ones_like(predictions)
zeros_like_predictions = tf.zeros_like(predictions)
tp_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, ones_like_actuals), tf.equal(predictions, ones_like_predictions)), "float"))
tn_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_like_actuals), tf.equal(predictions, zeros_like_predictions)), "float"))
fp_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_like_actuals), tf.equal(predictions, ones_like_predictions)), "float"))
fn_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, ones_like_actuals), tf.equal(predictions, zeros_like_predictions)), "float"))
optimizer = tf.train.AdamOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
# Start training
i = 0
for i in range(TrialCount):
    print "Current Trial : " + str(i+1)
    for epoch in range(epochs):
        # Shuffle training data
        shuffled_ix = np.random.permutation(np.arange(len(x_train)))
        x_train = x_train[shuffled_ix]
        y_train = y_train[shuffled_ix]
        num_batches = int(len(x_train)/batch_size) + 1
        # TO DO CALCULATE GENERATIONS ExACTLY
        # NOTE(review): this inner loop reuses variable i, shadowing the outer
        # trial index (which the outer for reassigns anyway) — confirm intended.
        for i in range(num_batches):
            # Select train data
            min_ix = i * batch_size
            max_ix = np.min([len(x_train), ((i+1) * batch_size)])
            x_train_batch = x_train[min_ix:max_ix]
            y_train_batch = y_train[min_ix:max_ix]
            # Run train step
            train_dict = {x_data: x_train_batch, y_output: y_train_batch, dropout_keep_prob:0.5}
            sess.run(train_step, feed_dict=train_dict)
        # Run loss and accuracy for training
        temp_train_loss, temp_train_acc = sess.run([loss, accuracy], feed_dict=train_dict)
        train_loss.append(temp_train_loss)
        train_accuracy.append(temp_train_acc)
        # Run Eval Step
        test_dict = {x_data: x_test, y_output: y_test, dropout_keep_prob:1.0}
        temp_test_loss, temp_test_acc, tp, tn, fp, fn = sess.run([loss, accuracy, tp_op, tn_op, fp_op, fn_op], feed_dict=test_dict)
        test_loss.append(temp_test_loss)
        test_accuracy.append(temp_test_acc)
        tpr = float(tp)/(float(tp) + float(fn))
        # NOTE(review): FPR is conventionally fp/(fp+tn); this computes
        # fp/(tp+fn) — confirm whether that is intentional.
        fpr = float(fp)/(float(tp) + float(fn))
        # accuracy = (float(tp) + float(tn))/(float(tp) + float(fp) + float(fn) + float(tn))
        recall = tpr
        precision = float(tp)/(float(tp) + float(fp))
        f1_score = (2 * (precision * recall)) / (precision + recall)
        # Append this epoch's F1 to the current trial's CSV row.
        f.write(str(f1_score))
        if (epoch+1) != epochsNum:
            f.write(", ")
        print('Epoch: {}, Test Loss: {:.3}, Test Acc: {:.3}, TP: {} TN: {} FP: {} FN: {}'.format(epoch+1, temp_test_loss, temp_test_acc, tp, tn, fp, fn))
        print('Epoch: {}, TPR: {:.3}, FPR: {:.3}, Recall: {:.3} Precision: {:.3} f1_score: {:.3}\n'.format(epoch+1, tpr, fpr, recall, precision, f1_score))
    i += 1
    f.write("\n")
f.close()
# # Plot loss over time
# epoch_seq = np.arange(1, epochs+1)
# plt.plot(epoch_seq, train_loss, 'k--', label='Train Set')
# plt.plot(epoch_seq, test_loss, 'r-', label='Test Set')
# plt.title('Softmax Loss')
# plt.xlabel('Epochs')
# plt.ylabel('Softmax Loss')
# plt.legend(loc='upper left')
# plt.show()
# # Plot accuracy over time
# plt.plot(epoch_seq, train_accuracy, 'k--', label='Train Set')
# plt.plot(epoch_seq, test_accuracy, 'r-', label='Test Set')
# plt.title('Test Accuracy')
# plt.xlabel('Epochs')
# plt.ylabel('Accuracy')
# plt.legend(loc='upper left')
# plt.show() | [
"fkrlsp2@naver.com"
] | fkrlsp2@naver.com |
f1218b031a16d088561ec348c303d8a31922c5ad | 4678235a48fbf0e16c6a44fa1c7d97044ff2af90 | /Object Oriented Programming/class methods.py | 26cd83227d66b743a3b6e83f93b217cf5a2e9f83 | [] | no_license | prerit1998jain/Information-Systems-Laboratory | 1f1344d3fa5592affd737deaeaa11f8930ee0ff7 | a4ea72bc91ef9a30bf1fa5f9e55d9a3d4310199c | refs/heads/master | 2020-03-28T19:02:01.936262 | 2018-12-16T03:55:13 | 2018-12-16T03:55:13 | 148,937,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | class Employee:
    # Class-level state shared by every Employee instance.
    no_of_employees = 0
    raise_factor = 1.20
    def __init__(self,first,last,pay):
        """Create an employee and derive the company e-mail from the name."""
        self.first = first
        self.last = last
        self.pay = pay
        self.email = first + last + "@company.com"
        # Count every Employee ever constructed (on the class, not the instance).
        Employee.no_of_employees +=1
    def annual_income(self):
        """Return the annual income derived from pay.

        NOTE(review): annual income is modelled as 10x pay — confirm the
        intended pay period.
        """
        return self.pay*10
    def raise_amount(self):
        """Apply the class-wide raise factor to pay and return the new pay."""
        # Reads Employee.raise_factor (not self), so per-instance overrides
        # of raise_factor are ignored here.
        self.pay = int(self.pay*Employee.raise_factor)
        return self.pay
@classmethod
def fromstring(cls,string):
first,last,pay = string.split(' ')
return cls(first,last,pay)
# Demo: create employees directly and via the fromstring alternate constructor.
emp_1 = Employee('Het','Shah',200000)
emp_2 = Employee('Nachiketa','Vaghela',100000)
emp_3 = Employee('Kumar','Aniket',150000)
emp_4 = Employee.fromstring('Prerit Jain 250000')
# emp_4's pay was parsed from the string above.
print(emp_4.pay)
| [
"noreply@github.com"
] | prerit1998jain.noreply@github.com |
c286b1a874453c75683d06d59998a489a148fdfc | a48721b16b4d53c039619b9173d7c42e9a3319bb | /snake-game-windows.py | 8724da4845712362dcc56ae396ccccbc353dc14f | [] | no_license | andreimarinx/snake-game | cb8ff1274d86a0bd780ed7f0b6e546caeaa79f39 | 8f3faa2ae2cbd94a85298a0fb279ae67ad494fbc | refs/heads/master | 2022-07-24T18:32:56.525678 | 2020-05-20T21:10:52 | 2020-05-20T21:10:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,862 | py | import os
import keyboard
import time
import random
import sys
# Console snake game (Windows: uses `cls` to redraw each frame).
clear = lambda: os.system('cls')
# Snake head position (x = row, y = column) and drawing state.
x = 8
y = 7
line = 0
length = 1
# Fruit position and score.
fruit_x = 8
fruit_y = 9
fruit_line = 0
fruits = 0
# Last-direction flags for WASD; the snake keeps moving that way.
w=0
a=0
s=0
d=0
# Upper bounds for random fruit placement, tied to the arena size.
random_fruit_x = 18
random_fruit_y = 38
width = 40
height = 20
while True:
    # `mare` toggles whether the arena grows (1) or shrinks (0) on each fruit.
    if height == 4:
        mare = 1
    if height==20:
        mare=0
    # Hitting any wall ends the game.
    if x==height+1 or y==width+1 or x==0 or y==0 :
        clear()
        print("Game ended!")
        print("Points: "+str(fruits))
        break
    # Fruit eaten: score, respawn fruit, and resize the arena.
    if fruit_x == x and y == fruit_y:
        fruits += 1
        length +=0
        fruit_x = random.randint(2,random_fruit_x)
        fruit_y = random.randint(2, random_fruit_y)
        if mare==1:
            random_fruit_x += 1
            random_fruit_y += 1
            height += 1
            width += 1
        else:
            random_fruit_x -=1
            random_fruit_y-=1
            height-=1
            width-=1
    # Draw the frame row by row using box-drawing characters.
    print(chr(9619)*(width+1)+chr(9619))
    for i in range(height):
        line+=1
        if line ==x and line == fruit_x:
            # Snake and fruit share this row: order depends on their columns.
            if y < fruit_y:
                print(chr(9619)+ chr(9617)*(y-length)+ chr(9608)*length+chr(9617)*(fruit_y-y-length)+chr(9638)+ chr(9617)*(width-fruit_y)+chr(9619))
            else:
                print(chr(9619)+ chr(9617) * (fruit_y - 1) +chr(9638)+ chr(9617) * (y - fruit_y - length) + chr(9608)*length + chr(9617)* (width - y)+chr(9619))
        elif line == x:
            print(chr(9619)+ chr(9617)*(y-length)+ chr(9608)*length+ chr(9617)*(width-y)+chr(9619))
        elif line == fruit_x:
            print(chr(9619)+ chr(9617) * (fruit_y - 1) + chr(9638) + chr(9617)* (width - fruit_y)+chr(9619))
        else:
            print(chr(9619)+chr(9617)*width+chr(9619))
    print(chr(9619) * (width+1) + chr(9619))
    line = 0
    # Read input: a pressed key both moves the snake and latches direction.
    if (keyboard.is_pressed('d')):
        y += 1
        w=0
        a=0
        s=0
        d=1
    elif (keyboard.is_pressed('a')):
        y -= 1
        w = 0
        a = 1
        s = 0
        d = 0
    elif (keyboard.is_pressed('w')):
        x -= 1
        w = 1
        a = 0
        s = 0
        d = 0
    elif (keyboard.is_pressed('s')):
        x += 1
        w = 0
        a = 0
        s = 1
        d = 0
    else:
        # No key pressed: continue in the last latched direction.
        if a==1:
            y-=1
        if s==1:
            x+=1
        if d==1:
            y+=1
        if w==1:
            x-=1
    # Speed up as the score grows.
    if fruits<=10:
        time.sleep(0.07)
    elif fruits<=20:
        time.sleep(0.06)
    elif fruits<=30:
        time.sleep(0.05)
    elif fruits<=45:
        time.sleep(0.04)
    else:
        pass
    clear()
    time.sleep(0)
    print()
    print("Y: "+str(x))
    print("x: "+str(y))
    print("Points: "+str(fruits))
    line=0
"noreply@github.com"
] | andreimarinx.noreply@github.com |
ea4b9a1ac6f1aed7d859bf3e833b06decce9082a | dd920206e9ef8b0338cd331a4a4c39f1c5775915 | /camera/views.py | 5077b05cf6c27cc49f38cda18901bf9d4004406c | [] | no_license | Moeedtkxel/moment.ai | 99757933f3b2ef859363eaf4d1506c21ee89eb4c | 475c1d7bc1aa950af04840da841bb8d1414eae3e | refs/heads/master | 2022-12-09T12:34:40.495723 | 2020-02-21T13:04:23 | 2020-02-21T13:04:23 | 242,093,664 | 0 | 0 | null | 2022-12-08T03:39:38 | 2020-02-21T08:45:55 | Python | UTF-8 | Python | false | false | 386 | py | from django.shortcuts import render
from rest_framework import generics
from camera.models import Camera
from camera.serializers import CameraSerializer
class DisplayAllCameras(generics.ListAPIView):
    """Read-only list endpoint returning every Camera.

    The redundant get_queryset() override was removed: it returned
    Camera.objects.all() verbatim, which is exactly what the queryset
    attribute already provides via GenericAPIView.get_queryset().
    """
    # 'pk' is DRF's default lookup_field; kept explicit for clarity.
    lookup_field = 'pk'
    serializer_class = CameraSerializer
    queryset = Camera.objects.all()
| [
"moeed.khalid@tkxel.com"
] | moeed.khalid@tkxel.com |
7bf22c70f45685c4ea67f99072c20b8e37fac314 | 099acd6f0d3006070bbefb7647942b32cc74a588 | /candy_shop/apps/delivery/views.py | a218de4bdf6d305aa34578225d46c2313a9e6d1e | [] | no_license | atsanda/candy-shop | 9b3066a48f1b5860c0d08a8e7e32ca331b6a3296 | 8c1eef5f9b1dae1183433e9e6476b9bd1a7c132b | refs/heads/main | 2023-03-30T16:55:24.288676 | 2021-03-25T20:17:36 | 2021-03-25T20:17:36 | 350,831,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | from rest_framework import viewsets, status, mixins
from rest_framework.response import Response
from .models import Courier, Order
from rest_framework.generics import GenericAPIView
from .serializers import (CourierSerializer, OrderSerializer, AssignSerializer,
CompleteOrderSerializer, CourierDetailsSerializer)
from .services import assign_orders, complete_order
class DeliveryCreateMixin(mixins.CreateModelMixin):
    """Create mixin supporting both single and bulk ({"data": [...]}) payloads.

    On validation failure it returns {"validation_error": {<entity_name>:
    [...]}} with only the non-empty per-item errors; on success it returns
    {<entity_name>: [{"id": ...}, ...]} built from entity_id_field.
    """
    # Subclasses override these to name the entity and its id field.
    entity_name = 'object'
    entity_id_field = 'id'
    def create(self, request, *args, **kwargs):
        """Validate and create one or many objects from the request body."""
        # Bulk requests wrap the items in a top-level "data" key.
        is_many = 'data' in request.data
        serializer = self.get_serializer(
            data=request.data['data'] if is_many else request.data,
            many=is_many,
        )
        if not serializer.is_valid(raise_exception=False):
            # Keep only items that actually have errors.
            errors = [e for e in serializer.errors if e]
            error_message = {'validation_error': {self.entity_name: errors}}
            return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        # NOTE(review): for the single-object case serializer.data is a dict,
        # so this comprehension iterates its keys — confirm bulk-only usage.
        response_data = {
            self.entity_name: [
                {'id': d[self.entity_id_field]} for d in serializer.data
            ]
        }
        return Response(response_data,
                        status=status.HTTP_201_CREATED,
                        headers=headers)
class CourierViewSet(DeliveryCreateMixin,
                     mixins.RetrieveModelMixin,
                     mixins.UpdateModelMixin,
                     mixins.ListModelMixin,
                     viewsets.GenericViewSet):
    """Courier endpoint: bulk create, retrieve, update and list."""
    queryset = Courier.objects.all()
    entity_name = 'couriers'
    entity_id_field = 'courier_id'

    def get_serializer_class(self):
        """Use the detail serializer for read actions, the plain one otherwise."""
        read_actions = ('list', 'retrieve')
        if self.action in read_actions:
            return CourierDetailsSerializer
        return CourierSerializer
# Order endpoint: bulk create (via DeliveryCreateMixin), retrieve and list.
class OrderViewSet(DeliveryCreateMixin,
                   mixins.RetrieveModelMixin,
                   mixins.ListModelMixin,
                   viewsets.GenericViewSet):
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    # Used by DeliveryCreateMixin for response/error payload keys.
    entity_name = 'orders'
    entity_id_field = 'order_id'
class AssignView(GenericAPIView):
    """POST endpoint that assigns pending orders to a courier."""
    serializer_class = AssignSerializer

    def post(self, request, *args, **kwargs):
        """Validate the courier id and return the ids of assigned orders."""
        payload = self.get_serializer(data=request.data)
        payload.is_valid(raise_exception=True)
        courier_id = payload.validated_data['courier_id']
        assigned = assign_orders(courier_id)
        body = {'orders': [{'id': order.pk} for order in assigned]}
        return Response(body, status=status.HTTP_200_OK)
class CompleteOrderView(GenericAPIView):
    """POST endpoint that marks a courier's order as completed."""
    serializer_class = CompleteOrderSerializer

    def post(self, request, *args, **kwargs):
        """Validate courier/order ids and return the completed order's id."""
        payload = self.get_serializer(data=request.data)
        payload.is_valid(raise_exception=True)
        data = payload.validated_data
        completed = complete_order(data['courier_id'], data['order_id'])
        return Response({'order_id': completed.pk}, status=status.HTTP_200_OK)
| [
"tsanda.artyom@gmail.com"
] | tsanda.artyom@gmail.com |
fcaa1532d63a15d6db0cad531937d07a12345735 | e3d9d6d7866e98cd20afca85e81d9509d5dcd9c8 | /model/RandomForest.py | 03abfb440bae194a2cd5b15efc403498f0ec7bbc | [] | no_license | csienslab-PCC/PathConstraintClassifier | 22d55cb647cb79281b9945a1019c55a8fb2b1eb0 | c5e6052fb93987d70489386b10502d1570ca9692 | refs/heads/master | 2020-04-24T01:59:36.915301 | 2019-03-04T04:30:43 | 2019-03-04T04:30:43 | 171,620,594 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py |
import sys
import pickle
import logging
import numpy as np
from ModelException import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
sys.path.append('/home/enhancing-se/enhancing-se/PathConstraintClassifier')
import config
class RandomForestModel(object):
@property
def logger(self):
name = self.__class__.__name__
return logging.getLogger(name)
def __init__(self, model=None):
self.solver_list = config.solver_list
self.model = model
return
def save(self, model_file_path):
pickle.dump(self.model, open(model_file_path, 'w'))
return
def load(self, model_file_path):
self.model = pickle.load(open(model_file_path, 'r'))
return
def process_data(self, training_data):
return training_data
def train(self, training_data):
training_data = self.process_data(training_data)
model = RandomForestClassifier()
model.fit(training_data['x'], training_data['y'])
self.model = model
print 'train done.'
return
def predict(self, data):
if self.model == None:
raise EmptyModelError("RandomForest")
ans = self.model.predict(data['x'])
return ans
def predict_proba(self, data):
ans = self.model.predict_proba(data['x'])
ret = []
classes = self.model.classes_
for a in ans:
temp = [0.0 for _ in range(len(self.solver_list))]
for i, c in enumerate(classes):
temp[int(c)] = a[i]
ret.append(temp)
return ret
if __name__ == '__main__':
data = np.load(sys.argv[1])
RFM = RandomForestModel()
RFM.train(data)
ans = RFM.predict(data['x'])
count = 0
for i, a in enumerate(ans):
if a == data['y']:
count += 1
print float(count) / len(ans)
| [
"how2play@how2hack.local"
] | how2play@how2hack.local |
addb030e7205b03d033c250b1cbd6dcdaee1db8a | c6634f6a6cfcc13116e2a666fedabd915760ff4b | /food/urls.py | 26b760d816a5c2bdb8e7821038280829fba23a4a | [] | no_license | Saharshsolanki31/finalfoods | 9f4f4aebf800ff1b24133bac411157ca05ebce65 | a7e9debf207c047044a5e0964b66e80585c2bba6 | refs/heads/dev | 2023-06-17T03:04:44.394231 | 2021-07-07T10:19:27 | 2021-07-07T10:19:27 | 383,346,251 | 0 | 0 | null | 2021-07-06T05:46:59 | 2021-07-06T05:08:09 | JavaScript | UTF-8 | Python | false | false | 1,104 | py | """food URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from food import views, settings
# Route table: admin site, the user and restaurant apps, and the home page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('user/',include('user.urls')),
    path('restaurant/',include('restaurant.urls')),
    path('',views.index,name="views"),
]+static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Serve user-uploaded media alongside static files (development-style serving).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"justforhost26@gmail.com"
] | justforhost26@gmail.com |
0190641d0a52f2bb21bdadca9729076fc23f134c | 89149d8612e080d52eee0931e00632fac702e883 | /E94084082-Lab7/game/user_request.py | ec417469b31b6e40fd3da8d88fa31d745deb4066 | [] | no_license | wanyu0105/E94084082_wanyu_Lab7 | 29bc76c9bb7e0ab2f67dbb5804e99c0698c28d77 | 26a470adc020d5ac82c9a7879971d2ea5f6d4171 | refs/heads/main | 2023-07-01T07:23:15.706364 | 2021-08-06T08:32:09 | 2021-08-06T08:32:09 | 393,311,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,411 | py | import pygame
from tower.towers import Tower, Vacancy
"""This module is import in model.py"""
"""
Here we demonstrate how does the Observer Pattern work
Once the subject updates, if will notify all the observer who has register the subject
"""
class RequestSubject:
    """Subject side of the Observer pattern: fans user requests out to observers."""
    def __init__(self, model):
        self.__observers = []
        self.model = model
    def register(self, observer):
        """Subscribe *observer*; it must expose update(user_request, model)."""
        self.__observers.append(observer)
    def notify(self, user_request):
        """Forward *user_request* plus the game model to every registered observer."""
        for watcher in self.__observers:
            watcher.update(user_request, self.model)
class EnemyGenerator:
    """Observer that spawns a new enemy wave when the user starts one."""
    def __init__(self, subject):
        subject.register(self)
    def update(self, user_request: str, model):
        """On "start new wave": add 10 enemies and advance the wave counter."""
        if user_request != "start new wave":
            return
        model.enemies.add(10)
        model.wave += 1
class TowerSeller:
    """Observer that sells the currently selected tower on a "sell" request."""
    def __init__(self, subject):
        subject.register(self)
    def update(self, user_request: str, model):
        """Sell the selected tower: refund its cost and leave an empty plot."""
        if user_request == "sell":
            x, y = model.selected_tower.rect.center
            # Refund the full tower cost.
            money += model.selected_tower.get_cost() if False else None  # (placeholder removed)
class TowerDeveloper:
    """Observer that upgrades the currently selected tower."""
    def __init__(self, subject):
        subject.register(self)
    def update(self, user_request: str, model):
        """(Bonus.1) upgrade tower"""
        # If the player clicked "upgrade" and the tower's level is below 5,
        # proceed with the upgrade check.
        if user_request == "upgrade" and model.selected_tower.level < 5:
            # If the player's money exceeds the upgrade cost, pay for the
            # upgrade and raise the tower's level (which grows its range).
            if model.money > model.selected_tower.get_cost():
                model.money -= model.selected_tower.get_cost()
                model.selected_tower.level += 1
        # if the money > upgrade cost of the selected tower , level+1
        # use model.selected_tower to access the selected tower data
        # use model.money to access to money data
class TowerFactory:
    """Observer that builds a new tower on the selected plot.

    Improvement: the original instantiated all three tower sprites on every
    matching request just to pick one out of a dict; now only the requested
    tower type is constructed.
    """
    def __init__(self, subject):
        subject.register(self)
        # Recognized tower-type request strings (kept as a public attribute).
        self.tower_name = ["pcr", "rapid test", "alcohol"]
    def update(self, user_request: str, model):
        """Build the requested tower on the selected plot if affordable."""
        if user_request not in self.tower_name:
            return
        x, y = model.selected_plot.rect.center
        tower_classes = {"pcr": Tower.PCR, "rapid test": Tower.RapidTest, "alcohol": Tower.Alcohol}
        new_tower = tower_classes[user_request](x, y)
        if model.money > new_tower.get_cost():
            # Pay, place the tower, and consume the vacant plot.
            model.money -= new_tower.get_cost()
            model.towers.append(new_tower)
            model.plots.remove(model.selected_plot)
            model.selected_plot = None
class Music:
    """Observer that resumes background music on a "music" request."""
    def __init__(self, subject):
        subject.register(self)
    def update(self, user_request: str, model):
        """Unpause the mixer and play the click sound when music is enabled."""
        if user_request != "music":
            return
        pygame.mixer.music.unpause()
        model.sound.play()
class Muse:
    """Observer that pauses background music on a "mute" request."""
    def __init__(self, subject):
        subject.register(self)
    def update(self, user_request: str, model):
        """Pause the mixer and play the click sound when music is muted."""
        if user_request != "mute":
            return
        pygame.mixer.music.pause()
        model.sound.play()
| [
"noreply@github.com"
] | wanyu0105.noreply@github.com |
fb1829184abe641220c7aa6757bd0f1e6b254663 | 9713af0c7d31faec9eecf7884f308f91eb2f4d09 | /Lab1/Source/Lab-1/Q2.py | 4808a4d8ec0d01e217cb00c36e3301d61a1b3220 | [] | no_license | NamrataDutta/Python-Lab-Assignments | fcd65c6757ffc0b07c3ddaa74222b39630c15c99 | 0275f9f2ffa1050b0850e0c8d57996cff74c2b48 | refs/heads/master | 2021-05-09T04:46:13.895472 | 2018-03-19T04:07:50 | 2018-03-19T04:07:50 | 119,286,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | #User enters the sentence
s = input("Enter the sentence")
#splitting the sentence
mid=s.split()
#calculating the length of the words after the split
a=len(mid)
#if the length of the word is even then it will print 2 words in the middle
if a%2==0:
    # Even word count: show the two middle words joined by a space.
    x=a/2
    y=int(x-1)
    print("The middle word is :", (mid[y]+" "+mid[y+1]))
else:
    x=int(a/2)
    print(mid[x])
#checking each word with other and which one has more characters
# Linear scan keeping the first word of maximal length.
longest = 0
for word in s.split():
    if len(word) > longest:
        longest = len(word)
        longest_word = word
# NOTE(review): an empty sentence leaves longest_word unbound, so this
# print would raise NameError — confirm input is assumed non-empty.
print("The longest word is : %s" % longest_word)
def reversed_words(sequence):
    """Return *sequence* with each whitespace-separated word reversed in
    place, joined by single spaces."""
    flipped = [token[::-1] for token in sequence.split()]
    return ' '.join(flipped)
print("Sentence with reversed words is : ", reversed_words(s))
| [
"nd8gv@mail.umkc.edu"
] | nd8gv@mail.umkc.edu |
4faa9a5e073c7e14b1d3f0fdf39a78e0c5049694 | 2572087b29556ed31dbcb06503d3c0c6da4b2d10 | /25_arrayReplace.py | 39f334e3bf5ad850fb2d34497be34c8d783a5b72 | [] | no_license | Humberto59/CodeSignal-Arcade-Intro | 09867ff4d1ddd680a7c010c7c1400ad0fc1b85c6 | 38a765d3338c5934a820b8bdd050d3b3d0fb7c87 | refs/heads/master | 2020-04-29T09:41:21.426395 | 2019-03-16T23:15:09 | 2019-03-16T23:15:09 | 176,034,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #!/usr/bin/env python
import sys
#25 #Rains of Reason
def arrayReplace(inputArray, elemToReplace, substitutionElem):
    """ Replace, in place, every occurrence of elemToReplace in inputArray
    with substitutionElem and return the (mutated) list.

    Fixes: removed the dead `result = None` local and replaced the
    index-based loop with idiomatic enumerate(). """
    for index, value in enumerate(inputArray):
        if value == elemToReplace:
            inputArray[index] = substitutionElem
    return inputArray
def main():
    """ Main flow """
    # Demo driver (Python 2 print syntax): replace every 1 with 3 in [1, 2, 1].
    arg = [1, 2, 1] ; v1 = 1 ; v2 = 3
    print "Input: {0}, {1}, {2} ".format(arg, v1, v2)
    out = arrayReplace(arg, v1, v2)
    print "output: " + str(out)
# main() returns None implicitly, so sys.exit(None) exits with status 0.
if __name__ == '__main__':
    sys.exit(main())
| [
"hzamora@localhost.localdomain"
] | hzamora@localhost.localdomain |
582e6d7977304ec94ff5e09011134c56548fddee | 8644a2174c3cb7ccfe211a5e49edffbcc3a74a46 | /HackerrankSolutions/ProblemSolving/DataStructures/LinkedList/Easy/insert_node_doubly_ll.py | 30a3ceddc485daee86a8b335eec39479fd28e2eb | [] | no_license | bhavya2403/Learning-Python | 9e7cc9dee21172321fb217cae27c8072357f71ce | 3898211b357fbab320010a82a4811b68611d0422 | refs/heads/main | 2023-03-24T03:19:49.989965 | 2021-03-22T20:11:04 | 2021-03-22T20:11:04 | 315,962,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | class DoublyLinkedListNode:
    def __init__(self, node_data):
        # Payload for this node; prev/next stay None until it is linked in.
        self.data = node_data
        self.next = None
        self.prev = None
class DoublyLinkedList:
    """Doubly linked list that appends new nodes at the tail."""

    def __init__(self):
        # Empty list: both ends are unset.
        self.head = None
        self.tail = None

    def insert_node(self, node_data):
        """Append a node holding *node_data* at the end of the list."""
        new_node = DoublyLinkedListNode(node_data)
        if self.head is None:
            # First node becomes the head.
            self.head = new_node
        else:
            # Link the new node behind the current tail.
            new_node.prev = self.tail
            self.tail.next = new_node
        self.tail = new_node
def sortedInsert(head, data):
    """Insert *data* into a doubly linked list sorted ascending by .data
    and return the (possibly new) head.

    Bug fix: when inserting between two nodes, the original executed
    ``curr.next = node`` and then ``curr.next.prev = node`` — but at that
    point ``curr.next`` *is* ``node``, so the new node's prev was set to
    itself and the old successor's prev still pointed at ``curr``, leaving
    the backward links broken (invisible to forward-only traversal).
    """
    node = DoublyLinkedListNode(data)
    # New minimum: the node becomes the new head.
    if data < head.data:
        node.next = head
        head.prev = node
        node.prev = None
        return node
    curr = head
    while curr:
        # Reached the tail: append.
        if curr.next is None:
            curr.next = node
            node.prev = curr
            node.next = None
            break
        # Insert between curr and curr.next (duplicates go after curr).
        if curr.data < data < curr.next.data or curr.data == data:
            node.next = curr.next
            node.prev = curr
            curr.next.prev = node  # fix the old successor's back-link first
            curr.next = node
            break
        curr = curr.next
    return head
| [
"noreply@github.com"
] | bhavya2403.noreply@github.com |
2055974086f364ee65b2b2f5b214a51c90acc8ec | b7b54106c32fa01d5f583463f00e8734de3441c9 | /pages/urls.py | a9b5af0bc6936b78f67da3321f4436aa67193e61 | [] | no_license | rexy09/eel | fcc83c1b55c0117c0be7899df9ef0f7b54c005de | b3faf35beaa39168b703d2a98b95df8b32b379ae | refs/heads/master | 2023-03-13T15:23:52.480731 | 2021-03-13T20:16:57 | 2021-03-13T20:16:57 | 342,302,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from django.urls import path
from pages import views
# URL namespace: routes can be reversed as 'pages:index', 'pages:about', ...
app_name = 'pages'
# Route table for the static pages app.
urlpatterns = [
    path('', views.index, name="index"),
    path('about', views.about, name="about"),
    path('contact', views.contact, name="contact"),
]
"fredmuju@gmail.com"
] | fredmuju@gmail.com |
3c40d89ebe58b12e75def3e0190a55e9fe582789 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py | 84eaca27405633ca786ead28b974db2f7f527e5c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,652 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# MMSegmentation config: ANN model with a ResNet-50-D8 backbone on the
# augmented PASCAL VOC 2012 dataset, 20k-iteration schedule — all
# inherited from the _base_ fragments below.
_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
# Override both heads for VOC's 21 segmentation classes.
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
74c3218fe7828e80bfe409623a6d2b06a6bda466 | cd84230bd9b168c7911f2266a705251b80d212bd | /Qt5/basic05_statusbar.py | 3e9cbaf54b160a5ec2a0235a02517e4eda847498 | [] | no_license | highgon2/PythonTest | f8b38fc6d9eab6d536591b785acc94049765cdb7 | 1b1bab38e8b09db1084d2abec1d9ea294bfb2de1 | refs/heads/master | 2023-06-25T01:58:11.178296 | 2021-07-28T01:50:06 | 2021-07-28T01:50:06 | 169,029,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
class MyApp(QMainWindow):
    """Minimal PyQt5 main window demonstrating the built-in status bar."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the UI: status-bar message, title, geometry, then show."""
        # statusBar() lazily creates the bar on first access.
        self.statusBar().showMessage("Ready")
        self.setWindowTitle("Statusbar")
        # x=300, y=300, width=400, height=300 (screen coordinates).
        self.setGeometry(300, 300, 400, 300)
        self.show()
if __name__ == "__main__":
app = QApplication(sys.argv)
my_app = MyApp()
sys.exit(app.exec_())
| [
"highgon2@gmail.com"
] | highgon2@gmail.com |
a13551308aa9e21b98aaf389a432068777e34fcf | 9a4e73b83dd050c58adc44f9b646d5c2673877e2 | /backend/user_profile/serializers.py | 4eadcc5763dae42a79ddc9b6a842d530cdc44d72 | [] | no_license | Zaid-2112/Session-django | e23d526c6c6cb8c50fecbefcb341f23761fc828f | 1b5feeff8d9544c3d5cf545d40d697fab8012e40 | refs/heads/master | 2023-05-04T17:32:07.546234 | 2021-05-23T17:50:12 | 2021-05-23T17:50:12 | 365,334,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from rest_framework import serializers
from .models import UserProfile
# Serializers for the UserProfile model.
class UserProfileSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing every field of UserProfile."""
    class Meta:
        model = UserProfile
        fields = '__all__'
| [
"anasahmed.2199@gmail.com"
] | anasahmed.2199@gmail.com |
b9483e817781707e24858a6055a70b73da0287b3 | 13b7e3297cf597686deb807b56fa3ec0adc37741 | /day01/inputs.py | 28c8c8c5aa7b9c1b418c2eb01a2ac73cd967aa5f | [] | no_license | What-After-College/Python-Batch-056 | 2765e6447ead2c6c980c4bbb754f868536343257 | 5c6bbf29924f60268fca9140fab7787e80570486 | refs/heads/master | 2022-12-03T12:40:59.115161 | 2020-08-29T10:22:04 | 2020-08-29T10:22:04 | 288,370,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | # num = input("Enter a number: ")
# print("The number is: ",num)''
# num1 = int(input("Enter first number: "))
# num2 = int(input("Enter second number: "))
# result = num1+num2
# print("Sum is: ", result)
# # print(type(num1), type(num2))
# num1 = int(input("Enter first number: "))
# num2 = int(input("Enter second Number: "))
# res = num1-num2
# print("Diffrence is: ", res)
# num1 = int(input("Enter first number: "))
# num2 = float(input("Enter second number: "))
# product = num1*num2
# print("The product is: ", product)
# num1 = float(input("Enter a number: "))
# num2 = float(input("Enter another number: "))
# q = num1/num2
# q2 = num1//num2 # integer division
# print(q)
# print(q2)
# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
# 10
# a = 1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# print(a)
# a = a +1
# a = 15
# a = 1323
# print(a)
# num1 = 2
# num2 = 100
# res = num1**num2
# print(res)
# num1 = 16
# num2 = 10
# res = num1%num2
# print(res)
# num1 =2
# num2 = 1000000000
# res = num1**num2
# ans = res%100000
# print(ans)
# num1 = 2
# num2 = 1000000000
# mod =100000
# res = pow(num1, num2, mod) # num1**num2%mod
# print(res)
# condition = 0
# if condition:
# print("true")
# else:
# print("false")
# num = int(input("Enter number to check: "))
# if (num%2 == 0):
# print("Even")
# else:
# print("ODD")
a = int(input("Enter 1: "))
b = int(input("Enter 2: "))
c = int(input("Enter 3: "))
# if a>b and a>c:
# print("greatest is, ",a)
# elif a>b and a<c:
# print("greatest is, ", c)
# else:
# print("greatest is, ", b)
if a>b and a>c:
print("greatest is, ",a)
elif c>a and c>b:
print("greatest is, ", c)
else:
print("greatest is, ", b) | [
"mjontop0602@gmail.com"
] | mjontop0602@gmail.com |
956bb462985e994afbbc8b780bfb63ddff9869ec | 6256c21af3fa3bff0bcc4bfe0ffb60ce9aad220c | /projet_python_tk_bouton_input/test_motus.py | ef9de0dbcccd22af80160c2f9117390382a79a98 | [] | no_license | SebastianHac/Projet_Python_Motus | 277a38ac8fed67bfbf152523a802cc64bed0af5f | 37d4468f9423623ca210c18be4203e7bc25b9924 | refs/heads/main | 2023-02-08T12:40:32.427998 | 2021-01-03T22:35:50 | 2021-01-03T22:35:50 | 310,053,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | import unittest
class MotusTestUnit(unittest.TestCase):
    """Unit tests for the word-validation rules of the Motus game."""

    def setUp(self):
        # setUp (not an overridden __init__) is the canonical place to
        # build per-test fixtures on a TestCase; it runs before each test.
        self.mot1 = 'voitures'   # valid: 8 alphabetic letters
        self.mot2 = 'banane'     # alphabetic but only 6 letters
        self.mot3 = 'qdz4'       # contains a digit
        self.mot4 = 'voiture4'   # 8 chars but contains a digit
        self.mot5 = 'voïtures'   # accented letters still count as alphabetic
        self.mot6 = ''           # empty word

    def test_test_mot(self):
        # Word lengths must match the expected letter counts.
        self.assertEqual(len(self.mot1), 8)
        self.assertEqual(len(self.mot2), 6)
        self.assertEqual(len(self.mot3), 4)
        self.assertEqual(len(self.mot4), 8)
        self.assertEqual(len(self.mot5), 8)
        self.assertEqual(len(self.mot6), 0)

    def test_isalpha(self):
        # Only purely alphabetic words are accepted.
        self.assertTrue(self.mot1.isalpha())
        self.assertTrue(self.mot2.isalpha())
        self.assertFalse(self.mot3.isalpha())
        self.assertFalse(self.mot4.isalpha())
        self.assertTrue(self.mot5.isalpha())
        self.assertFalse(self.mot6.isalpha())
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | SebastianHac.noreply@github.com |
95b8210713497e0cafc453597074e3ea098966f1 | 53d66c5af8adbd58457ba3b24672221a8edb8538 | /students/RussellLarge/RussellLarge-A03/hello_name.py | 1c3e271febd23ed4b742461c45089dc9c65694fc | [] | no_license | russlarge256/Python210-W19 | 5468b80ae14af791efe4c92931008612d3fa63a0 | 3119d9d0425a5c1e49f41aa1fa70fed4372e60a2 | refs/heads/master | 2020-04-19T08:06:50.259300 | 2019-03-17T20:15:43 | 2019-03-17T20:15:43 | 168,066,312 | 0 | 0 | null | 2019-01-29T01:20:25 | 2019-01-29T01:20:25 | null | UTF-8 | Python | false | false | 276 | py | '''
Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
hello_name('Bob') → 'Hello Bob!'
hello_name('Alice') → 'Hello Alice!'
hello_name('X') → 'Hello X!'
'''
def hello_name(name):
    """Return a greeting of the form "Hello <name>!", e.g. 'Hello Bob!'.

    The original guarded on ``name != False``, which is true for every
    string (including ""), so the guard was dead code for the documented
    string inputs; return unconditionally instead.
    """
    return "Hello" + " " + name + "!"
| [
"russlarge@gmail.com"
] | russlarge@gmail.com |
1a2bf9bf5039e875dc2f03afca4380918f039fa9 | 5a54f52ea01c73b203b0b843273b4d6e3d4a1fd5 | /Python/PythonDjango/wall_erd/apps/wall/migrations/0001_initial.py | 3ce479e183d529f0fbfbfb430679d0b755bb4204 | [] | no_license | rockomatthews/Dojo | 223e8e37d7ecca308f263852a12258098b525f87 | cb223809ab677999f90f7aed5d54951850a1a103 | refs/heads/master | 2020-02-26T15:15:32.906973 | 2017-10-30T17:06:51 | 2017-10-30T17:06:51 | 83,516,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-20 15:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the wall app.

    Creates the Comments, Message and User tables, then wires the foreign
    keys (Message -> User; Comments -> Message and User).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'commment' (triple m) is a typo baked into the
                # applied schema; it must stay as-is in this migration.
                ('commment', models.TextField(max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=45)),
                ('message', models.TextField(max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=45)),
                ('last_name', models.CharField(max_length=45)),
                ('password', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.AddField(
            model_name='message',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wall.User'),
        ),
        migrations.AddField(
            model_name='comments',
            name='message_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wall.Message'),
        ),
        migrations.AddField(
            model_name='comments',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wall.User'),
        ),
    ]
| [
"rob.matthews@me.com"
] | rob.matthews@me.com |
0ca53be296d182dd9a97e3c30a691a63d83fbe0d | 2b03b8113a03e7da6e2fc14a525692c4c04626ae | /Paint/paint.py | 25b17269edcff7224d44b65f577167a4020d3f01 | [
"MIT"
] | permissive | Mechatronixyt/Python-Apps | ec20e7dace68bdcfda8d881e427d05e649167088 | d74e650c73c7dde6e73bfc25b0c296f358644f95 | refs/heads/main | 2023-03-18T04:32:10.523800 | 2021-03-20T18:28:29 | 2021-03-20T18:28:29 | 348,685,916 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,201 | py | # creator: @Mechatronix
# date: 20.03.2021
# lisencs: MIT
# contact: mechatronix@mail.de
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenuBar, QMenu, QAction, QFileDialog
from PyQt5.QtGui import QImage, QPainter, QPen
from PyQt5.QtCore import Qt, QPoint
import sys
class Window(QMainWindow):
    """Minimal paint application.

    Left-dragging on the canvas draws with the current brush; the menu
    bar offers Save/Clear, brush sizes 1-9 px and a set of brush colors.
    """

    def __init__(self):
        super().__init__()

        top = 400
        left = 400
        width = 800
        height = 600

        self.setWindowTitle("Painter")
        self.setGeometry(top, left, width, height)

        # Off-screen image holding the drawing; blitted in paintEvent.
        self.image = QImage(self.size(), QImage.Format_RGB32)
        self.image.fill(Qt.white)

        # Brush state.
        self.drawing = False
        self.brush_size = 2
        self.brush_color = Qt.black
        # Placeholder point; overwritten on the first mouse press.
        # (The original stored the QPoint *class* itself, not an instance.)
        self.last_point = QPoint()

        main_menu = self.menuBar()
        file_menu = main_menu.addMenu("File")
        brush_menu = main_menu.addMenu("Brush Size")
        color_menu = main_menu.addMenu("Brush Color")

        # File menu.
        self._add_action(file_menu, "Save", self.save)
        self._add_action(file_menu, "Clear", self.clear)

        # Brush sizes 1-9 px on Ctrl+1 .. Ctrl+9.
        self._add_action(brush_menu, "1 px", self.one_px, "Ctrl+1")
        self._add_action(brush_menu, "2 px", self.two_px, "Ctrl+2")
        self._add_action(brush_menu, "3 px", self.three_px, "Ctrl+3")
        self._add_action(brush_menu, "4 px", self.four_px, "Ctrl+4")
        self._add_action(brush_menu, "5 px", self.five_px, "Ctrl+5")
        self._add_action(brush_menu, "6 px", self.six_px, "Ctrl+6")
        self._add_action(brush_menu, "7 px", self.seven_px, "Ctrl+7")
        self._add_action(brush_menu, "8 px", self.eight_px, "Ctrl+8")
        self._add_action(brush_menu, "9 px", self.nine_px, "Ctrl+9")

        # Brush colors.
        self._add_action(color_menu, "Black", self.color_black, "Ctrl+Shift+B")
        self._add_action(color_menu, "White", self.color_white, "Ctrl+Shift+W")
        self._add_action(color_menu, "Red", self.color_red, "Ctrl+R")
        self._add_action(color_menu, "Green", self.color_green, "Ctrl+G")
        self._add_action(color_menu, "Blue", self.color_blue, "Ctrl+B")
        self._add_action(color_menu, "Cyan", self.color_cyan, "Ctrl+C")
        self._add_action(color_menu, "Yellow", self.color_yellow, "Ctrl+Y")
        self._add_action(color_menu, "Purple", self.color_purple, "Ctrl+P")

    def _add_action(self, menu, text, handler, shortcut=None):
        """Create a QAction, wire it to *handler*, optionally bind a
        keyboard *shortcut*, and append it to *menu*.

        De-duplicates the original's ~70 lines of copy-pasted action setup.
        """
        action = QAction(text, self)
        if shortcut is not None:
            action.setShortcut(shortcut)
        menu.addAction(action)
        action.triggered.connect(handler)

    def mousePressEvent(self, event):
        """Start a stroke on a left-button press."""
        if event.button() == Qt.LeftButton:
            self.drawing = True
            self.last_point = event.pos()

    def mouseMoveEvent(self, event):
        """Extend the current stroke while the left button is held down."""
        if (event.buttons() & Qt.LeftButton) & self.drawing:
            painter = QPainter(self.image)
            painter.setPen(QPen(self.brush_color, self.brush_size,
                                Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            painter.drawLine(self.last_point, event.pos())
            self.last_point = event.pos()
            self.update()

    def mouseReleaseEvent(self, event):
        """Finish the stroke when the left button is released.

        Bug fix: the original compared the *bound method* ``event.button``
        (missing the call parentheses) with ``Qt.LeftButton``, which is
        always False, so ``self.drawing`` was never reset on release.
        """
        if event.button() == Qt.LeftButton:
            self.drawing = False

    def paintEvent(self, event):
        """Blit the off-screen image onto the visible widget."""
        canvas_painter = QPainter(self)
        canvas_painter.drawImage(self.rect(), self.image, self.image.rect())

    def save(self):
        """Ask for a file name and save the canvas; no-op on cancel."""
        file_path, _ = QFileDialog.getSaveFileName(
            self, "Save Image", "",
            "PNG(*.png);;JPEG(*.jpg *.jpeg);;ALL Files(*.*)")
        if file_path == "":
            return
        self.image.save(file_path)

    def clear(self):
        """Wipe the canvas back to white and repaint."""
        self.image.fill(Qt.white)
        self.update()

    # --- brush-size menu slots -------------------------------------------

    def one_px(self):
        self.brush_size = 1

    def two_px(self):
        self.brush_size = 2

    def three_px(self):
        self.brush_size = 3

    def four_px(self):
        self.brush_size = 4

    def five_px(self):
        self.brush_size = 5

    def six_px(self):
        self.brush_size = 6

    def seven_px(self):
        self.brush_size = 7

    def eight_px(self):
        self.brush_size = 8

    def nine_px(self):
        self.brush_size = 9

    # --- brush-color menu slots ------------------------------------------

    def color_black(self):
        self.brush_color = Qt.black

    def color_white(self):
        self.brush_color = Qt.white

    def color_red(self):
        self.brush_color = Qt.red

    def color_green(self):
        self.brush_color = Qt.green

    def color_blue(self):
        self.brush_color = Qt.blue

    def color_yellow(self):
        self.brush_color = Qt.yellow

    def color_cyan(self):
        self.brush_color = Qt.cyan

    def color_purple(self):
        self.brush_color = Qt.magenta
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
window.show()
app.exec() | [
"noreply@github.com"
] | Mechatronixyt.noreply@github.com |
ff6bd559acf6fe9d63ce293ed90c9cd15fbb0a5a | 6c646388e17b41f61a88efec279d751ad2ddcef0 | /min_function2.py | 4737b8fee1dd4426cb7bf00d6bef1f3565741efe | [] | no_license | mj-ribeiro/Math-Statistics-in-Python | 749bde6b72585c43e2088679d72e5f99a1ab2d99 | 74b114208e26437227d182504ae8eeab25f56d09 | refs/heads/master | 2020-12-21T17:23:47.176365 | 2020-07-26T14:25:22 | 2020-07-26T14:25:22 | 236,502,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:46:53 2020
@author: Marcos J Ribeiro
"""
import scipy.optimize as sp
import numpy as np
import matplotlib.pyplot as plt
def f(x):
    """Quadratic objective f(x) = x**2 + x - 3."""
    squared = x ** 2
    return squared + x - 3
def c1(x):
    # Inequality constraint for SLSQP ('ineq' means the value must be >= 0),
    # i.e. this forces f(x) >= 0 at the solution.
    return f(x) - 0
# Constraint dict for scipy.optimize.minimize: 'ineq' requires c1(x) >= 0.
cons = {'type':'ineq', 'fun':c1}
# Initial guess for the minimizer.
x0 = 100
sol = sp.minimize(f, x0, method='SLSQP', constraints=cons)
# NOTE(review): the three bare expressions below only display values in an
# interactive session (Spyder/IPython); run as a script they are no-ops.
sol
sol.fun
sol.x
# Sample f on [-50, 50) and plot it (x-axis is the sample index, not x).
x = np.arange(-50, 50, 0.1)
a = list(map(f, x))
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(a)
| [
"mjribeiro@usp.br"
] | mjribeiro@usp.br |
292a70303ab24f11e4e69776cbb9a8f245807a2e | f5aae3ea83268e13b0d2c02ca6d94eedb87ca510 | /code/webserver/django/reporting_app/migrations/0032_auto_20160318_1408.py | dceae7cfabdef470904726962de36beca764ccb2 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | openzero-team/evel-reporting-reference-vnf | 446ad3f8919cb09bc0c7b920450589e32c5ad219 | 39bf9e30a4984f94bfc605d733d05fa491e65835 | refs/heads/master | 2021-06-06T05:06:04.029324 | 2016-10-11T02:47:39 | 2016-10-11T02:47:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,811 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-18 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjust MobileFlow field options.

    Every operation is an AlterField on the 'mobileflow' model — mostly
    making CharFields optional (blank/null), giving the IP/endpoint fields
    an 'unknown' default, and defaulting the byte counters to 0.
    """

    dependencies = [
        ('reporting_app', '0031_auto_20160227_1830'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mobileflow',
            name='cid',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'CID'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='ecgi',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'ECGI'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='gtp_connection_status',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'GTP connection status'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='gtp_protocol_type',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'GTP protocol type'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='gtp_tunnel_status',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'GTP tunnel status'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='gtp_version',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'GTP version'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='http_header',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'HTTP header'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='imei',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'IMEI'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='imsi',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'IMSI'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='ip_protocol_type',
            field=models.CharField(blank=True, default='unknown', max_length=64, verbose_name=b'IP protocol type'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='ip_version',
            field=models.CharField(blank=True, default='unknown', max_length=64, verbose_name=b'IP version'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='lac',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'LAC'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='mcc',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'MCC'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='mnc',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'MNC'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='msisdn',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'MSISDN'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='num_L7_bytes_received',
            field=models.IntegerField(default=0, verbose_name=b'Num L7 bytes received'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='num_L7_bytes_transmitted',
            field=models.IntegerField(default=0, verbose_name=b'Num L7 bytes transmitted'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='num_gtp_echo_failures',
            field=models.IntegerField(blank=True, null=True, verbose_name=b'Num GTP echo failures'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='num_http_errors',
            field=models.IntegerField(blank=True, null=True, verbose_name=b'Num HTTP errors'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='num_tunneled_L7_bytes_received',
            field=models.IntegerField(default=0, verbose_name=b'Num tunneled L7 bytes received'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='other_endpoint_ip_address',
            field=models.CharField(blank=True, default='unknown', max_length=64, verbose_name=b'Other endpoint IP address'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='rac',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'RAC'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='sac',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'SAC'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='tac',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'TAC'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='tunnel_id',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'Tunnel ID'),
        ),
        migrations.AlterField(
            model_name='mobileflow',
            name='vlan_id',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name=b'VLAN ID'),
        ),
    ]
| [
"blsaws@hotmail.com"
] | blsaws@hotmail.com |
bf4b8314af5c3e90c21ffb2b2e162da2ba70d665 | d45d07f8f6f6bae05a74ab19b75b7d0a8bfc2df1 | /prajesh_gohel/Python/django_intro/time_display/time_display/urls.py | 81822ba50915cd2c165ef5dbcb78d09128f933a1 | [] | no_license | CodingDojoDallas/python_july_2018 | efd4df419c381e8fe504a9b017dabb789281b561 | 5aaf6bd53b838503289ed422cc6d605f79d547d7 | refs/heads/master | 2020-03-22T15:03:22.719556 | 2018-07-17T01:25:22 | 2018-07-17T01:25:22 | 140,226,045 | 0 | 10 | null | 2018-07-25T17:55:36 | 2018-07-09T03:21:21 | Python | UTF-8 | Python | false | false | 737 | py | """time_display URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
urlpatterns = [
path('', include('apps.time_app.urls'))
]
| [
"prajeshgohel@gmail.com"
] | prajeshgohel@gmail.com |
c3b5005a2b16bf465392034a5dd8560026528ce1 | 9318b1885946f639f1446431abc6ec4fa33fc9ac | /Cisco_python/module_4/act-3.py | 4a247a9c4472018c19b3a620743bb178d2405f56 | [] | no_license | mcewenar/PYTHON_INFO_I_BASIC | 1d365bcd3d0186c8955e3cde2605831717d0a412 | e5c3278969b420e7ce03bf7903cf57e63865aaca | refs/heads/master | 2023-06-04T02:26:42.124304 | 2021-06-22T02:48:08 | 2021-06-22T02:48:08 | 326,510,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | #Tu tarea es escribir y probar una función que toma tres argumentos (un año, un mes y un día del mes)
#y devuelve el día correspondiente del año, o devuelve None si cualquiera de los argumentos no es válido.
#Debes utilizar las funciones previamente escritas y probadas. Agrega algunos casos de prueba al código.
#Esta prueba es solo el comienzo.
def isYearLeap(year):
if year % 4 == 0 and (year %100 != 0 or year % 400 == 0):
return True
else:
return False
def daysInMonth(year, month):
if month <= 0 or month > 12 or year < 1582:
return None
else:
if month in [1,3,5,7,8,10,12]:
return 31
elif month == 2:
if isYearLeap(year):
return 29
else:
return 28
else:
return 30
def dayOfYear(year, month, day):
days = 0
for m in range(1, month):
md = daysInMonth(year,m)
if md == None:
return None
days += md
md = daysInMonth(year, month)
if md == None or month == None:
return None
elif day >= 1 and day <= md:
return days + day
else:
return None
while True:
try:
x=int(input("Ingrese un año: "))
y=int(input("Ingrese el mes: "))
z=int(input("Ingrese el día: "))
print(dayOfYear(x, y, z))
except ValueError:
print("No se permite ingresar datos alfanuméricos")
| [
"dmcewena@hotmail.com"
] | dmcewena@hotmail.com |
5c3acbff7b4396dd50b8f5d6b34d27c5a4cd7b7a | 614a1b9144236c77922de9db5e115b7f72e74b1f | /utils.py | e31c657b6e6211cf7a167d0a8f3e73a2af8461ce | [] | no_license | ianjdarrow/mass_pr | e07cb8355986444a245b7da6dc681895e7472355 | 6456b397704da8f0e3e72369f0103eebcc71fb77 | refs/heads/master | 2020-04-19T07:57:41.902915 | 2019-01-31T20:58:08 | 2019-01-31T20:58:08 | 168,062,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | import time
def format_s(delta):
    """Render a duration given in nanoseconds as seconds, e.g. '1.500s'."""
    seconds = delta / 1e9
    return f'{seconds:.3f}s'
def chunk(data, n):
    """Yield consecutive length-*n* slices of *data*; the final slice may
    be shorter when len(data) is not a multiple of n."""
    for start in range(0, len(data), n):
        yield data[start:start + n]
| [
"ian.j.darrow@gmail.com"
] | ian.j.darrow@gmail.com |
0318c3573be46cdd69165baf1ea0dba3219f22b1 | 33648419977a4fba65b759eb3a2a974403298340 | /laser_slowing/visualize.py | 3400eca2d6766712cb5623154a5ba5a8a9696f66 | [] | no_license | Andrew-wi/magnetic_lens | 3c3cbd2c5b821f4f43d7b562afbab6610f3d9608 | efe51168018c5e4c9c2055416dc47c3694af448b | refs/heads/master | 2023-05-23T10:00:08.998982 | 2021-06-15T18:05:32 | 2021-06-15T18:05:32 | 262,870,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,990 | py | # ----------------------------------------------------------------------------
# Visualize
# ----------------------------------------------------------------------------
from dependencies import *
from init import *
from vector_field import *
print('Visualizing fields...')
# ---- Magnetization quiver plot (disabled; kept for reference) ----
# # Plot magnetization of circular Halbach array
# mMatrixFigSlice, mMatrixAxSlice = plt.subplots()
# hexagonInner = [[R / 2 * np.cos(angle), R / 2 * np.sin(angle)] for angle in np.linspace(0, 2 * np.pi, segs, endpoint=False)]
# hexagonInner.append(hexagonInner[0])
# x1, y1 = list(zip(*hexagonInner))
# hexagonOuter = [[R * np.cos(angle), R * np.sin(angle)] for angle in np.linspace(0, 2 * np.pi, segs, endpoint=False)]
# hexagonOuter.append(hexagonOuter[0])
# x2, y2 = list(zip(*hexagonOuter))
# mMatrixAxSlice.plot(x1, y1, 'k')
# mMatrixAxSlice.plot(x2, y2, 'k')
# mMatrixAxSlice.axis('equal')
# x2d, y2d = np.meshgrid(np.linspace(-R, R, m), np.linspace(-R, R, m))
# mMatrixAxSlice.quiver(x2d, y2d, mxMatrixSlice, myMatrixSlice)
# mMatrixAxSlice.set_title(\
#     '2D Slice of Magnetization in Circular Halbach Array')
# mMatrixAxSlice.set_ylabel('y (mm)')
# mMatrixAxSlice.set_xlabel('x (mm)')
# Output directories are date-stamped; exist_ok makes reruns safe.
Path('{}/magnetization_plots_2D_{}'.format(datetime.date.today(), datetime.date.today())).mkdir(parents=True, exist_ok=True)
# NOTE(review): the commented savefig below is missing the '.' before format().
# plt.savefig('{}/magnetization_plots_2D_{}/magnetization_2D_{}'format(datetime.date.today(), datetime.date.today(), datetime.date.today()))
# ---- 3D b-field quiver plot (disabled; kept for reference) ----
# # Plot b-field in three dimensions
# bMatrixFig3D = plt.figure()
# bMatrixAx3D = bMatrixFig3D.gca(projection='3d')
# x, y, z = np.meshgrid(np.linspace(-R/2, R/2, m),
#                       np.linspace(-R/2, R/2, m),
#                       np.linspace(-R/2, R/2, m))
# bMatrixAx3D.quiver(x, y, z, bxMatrix, byMatrix, bzMatrix, length=3, \
#                    normalize=True)
# bMatrixAx3D.set_title('Magnetic Field in Circular Halbach Array')
# bMatrixAx3D.set_xlabel('x (mm)')
# bMatrixAx3D.set_ylabel('y (mm)')
# bMatrixAx3D.set_zlabel('z (mm)')
Path('{}/b_field_plots_{}'.format(datetime.date.today(), datetime.date.today())).mkdir(parents=True, exist_ok=True)
# plt.savefig('{}/b_field_plots_{}/b_field_3D_{}'.format(datetime.date.today(), datetime.date.today(), datetime.date.today()))
# Plot slice of b-field in two dimensions
# Draw the array's inner hexagon outline, then quiver the central z-slice.
bMatrixFigSlice, bMatrixAxSlice = plt.subplots()
hexagon = [[R / 2 * np.cos(angle), R / 2 * np.sin(angle)] for angle in np.linspace(0, 2 * np.pi, segs, endpoint=False)]
hexagon.append(hexagon[0])
x, y = list(zip(*hexagon))
bMatrixAxSlice.plot(x, y, 'k')
bMatrixAxSlice.axis('equal')
x2d, y2d = np.meshgrid(np.linspace(-R/2, R/2, m), np.linspace(-R/2, R/2, m))
# Take the x and y components of the field on the mid-plane (index m/2).
bxMatrixSlice = bMatrix[:, :, int(m/2), 0]
byMatrixSlice = bMatrix[:, :, int(m/2), 1]
bMatrixAxSlice.quiver(x2d, y2d, bxMatrixSlice, byMatrixSlice)
bMatrixAxSlice.set_title(\
    '2D Slice of Magnetic Field in Circular Halbach Array')
bMatrixAxSlice.set_ylabel('y (mm)')
bMatrixAxSlice.set_xlabel('x (mm)')
plt.savefig('{}/b_field_plots_{}/b_field_2D_slice_{}'.format(datetime.date.today(), datetime.date.today(), datetime.date.today()))
# Plot force field
# Same mid-plane slice, using the force field components instead.
force_fieldSlice2DFig, force_fieldSlice2DAx = plt.subplots()
force_fieldSlice2DAx.plot(x, y, 'k')
x2d, y2d = np.meshgrid(np.linspace(-R/2, R/2, m), np.linspace(-R/2, R/2, m))
forceX = force_field[:, :, int(m/2), 0]
forceY = force_field[:, :, int(m/2), 1]
# todo: get the color right, graded by strength of field
# color = normBMatrix[:, :, int(m/2)]
force_fieldSlice2DAx.quiver(x2d, y2d, forceX, forceY)
force_fieldSlice2DAx.axis('equal')
force_fieldSlice2DAx.set_title(\
    '2D Slice of Force Field in Circular Halbach Array Magnetic Field')
force_fieldSlice2DAx.set_ylabel('y (mm)')
force_fieldSlice2DAx.set_xlabel('x (mm)')
Path('{}/force_field_plots_{}'.format(datetime.date.today(), datetime.date.today())).mkdir(parents=True, exist_ok=True)
plt.savefig('{}/force_field_plots_{}/force_field_2D_slice_{}'.format(datetime.date.today(), datetime.date.today(), datetime.date.today()))
| [
"andrew.winnicki.sc@gmail.com"
] | andrew.winnicki.sc@gmail.com |
8791a940c36d9ba54a7cc9c8daf732090787f76e | 64cde3c728f37dac444cd2a83a4f4d9c10c0c568 | /leetcode-1721-swapping-nodes-in-a-linkedin-list/leetcode-1721-swapping-nodes-in-a-linkedin-list.py | ea7fd9479b7ae410bb2e8f4b9036f23002bb423f | [] | no_license | kajalgada-gmr/leetcode-python | 6d2a5b202ba5a2b6ccc5da7ef68f9ee2541040d2 | 01a88bf84130f6f07f5772f222a30c75ca554666 | refs/heads/main | 2023-06-14T23:29:36.670578 | 2021-07-12T19:33:41 | 2021-07-12T19:33:41 | 386,093,610 | 0 | 0 | null | 2021-07-14T22:39:10 | 2021-07-14T22:39:09 | null | UTF-8 | Python | false | false | 1,020 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
        """Swap the *values* of the k-th node from the front and the k-th
        node from the back; return the (structurally unchanged) head."""
        # Nothing to swap in an empty or single-node list.
        if head is None or head.next is None:
            return head
        # Advance to the k-th node from the front (lists are 1-indexed).
        front = head
        for _ in range(k - 1):
            front = front.next
        # Run `runner` from the k-th node to the tail; `back`, started at
        # the head, then lands on the k-th node from the end.
        back = head
        runner = front
        while runner.next is not None:
            runner = runner.next
            back = back.next
        # Exchange stored values only; node links are untouched.
        front.val, back.val = back.val, front.val
        return head
| [
"noreply@github.com"
] | kajalgada-gmr.noreply@github.com |
29d7a98c07c9898895662a98876a970f2954f5a8 | 929ffa6bf0b60f03b1620d280ed70bf40891c74c | /Python/055.py | e28100e776cbf666eb697fd65c6f45c3780105e1 | [
"Unlicense"
] | permissive | arvganesh/Project-Euler-Log | 3aad3928c13c561b83f883fa30d8305c0afe446a | 19434437853870ca9ed54f09eb28615f54dc3202 | refs/heads/master | 2020-04-17T16:38:41.480385 | 2019-08-15T20:42:23 | 2019-08-15T20:42:23 | 67,154,482 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | def isPalindrome(strin): # Checks if Palindrome
instr = str(strin)
numrev = instr[::-1]
if instr == numrev:
return True
return False
def lychrelNum(n):
    """Return True if n yields no palindrome within 50 reverse-and-add steps."""
    current = n
    for _ in xrange(50):  # 50-iteration limit
        candidate = current + int(str(current)[::-1])
        if isPalindrome(candidate):
            return False
        current = candidate
    return True
# Count how many of the first 10000 integers are Lychrel candidates
# (never reach a palindrome within 50 reverse-and-add iterations).
count = 0
for x in xrange(1, 10001): # 1st 10000 numbers.
    if lychrelNum(x):
        count += 1
print count
| [
"noreply@github.com"
] | arvganesh.noreply@github.com |
c31952cb741afc8d6221da1766048dabf15ef27b | 7cdbe9eccd815bc8888079abaf5d16201c82e316 | /import_to_cassandra.py | 9bb7c061ce47b766d53e1f011c7ed17192aed5db | [] | no_license | joeljacobson/cassandra_tweet_stream | 99f40ac696e5e33004d764d534f07b377c65cf3a | c1b5e86f5709fa0e7bb82544874958c3b69d27b4 | refs/heads/master | 2021-01-19T05:29:45.362697 | 2016-10-21T20:08:06 | 2016-10-21T20:08:06 | 64,262,023 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | from cassandra.cluster import Cluster
from cassandra.query import BatchStatement
from cassandra.query import SimpleStatement
import json
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.execute(
"""
CREATE KEYSPACE IF NOT EXISTS twitter_data WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1 };
"""
)
session.execute(
"""
CREATE TABLE IF NOT EXISTS twitter_data.tweets ( user_id bigint, user_name text, tweet text, location text, PRIMARY KEY ((user_id), user_name));
"""
)
class ImportToCassandra:
    """Batches parsed tweet JSON strings into twitter_data.tweets."""

    def process_tweet_list(self, list):
        """Insert every parseable tweet in *list* as a single batch.

        Records that are not valid JSON, are not objects, or lack the
        expected user fields are skipped.
        """
        batch = BatchStatement()
        for data in (list):
            try:
                data = json.loads(data)
                user_id = data['user']['id']
                user_name = data['user']['screen_name']
                tweet = data['text']
                location = data['user']['location']
                batch.add(SimpleStatement("INSERT INTO twitter_data.tweets (user_id, user_name, tweet, location) VALUES (%s, %s, %s, %s)"), (user_id, user_name, tweet, location))
            except (ValueError, KeyError, TypeError):
                # Skip malformed records only; the original bare `except`
                # also swallowed KeyboardInterrupt/SystemExit.
                continue
        session.execute(batch)
| [
"joel@joeljacobson.com"
] | joel@joeljacobson.com |
a9ffbf6927f011eca02197d776c8cdf231525322 | 42ff36f4c6c967d2f39bf75c1f24c8b5911a8491 | /whileloop.py | b570fd612b80873ea6da85ac6f2859b7c5ebf077 | [] | no_license | cal1log/python | 0d47b688e619d0cdd464267225f76fff7d3101a4 | c8196c40e5505d4e83301ada97dd384611660778 | refs/heads/main | 2023-06-29T13:11:31.869976 | 2021-07-27T22:36:04 | 2021-07-27T22:36:04 | 366,841,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | #!/usr/bin/env python3
n = 0
''' incremental while loop '''
while n < 5:
    print('hello calilog')
    n = n + 1
print()
n = 5
''' decremental while loop '''
while n > 0:
    print('hello calilog')
    n = n - 1
| [
"orlago250183@gmail.com"
] | orlago250183@gmail.com |
121487660dc60511473593213656ba0f82050aa3 | 73c585e3007197e415bd0c10143db1ac191f3519 | /Discussion2.py | a98efd0617f7f8305d062b01a4d8364cfb728851 | [] | no_license | ABoothInTheWild/Northwestern | 785c5b6ff6b65f6f07d6365f7ad3d9d17d093bda | dbe0344833377bbe63a842a1c6b9e694b74c2860 | refs/heads/master | 2021-01-11T16:01:52.318336 | 2019-07-14T18:16:03 | 2019-07-14T18:16:03 | 79,984,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 14 17:33:12 2017
@author: Alexander
"""
import csv
import os
#set wd
os.chdir('C:/Users/Alexander/Documents/Northwestern/Winter 2017/MSPA 400/Python/Session 2')
# Read the raw training rows and their labels.  `with` guarantees the
# handles are closed (the original leaked both open files).
with open('train.csv', "r") as data_file:
    trainData = data_file.readlines()
with open('trainLabels.csv', "r") as label_file:
    trainLabels = label_file.readlines()
# Strip the trailing newline character from every record.
trainDataList = [line.rstrip('\n') for line in trainData]
trainLabelList = [line.rstrip('\n') for line in trainLabels]
# Write the combined CSV: each output row is the label followed by the
# matching data row, one cell per comma-separated value.
with open('trainCombined.csv', "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for i, row in enumerate(trainDataList):
        # Add the label to the beginning of the data row.
        row = trainLabelList[i] + ',' + row
        # Split on commas and write stripped cells as one row.
        writer.writerow([c.strip() for c in row.strip(', ').split(',')])
"noreply@github.com"
] | ABoothInTheWild.noreply@github.com |
16bec49a939949dec19469329515808a53e2b58d | ddd35c693194aefb9c009fe6b88c52de7fa7c444 | /Live 10.1.18/_NKFW2/ResettingMixerComponent.py | c2477c605980a00da5595cf0a5b14ce75043c10b | [] | no_license | notelba/midi-remote-scripts | 819372d9c22573877c7912091bd8359fdd42585d | e3ec6846470eed7da8a4d4f78562ed49dc00727b | refs/heads/main | 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: C:\ProgramData\Ableton\Live 9.7 Suite\Resources\MIDI Remote Scripts\_NKFW2\ResettingMixerComponent.py
# Compiled at: 2017-10-14 18:54:45
from itertools import izip_longest
from _Framework.CompoundComponent import CompoundComponent
from _Framework.SubjectSlot import subject_slot
from ResettingChannelStripComponent import ResettingChannelStripComponent
from Utils import right_justify_track_components
justify_function = right_justify_track_components
class ResettingMixerComponent(CompoundComponent):
    """ ResettingMixerComponent works with a SlaveManager to control a group of
    ResettingChannelStripComponents. """
    def __init__(self, slave_manager, num_tracks=8, right_just_returns=True, name='Resetting_Mixer_Control', *a, **k):
        # Build one resetting channel strip per controllable track and
        # subscribe to the slave manager's track offset so the strips
        # follow the currently selected track bank.
        super(ResettingMixerComponent, self).__init__(name=name, *a, **k)
        self._right_justify_returns = bool(right_just_returns)
        self._channel_strips = []
        for _ in xrange(num_tracks):
            strip = self.register_component(ResettingChannelStripComponent())
            self._channel_strips.append(strip)
        self._reassign_tracks.subject = slave_manager
        self._reassign_tracks(slave_manager.track_offset)
    def set_reset_volume_buttons(self, buttons):
        """ Sets the buttons to use for resetting volume. """
        # izip_longest pads with None, which detaches a strip's button.
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_volume_button(button)
    def set_reset_pan_buttons(self, buttons):
        """ Sets the buttons to use for resetting pan. """
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_pan_button(button)
    def set_reset_send_a_buttons(self, buttons):
        """ Sets the buttons to use for resetting send A. """
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_send_a_button(button)
    def set_reset_send_b_buttons(self, buttons):
        """ Sets the buttons to use for resetting send B. """
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_send_b_button(button)
    @subject_slot('track_offset')
    def _reassign_tracks(self, offset):
        """ Re-maps every strip to the track at its offset whenever the
        slave manager's track offset changes. """
        tracks = self._reassign_tracks.subject.tracks_to_use
        if self._right_justify_returns:
            justify_function(self.song(), tracks, offset, self._channel_strips)
        else:
            # Clear any strip whose offset falls past the last track.
            for index, comp in enumerate(self._channel_strips):
                track_offset = offset + index
                if track_offset in xrange(len(tracks)):
                    comp.set_track(tracks[track_offset])
                else:
                    comp.set_track(None)
        return
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/_NKFW2/ResettingMixerComponent.pyc
| [
"notelba@example.com"
] | notelba@example.com |
59b91296ccaa8963d2281f16b8d11899e58118bf | 2fc1dd57e421458e0c59048a5118668e028f98b3 | /generalized_methods/journal_paper/weighted_SVM/results6/main.py | 9e870fe4ebcfe08f6836e7f9ac1480f213703e1e | [] | no_license | AllenMao/semi_bts_svm | 31bfe5c61d04ba9529e49b4907510a0c26d31696 | a9257497cd026daa97ac218e4d2abf45895f9772 | refs/heads/master | 2021-01-21T21:29:26.729833 | 2015-07-22T23:14:02 | 2015-07-22T23:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | import numpy as np
import os
import sys
import fcntl
import copy
import pdb
sys.path.append('/home/local/USHERBROOKE/havm2701/git.repos/semi_bts_svm/semi_bts_svm/generalized_methods/')
sys.path.append('/home/local/USHERBROOKE/havm2701/git.repos/semi_bts_svm/semi_bts_svm/generalized_methods/journal_paper/')
from string import Template
import mlpython.datasets.store as dataset_store
from mlpython.learners.third_party.libsvm.classification import SVMClassifier
import compute_statistics
import time
import data_utils
from model_svm_6d_hyperparameter_Sensitivity import svm_model as svm_model
from model_svm_6d_hyperparameter_Sensitivity import load_data as load_data
from model_svm_6d_hyperparameter_Sensitivity import svm_model2 as svm_model2
#import ipdb
# make a dictionary of the selected brains with their best c and gamma parameters.
#brain_list = {'HG_0002': [1,50], 'HG_0001': [1,0.01], 'HG_0003': [1,1], 'HG_0010': [1,50], 'HG_0008': [1,5], 'HG_0012': [50,50], 'HG_0011': [1,5], 'HG_0022': [1,5], 'HG_0025': [1,50], 'HG_0027': [1,10], 'LG_0008': [1,200], 'LG_0001': [1,50], 'LG_0006': [1,1], 'LG_0015': [100,200], 'HG_0014': [1,5]}
#'HG_0002': [1,50], 'HG_0001': [1,0.01], 'HG_0003': [1,1], 'HG_0004': [1500,10], 'HG_0005': [1,5], 'HG_0006': [1,200], 'HG_0007': [1500,500], 'HG_0009': [1,5], 'HG_0010': [1,50], 'HG_0008': [1,5], 'HG_0012': [50,50], 'HG_0013': [1,5], 'HG_0011': [1,5], 'HG_0014': [1,5], 'HG_0015': [1,5], 'HG_0022': [1,5], 'HG_0024': [1,50], 'HG_0025': [1,50], 'HG_0026': [50,10], 'HG_0027': [1,10], 'LG_0001': [1,50], 'LG_0002': [10,50], 'LG_0004': [1,100], 'LG_0008': [1,200], 'LG_0006': [1,1], 'LG_0011': [1,1], 'LG_0012': [50,100], 'LG_0014': [1,100], 'LG_0013': [1,1], 'LG_0015': [100,200]
# Mapping: brain id -> [best C, best gamma] found in earlier runs.
brain_list = { 'HG_0004': [1500,10], 'HG_0005': [1,5], 'HG_0006': [1,200]}
sys.argv.pop(0); # Remove first argument
# Get arguments
dataset_directory = sys.argv[0]
#dataset_name = sys.argv[1]
output_folder = sys.argv[1]
results_path = output_folder + '/libsvm_results/'
if not os.path.exists(results_path):
    os.makedirs(results_path)
# Candidate gamma-prime values swept in the sensitivity analysis.
gammaps = [0.001,0.005, 0.01,0.05,0.1,0.5,1,5,10,50,100,150,200,300,400,500,1000]
#Cs = [1,5,10,25,50,75,100,150,200,250,300,400,500,750,1000,1250,1500]
# measure the sensitivity of gamma for the selected brains and save the text file
brain_names = brain_list.keys()
results_file_c = 'libsvm_measures_C.txt'
results_file_g = 'libsvm_measures_gamma.txt'
for brain in brain_names:
    datasets = load_data(dataset_directory , brain)
    resultc1, resultc2 = '' ,''
    gamma = brain_list[brain][1]
    C = brain_list[brain][0]
    # Header row identifying the brain and its fixed (gamma, C) pair.
    brain_str = brain + '\t' + 'gamma=' + str(gamma) + ', \t'
    brain_str += 'C=' + str(C) + ', \n'
    for gammap in gammaps:
        paramsc = [ gamma, gammap,C]
        # resultc1 collects dice scores, resultc2 the wall-clock times.
        dice_c , processed_timec = svm_model2( paramsc, datasets)
        #if brain == 'LG_0008':
        #    pdb.set_trace()
        resultc1 += "%.7f" % dice_c + '\t'
        resultc2 += "%.4f" % processed_timec + '\t'
    resultc1 += '\n'
    resultc2 += '\n'
    # Create the results file on first use, append afterwards.
    if not os.path.exists(results_path + results_file_c):
        with open(results_path + results_file_c,'w') as c:
            c.write(brain_str)
            c.write(resultc1)
            c.write(resultc2)
    else:
        with open(results_path + results_file_c,'a') as c:
            c.write(brain_str)
            c.write(resultc1)
            c.write(resultc2)
"seyed.mohammad.havaei@usherbrooke.ca"
] | seyed.mohammad.havaei@usherbrooke.ca |
e4987cae312d74aa411a4f12b08a6e756eff56e6 | f7c29c49733f3d0721bc423f15a009c163bde8bd | /Crawler/Deep in Python Web Crawler/Chapter_12/cccsv/cccsv/items.py | 9e8a0393ebe2937dbe2ae601cc9e2b35194f9aaa | [] | no_license | Cylra/py3Learn | 036665ba73b09fdf852a3149603ac1b2da18d92c | 7fac7c548f2183b636ef8d6336e2499e5ceb63a1 | refs/heads/master | 2021-06-20T11:12:02.732047 | 2020-03-31T14:40:58 | 2020-03-31T14:40:58 | 100,771,707 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CccsvItem(scrapy.Item):
    """Scrapy item holding one scraped contact record."""
    name = scrapy.Field()
    sex = scrapy.Field()
    addr = scrapy.Field()
    email = scrapy.Field()
"longyu998127@gmail.com"
] | longyu998127@gmail.com |
31c81e8e7598e42e78d696f778d4cfde386ba942 | 07581468815610e4eedc1063d0892838ce5b90e3 | /sammba/externals/nipype/interfaces/fsl/utils.py | 9eeca4d55a65011b9d40f61450cf37bb06076afa | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | salma1601/sammba-mri | 27c687b060950816d471b8796d0e2c4e2a880e90 | c3c79ed806a4e5ce3524bc6053bf0c3ff1444113 | refs/heads/master | 2021-06-25T17:19:14.997755 | 2018-12-01T21:00:50 | 2018-12-01T21:00:50 | 97,102,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90,084 | py | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Examples
--------
See the docstrings of the individual classes for examples.
.. testsetup::
# Change directory to provide relative paths for doctests
import os
filepath = os.path.dirname(os.path.realpath( __file__ ))
datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import map, range
import os
import os.path as op
import re
import fnmatch
from glob import glob
import tempfile
import numpy as np
from ...utils.filemanip import (load_json, save_json, split_filename,
fname_presuffix)
from ..base import (traits, TraitedSpec, OutputMultiPath, File,
CommandLine, CommandLineInputSpec, isdefined)
from .base import FSLCommand, FSLCommandInputSpec, Info
class CopyGeomInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`CopyGeom` (``fslcpgeom``)."""
    in_file = File(exists=True, mandatory=True, argstr="%s", position=0,
                   desc="source image")
    # dest_file is modified in place, so it is copied into the working
    # directory (copyfile=True) and doubles as the interface's output.
    dest_file = File(exists=True, mandatory=True, argstr="%s", position=1,
                     desc="destination image", copyfile=True,
                     output_name='out_file',
                     name_source='dest_file', name_template='%s')
    ignore_dims = traits.Bool(desc='Do not copy image dimensions',
                              argstr='-d', position="-1")
class CopyGeomOutputSpec(TraitedSpec):
    """Outputs for :class:`CopyGeom`."""
    out_file = File(exists=True, desc="image with new geometry header")
class CopyGeom(FSLCommand):
    """Use fslcpgeom to copy the header geometry information to another image.
    Copy certain parts of the header information (image dimensions, voxel
    dimensions, voxel dimensions units string, image orientation/origin or
    qform/sform info) from one image to another. Note that only copies from
    Analyze to Analyze or Nifti to Nifti will work properly. Copying from
    different files will result in loss of information or potentially incorrect
    settings.
    """
    _cmd = "fslcpgeom"  # FSL executable wrapped by this interface
    input_spec = CopyGeomInputSpec
    output_spec = CopyGeomOutputSpec
class RobustFOVInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`RobustFOV` (``robustfov``)."""
    in_file = File(exists=True,
                   desc='input filename',
                   argstr='-i %s', position=0, mandatory=True)
    out_roi = File(desc="ROI volume output name", argstr="-r %s",
                   name_source=['in_file'], hash_files=False,
                   name_template='%s_ROI')
    brainsize = traits.Int(desc=('size of brain in z-dimension (default '
                                 '170mm/150mm)'),
                           argstr='-b %d')
    out_transform = File(desc=("Transformation matrix in_file to out_roi "
                               "output name"),
                         argstr="-m %s",
                         name_source=['in_file'], hash_files=False,
                         name_template='%s_to_ROI')
class RobustFOVOutputSpec(TraitedSpec):
    """Outputs for :class:`RobustFOV`."""
    out_roi = File(exists=True, desc="ROI volume output name")
    out_transform = File(exists=True,
                         desc=("Transformation matrix in_file to out_roi "
                               "output name"))
class RobustFOV(FSLCommand):
    """Automatically crops an image removing lower head and neck.
    Interface is stable 5.0.0 to 5.0.9, but default brainsize changed from
    150mm to 170mm.
    """
    _cmd = 'robustfov'  # FSL executable wrapped by this interface
    input_spec = RobustFOVInputSpec
    output_spec = RobustFOVOutputSpec
class ImageMeantsInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`ImageMeants` (``fslmeants``)."""
    in_file = File(exists=True,
                   desc='input file for computing the average timeseries',
                   argstr='-i %s', position=0, mandatory=True)
    out_file = File(desc='name of output text matrix',
                    argstr='-o %s', genfile=True, hash_files=False)
    mask = File(exists=True, desc='input 3D mask', argstr='-m %s')
    # spatial_coord can be used instead of a mask to sample one location.
    spatial_coord = traits.List(traits.Int,
                                desc=('<x y z> requested spatial coordinate '
                                      '(instead of mask)'),
                                argstr='-c %s')
    use_mm = traits.Bool(desc=('use mm instead of voxel coordinates (for -c '
                               'option)'), argstr='--usemm')
    show_all = traits.Bool(desc=('show all voxel time series (within mask) '
                                 'instead of averaging'), argstr='--showall')
    eig = traits.Bool(
        desc=('calculate Eigenvariate(s) instead of mean (output will have 0 '
              'mean)'),
        argstr='--eig')
    order = traits.Int(1, desc='select number of Eigenvariates',
                       argstr='--order=%d', usedefault=True)
    nobin = traits.Bool(desc=('do not binarise the mask for calculation of '
                              'Eigenvariates'), argstr='--no_bin')
    transpose = traits.Bool(
        desc=('output results in transpose format (one row per voxel/mean)'),
        argstr='--transpose')
class ImageMeantsOutputSpec(TraitedSpec):
    """Outputs for :class:`ImageMeants`."""
    out_file = File(exists=True, desc="path/name of output text matrix")
class ImageMeants(FSLCommand):
    """ Use fslmeants for printing the average timeseries (intensities) to
    the screen (or saves to a file). The average is taken over all voxels
    in the mask (or all voxels in the image if no mask is specified)
    """
    _cmd = 'fslmeants'
    input_spec = ImageMeantsInputSpec
    output_spec = ImageMeantsOutputSpec
    def _list_outputs(self):
        """Return outputs, defaulting out_file to <in_file>_ts.txt."""
        outputs = self.output_spec().get()
        outputs['out_file'] = self.inputs.out_file
        if not isdefined(outputs['out_file']):
            outputs['out_file'] = self._gen_fname(self.inputs.in_file,
                                                  suffix='_ts',
                                                  ext='.txt',
                                                  change_ext=True)
        outputs['out_file'] = os.path.abspath(outputs['out_file'])
        return outputs
    def _gen_filename(self, name):
        """Generate the default out_file name on demand."""
        if name == 'out_file':
            return self._list_outputs()[name]
        return None
class SmoothInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`Smooth`; sigma and fwhm are mutually exclusive."""
    in_file = File(exists=True, argstr="%s", position=0, mandatory=True)
    sigma = traits.Float(
        argstr="-kernel gauss %.03f -fmean", position=1, xor=['fwhm'],
        mandatory=True,
        desc='gaussian kernel sigma in mm (not voxels)')
    # fwhm is converted to sigma by Smooth._format_arg before substitution.
    fwhm = traits.Float(
        argstr="-kernel gauss %.03f -fmean", position=1, xor=['sigma'],
        mandatory=True,
        desc=('gaussian kernel fwhm, will be converted to sigma in mm '
              '(not voxels)'))
    smoothed_file = File(
        argstr="%s", position=2, name_source=['in_file'],
        name_template='%s_smooth', hash_files=False)
class SmoothOutputSpec(TraitedSpec):
    """Outputs for :class:`Smooth`."""
    smoothed_file = File(exists=True)
class Smooth(FSLCommand):
    """
    Use fslmaths to smooth the image
    Examples
    --------
    Setting the kernel width using sigma:
    >>> sm = Smooth()
    >>> sm.inputs.output_type = 'NIFTI_GZ'
    >>> sm.inputs.in_file = 'functional2.nii'
    >>> sm.inputs.sigma = 8.0
    >>> sm.cmdline # doctest: +ELLIPSIS
    'fslmaths functional2.nii -kernel gauss 8.000 -fmean functional2_smooth.nii.gz'
    Setting the kernel width using fwhm:
    >>> sm = Smooth()
    >>> sm.inputs.output_type = 'NIFTI_GZ'
    >>> sm.inputs.in_file = 'functional2.nii'
    >>> sm.inputs.fwhm = 8.0
    >>> sm.cmdline # doctest: +ELLIPSIS
    'fslmaths functional2.nii -kernel gauss 3.397 -fmean functional2_smooth.nii.gz'
    One of sigma or fwhm must be set:
    >>> from nipype.interfaces.fsl import Smooth
    >>> sm = Smooth()
    >>> sm.inputs.output_type = 'NIFTI_GZ'
    >>> sm.inputs.in_file = 'functional2.nii'
    >>> sm.cmdline #doctest: +ELLIPSIS
    Traceback (most recent call last):
     ...
    ValueError: Smooth requires a value for one of the inputs ...
    """
    input_spec = SmoothInputSpec
    output_spec = SmoothOutputSpec
    _cmd = 'fslmaths'
    def _format_arg(self, name, trait_spec, value):
        """Translate fwhm into the gaussian sigma fslmaths expects."""
        if name == 'fwhm':
            # FWHM = 2*sqrt(2*ln2)*sigma, hence sigma = FWHM / sqrt(8*ln2).
            sigma = float(value) / np.sqrt(8 * np.log(2))
            return super(Smooth, self)._format_arg(name, trait_spec, sigma)
        return super(Smooth, self)._format_arg(name, trait_spec, value)
class MergeInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`Merge` (``fslmerge``)."""
    in_files = traits.List(File(exists=True), argstr="%s", position=2,
                           mandatory=True)
    dimension = traits.Enum('t', 'x', 'y', 'z', 'a', argstr="-%s", position=0,
                            desc=("dimension along which to merge, optionally "
                                  "set tr input when dimension is t"),
                            mandatory=True)
    # When tr is set, Merge._format_arg rewrites the -t flag to -tr.
    tr = traits.Float(position=-1, argstr='%.2f',
                      desc=('use to specify TR in seconds (default is 1.00 '
                            'sec), overrides dimension and sets it to tr'))
    merged_file = File(argstr="%s", position=1, name_source='in_files',
                       name_template='%s_merged', hash_files=False)
class MergeOutputSpec(TraitedSpec):
    """Outputs for :class:`Merge`."""
    merged_file = File(exists=True)
class Merge(FSLCommand):
    """Use fslmerge to concatenate images
    Images can be concatenated across time, x, y, or z dimensions. Across the
    time (t) dimension the TR is set by default to 1 sec.
    Note: to set the TR to a different value, specify 't' for dimension and
    specify the TR value in seconds for the tr input. The dimension will be
    automatically updated to 'tr'.
    Examples
    --------
    >>> from nipype.interfaces.fsl import Merge
    >>> merger = Merge()
    >>> merger.inputs.in_files = ['functional2.nii', 'functional3.nii']
    >>> merger.inputs.dimension = 't'
    >>> merger.inputs.output_type = 'NIFTI_GZ'
    >>> merger.cmdline
    'fslmerge -t functional2_merged.nii.gz functional2.nii functional3.nii'
    >>> merger.inputs.tr = 2.25
    >>> merger.cmdline
    'fslmerge -tr functional2_merged.nii.gz functional2.nii functional3.nii 2.25'
    """
    _cmd = 'fslmerge'
    input_spec = MergeInputSpec
    output_spec = MergeOutputSpec
    def _format_arg(self, name, spec, value):
        """Emit -tr instead of -t (and validate) when a TR value is given."""
        if name == 'tr':
            # A TR only makes sense when merging across time.
            if self.inputs.dimension != 't':
                raise ValueError('When TR is specified, dimension must be t')
            return spec.argstr % value
        if name == 'dimension':
            if isdefined(self.inputs.tr):
                return '-tr'
            return spec.argstr % value
        return super(Merge, self)._format_arg(name, spec, value)
class ExtractROIInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`ExtractROI` (``fslroi``); crop_list is mutually
    exclusive with the individual min/size arguments."""
    in_file = File(exists=True, argstr="%s",
                   position=0, desc="input file", mandatory=True)
    roi_file = File(argstr="%s", position=1,
                    desc="output file", genfile=True, hash_files=False)
    x_min = traits.Int(argstr="%d", position=2)
    x_size = traits.Int(argstr="%d", position=3)
    y_min = traits.Int(argstr="%d", position=4)
    y_size = traits.Int(argstr="%d", position=5)
    z_min = traits.Int(argstr="%d", position=6)
    z_size = traits.Int(argstr="%d", position=7)
    t_min = traits.Int(argstr="%d", position=8)
    t_size = traits.Int(argstr="%d", position=9)
    _crop_xor = ['x_min', 'x_size', 'y_min',
                 'y_size', 'z_min', 'z_size', 't_min', 't_size']
    crop_list = traits.List(traits.Tuple(traits.Int, traits.Int),
                            argstr="%s", position=2, xor=_crop_xor,
                            desc="list of two tuples specifying crop options")
class ExtractROIOutputSpec(TraitedSpec):
    """Outputs for :class:`ExtractROI`."""
    roi_file = File(exists=True)
class ExtractROI(FSLCommand):
    """Uses FSL Fslroi command to extract region of interest (ROI)
    from an image.
    You can a) take a 3D ROI from a 3D data set (or if it is 4D, the
    same ROI is taken from each time point and a new 4D data set is
    created), b) extract just some time points from a 4D data set, or
    c) control time and space limits to the ROI. Note that the
    arguments are minimum index and size (not maximum index). So to
    extract voxels 10 to 12 inclusive you would specify 10 and 3 (not
    10 and 12).
    Examples
    --------
    >>> from nipype.interfaces.fsl import ExtractROI
    >>> from nipype.testing import anatfile
    >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0,
    ...                     t_size=1)
    >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile
    True
    """
    _cmd = 'fslroi'
    input_spec = ExtractROIInputSpec
    output_spec = ExtractROIOutputSpec
    def _format_arg(self, name, spec, value):
        """Flatten crop_list's (min, size) pairs into one argument string."""
        if name == "crop_list":
            return " ".join(map(str, sum(list(map(list, value)), [])))
        return super(ExtractROI, self)._format_arg(name, spec, value)
    def _list_outputs(self):
        """Create a Bunch which contains all possible files generated
        by running the interface. Some files are always generated, others
        depending on which ``inputs`` options are set.
        Returns
        -------
        outputs : Bunch object
            Bunch object containing all possible files generated by
            interface object.
            If None, file was not generated
            Else, contains path, filename of generated outputfile
        """
        outputs = self._outputs().get()
        outputs['roi_file'] = self.inputs.roi_file
        if not isdefined(outputs['roi_file']):
            outputs['roi_file'] = self._gen_fname(self.inputs.in_file,
                                                  suffix='_roi')
        outputs['roi_file'] = os.path.abspath(outputs['roi_file'])
        return outputs
    def _gen_filename(self, name):
        """Generate the default roi_file name on demand."""
        if name == 'roi_file':
            return self._list_outputs()[name]
        return None
class SliceInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`Slice` (``fslslice``)."""
    in_file = File(exists=True, argstr="%s", position=0, mandatory=True,
                   desc="input filename", copyfile=False)
    out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix")
class SliceOutputSpec(TraitedSpec):
    """Outputs for :class:`Slice`."""
    out_files = OutputMultiPath(File(exists=True))
class Slice(FSLCommand):
    """Use fslslice to split a 3D file into lots of 2D files (along z-axis).
    Examples
    --------
    >>> from nipype.interfaces.fsl import Slice
    >>> slice = Slice()
    >>> slice.inputs.in_file = 'functional.nii'
    >>> slice.inputs.out_base_name = 'sl'
    >>> slice.cmdline
    'fslslice functional.nii sl'
    """
    _cmd = 'fslslice'
    input_spec = SliceInputSpec
    output_spec = SliceOutputSpec
    def _list_outputs(self):
        """Create a Bunch which contains all possible files generated
        by running the interface. Some files are always generated, others
        depending on which ``inputs`` options are set.
        Returns
        -------
        outputs : Bunch object
            Bunch object containing all possible files generated by
            interface object.
            If None, file was not generated
            Else, contains path, filename of generated outputfile
        """
        outputs = self._outputs().get()
        ext = Info.output_type_to_ext(self.inputs.output_type)
        suffix = '_slice_*' + ext
        # Glob with a broad wildcard, then narrow the matches to
        # fslslice's 4-digit numbering so unrelated files are excluded.
        exact_pattern = '_slice_[0-9][0-9][0-9][0-9]' + ext
        if isdefined(self.inputs.out_base_name):
            fname_template = os.path.abspath(
                self.inputs.out_base_name + suffix)
            fname_exact_pattern = os.path.abspath(
                self.inputs.out_base_name + exact_pattern)
        else:
            fname_template = fname_presuffix(self.inputs.in_file,
                                             suffix=suffix, use_ext=False)
            fname_exact_pattern = fname_presuffix(self.inputs.in_file,
                                                  suffix=exact_pattern, use_ext=False)
        outputs['out_files'] = fnmatch.filter(sorted(glob(fname_template)), fname_exact_pattern)
        return outputs
class SplitInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`Split` (``fslsplit``)."""
    in_file = File(exists=True, argstr="%s", position=0, mandatory=True,
                   desc="input filename")
    out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix")
    dimension = traits.Enum(
        't', 'x', 'y', 'z', argstr="-%s", position=2,
        mandatory=True,
        desc="dimension along which the file will be split")
class SplitOutputSpec(TraitedSpec):
    """Outputs for fslsplit: the generated per-volume image files."""
    out_files = OutputMultiPath(File(exists=True))
class Split(FSLCommand):
    """Uses FSL Fslsplit command to separate a volume into images in
    time, x, y or z dimension.
    """
    _cmd = 'fslsplit'
    input_spec = SplitInputSpec
    output_spec = SplitOutputSpec

    def _list_outputs(self):
        """Collect the volume files written by fslsplit.

        Files are globbed as ``<prefix>*<ext>`` in the current working
        directory, where ``<prefix>`` defaults to ``vol`` when no
        ``out_base_name`` was given (fslsplit's own default).

        Returns
        -------
        outputs : Bunch object
            Bunch object containing all possible files generated by
            interface object.
            If None, file was not generated
            Else, contains path, filename of generated outputfile
        """
        outputs = self._outputs().get()
        ext = Info.output_type_to_ext(self.inputs.output_type)
        if isdefined(self.inputs.out_base_name):
            prefix = '%s*' % self.inputs.out_base_name
        else:
            prefix = 'vol*'
        pattern = os.path.join(os.getcwd(), prefix + ext)
        outputs['out_files'] = sorted(glob(pattern))
        return outputs
class ImageMathsInputSpec(FSLCommandInputSpec):
    """Inputs for fslmaths (see :class:`ImageMaths`)."""
    in_file = File(exists=True, argstr="%s", mandatory=True, position=1)
    # optional second operand image for binary operations (e.g. -add file2)
    in_file2 = File(exists=True, argstr="%s", position=3)
    out_file = File(argstr="%s", position=4, genfile=True, hash_files=False)
    op_string = traits.Str(argstr="%s", position=2,
                           desc="string defining the operation, i. e. -add")
    # 'suffix' only shapes the auto-generated out_file name; it is skipped
    # during command-line construction (see ImageMaths._parse_inputs)
    suffix = traits.Str(desc="out_file suffix")
    out_data_type = traits.Enum('char', 'short', 'int', 'float', 'double',
                                'input', argstr="-odt %s", position=5,
                                desc=("output datatype, one of (char, short, "
                                      "int, float, double, input)"))
class ImageMathsOutputSpec(TraitedSpec):
    """Outputs for fslmaths: the resulting image file."""
    out_file = File(exists=True)
class ImageMaths(FSLCommand):
    """Use FSL fslmaths command to allow mathematical manipulation of images
    `FSL info <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm#fslutils>`_
    Examples
    --------
    >>> from nipype.interfaces import fsl
    >>> from nipype.testing import anatfile
    >>> maths = fsl.ImageMaths(in_file=anatfile, op_string= '-add 5',
    ...                        out_file='foo_maths.nii')
    >>> maths.cmdline == 'fslmaths %s -add 5 foo_maths.nii' % anatfile
    True
    """
    input_spec = ImageMathsInputSpec
    output_spec = ImageMathsOutputSpec
    _cmd = 'fslmaths'

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._list_outputs()[name]

    def _parse_inputs(self, skip=None):
        # 'suffix' only influences the generated output name; it must never
        # appear on the fslmaths command line.
        return super(ImageMaths, self)._parse_inputs(skip=['suffix'])

    def _list_outputs(self):
        """Resolve out_file, deriving it from in_file + suffix if unset."""
        name_suffix = '_maths'  # ohinds: build suffix
        if isdefined(self.inputs.suffix):
            name_suffix = self.inputs.suffix
        outputs = self._outputs().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            out_file = self._gen_fname(self.inputs.in_file,
                                       suffix=name_suffix)
        outputs['out_file'] = os.path.abspath(out_file)
        return outputs
class FilterRegressorInputSpec(FSLCommandInputSpec):
    """Inputs for fsl_regfilt (see :class:`FilterRegressor`)."""
    in_file = File(exists=True, argstr="-i %s",
                   desc="input file name (4D image)", mandatory=True,
                   position=1)
    out_file = File(argstr="-o %s",
                    desc="output file name for the filtered data",
                    genfile=True, position=2, hash_files=False)
    design_file = File(exists=True, argstr="-d %s", position=3, mandatory=True,
                       desc=("name of the matrix with time courses (e.g. GLM "
                             "design or MELODIC mixing matrix)"))
    # exactly one of filter_columns / filter_all must be set (xor);
    # both render as -f '<comma-separated 1-based column list>'
    filter_columns = traits.List(
        traits.Int, argstr="-f '%s'",
        xor=["filter_all"], mandatory=True,
        position=4,
        desc=("(1-based) column indices to filter out of the data"))
    filter_all = traits.Bool(mandatory=True, argstr="-f '%s'",
                             xor=["filter_columns"], position=4,
                             desc=("use all columns in the design file in "
                                   "denoising"))
    mask_file = File(exists=True, argstr="-m %s", desc="mask image file name")
    var_norm = traits.Bool(argstr="--vn",
                           desc="perform variance-normalization on data")
    out_vnscales = traits.Bool(argstr="--out_vnscales",
                               desc=("output scaling factors for variance "
                                     "normalization"))
class FilterRegressorOutputSpec(TraitedSpec):
    """Outputs for fsl_regfilt: the denoised 4D image."""
    out_file = File(exists=True, desc="output file name for the filtered data")
class FilterRegressor(FSLCommand):
    """Data de-noising by regressing out part of a design matrix
    Uses simple OLS regression on 4D images
    """
    input_spec = FilterRegressorInputSpec
    output_spec = FilterRegressorOutputSpec
    _cmd = 'fsl_regfilt'

    def _format_arg(self, name, trait_spec, value):
        """Render column-selection flags as -f '<1-based,comma,list>'."""
        if name == 'filter_columns':
            columns = ",".join(str(col) for col in value)
            return trait_spec.argstr % columns
        if name == "filter_all":
            design = np.loadtxt(self.inputs.design_file)
            # a single-column design loads as a 1D array; treat as 1 column
            n_cols = design.shape[1] if design.ndim > 1 else 1
            columns = ",".join(str(col) for col in range(1, n_cols + 1))
            return trait_spec.argstr % columns
        return super(FilterRegressor, self)._format_arg(
            name, trait_spec, value)

    def _list_outputs(self):
        """Resolve out_file, defaulting to <in_file>_regfilt."""
        outputs = self.output_spec().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            out_file = self._gen_fname(self.inputs.in_file, suffix='_regfilt')
        outputs['out_file'] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._list_outputs()[name]
class ImageStatsInputSpec(FSLCommandInputSpec):
    """Inputs for fslstats (see :class:`ImageStats`)."""
    split_4d = traits.Bool(argstr='-t', position=1,
                           desc=('give a separate output line for each 3D '
                                 'volume of a 4D timeseries'))
    in_file = File(exists=True, argstr="%s", mandatory=True, position=2,
                   desc='input file to generate stats of')
    op_string = traits.Str(argstr="%s", mandatory=True, position=3,
                           desc=("string defining the operation, options are "
                                 "applied in order, e.g. -M -l 10 -M will "
                                 "report the non-zero mean, apply a threshold "
                                 "and then report the new nonzero mean"))
    # empty argstr: the mask is substituted into op_string's '-k %s'
    # placeholder by ImageStats._format_arg, not emitted on its own
    mask_file = File(exists=True, argstr="",
                     desc='mask file used for option -k %s')
class ImageStatsOutputSpec(TraitedSpec):
    """Outputs for fslstats: the parsed numeric result(s)."""
    out_stat = traits.Any(desc='stats output')
class ImageStats(FSLCommand):
    """Use FSL fslstats command to calculate stats from images
    `FSL info
    <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm#fslutils>`_
    Examples
    --------
    >>> from nipype.interfaces.fsl import ImageStats
    >>> from nipype.testing import funcfile
    >>> stats = ImageStats(in_file=funcfile, op_string= '-M')
    >>> stats.cmdline == 'fslstats %s -M'%funcfile
    True
    """
    input_spec = ImageStatsInputSpec
    output_spec = ImageStatsOutputSpec
    _cmd = 'fslstats'

    def _format_arg(self, name, trait_spec, value):
        """Splice mask_file into op_string's '-k %s' placeholder."""
        if name == 'mask_file':
            # emitted via op_string below, never as a standalone argument
            return ''
        if name == 'op_string':
            if '-k %s' in self.inputs.op_string:
                if not isdefined(self.inputs.mask_file):
                    raise ValueError(
                        '-k %s option in op_string requires mask_file')
                return self.inputs.op_string % self.inputs.mask_file
        return super(ImageStats, self)._format_arg(name, trait_spec, value)

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Parse fslstats stdout into floats (nested lists per line)."""
        outputs = self._outputs()
        # local caching for backward compatibility
        outfile = os.path.join(os.getcwd(), 'stat_result.json')
        if runtime is None:
            try:
                out_stat = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            out_stat = []
            for line in runtime.stdout.split('\n'):
                if not line:
                    continue
                values = [float(val) for val in line.split()]
                # multi-value lines become sub-lists; single values flatten
                if len(values) > 1:
                    out_stat.append(values)
                else:
                    out_stat.extend(values)
            if len(out_stat) == 1:
                out_stat = out_stat[0]
            save_json(outfile, dict(stat=out_stat))
        outputs.out_stat = out_stat
        return outputs
class AvScaleInputSpec(CommandLineInputSpec):
    """Inputs for avscale (see :class:`AvScale`)."""
    # with --allparams avscale also prints rotation angles and translations
    all_param = traits.Bool(False, argstr='--allparams')
    mat_file = File(exists=True, argstr='%s',
                    desc='mat file to read', position=-2)
    ref_file = File(exists=True, argstr='%s', position=-1,
                    desc='reference file to get center of rotation')
class AvScaleOutputSpec(TraitedSpec):
    """Outputs parsed from avscale's stdout (see AvScale._run_interface)."""
    rotation_translation_matrix = traits.List(
        traits.List(traits.Float), desc='Rotation and Translation Matrix')
    scales = traits.List(traits.Float, desc='Scales (x,y,z)')
    skews = traits.List(traits.Float, desc='Skews')
    average_scaling = traits.Float(desc='Average Scaling')
    determinant = traits.Float(desc='Determinant')
    forward_half_transform = traits.List(
        traits.List(traits.Float), desc='Forward Half Transform')
    backward_half_transform = traits.List(
        traits.List(traits.Float), desc='Backwards Half Transform')
    left_right_orientation_preserved = traits.Bool(
        desc='True if LR orientation preserved')
    # rot_angles / translations are only populated when all_param is set
    rot_angles = traits.List(traits.Float, desc='rotation angles')
    translations = traits.List(traits.Float, desc='translations')
class AvScale(CommandLine):
    """Use FSL avscale command to extract info from mat file output of FLIRT
    Examples
    --------
    >>> avscale = AvScale()
    >>> avscale.inputs.mat_file = 'flirt.mat'
    >>> res = avscale.run()  # doctest: +SKIP
    """
    input_spec = AvScaleInputSpec
    output_spec = AvScaleOutputSpec
    _cmd = 'avscale'

    def _run_interface(self, runtime):
        """Run avscale and parse its stdout report into ``self._results``.

        The pattern below mirrors avscale's fixed report layout.  The
        'Rotation Angles' / 'Translations' sections are optional: avscale
        only prints them when --allparams is given (``all_param``).

        FIX: the pattern is now built from raw string literals.  The
        original used plain strings containing escapes such as ``'\\ '``
        and ``'\\('``, which are invalid string escape sequences — a
        DeprecationWarning since Python 3.6 and a SyntaxWarning (future
        error) in current CPython.  Number fields are also split with
        ``split()`` instead of ``split(' ')`` so that runs of multiple
        spaces (as produced by aligned columns) do not yield empty tokens
        and crash ``float('')``.
        """
        runtime = super(AvScale, self)._run_interface(runtime)

        expr = re.compile(
            r'Rotation & Translation Matrix:\n(?P<rot_tran_mat>[0-9\.\ \n-]+)[\s\n]*'
            r'(Rotation Angles \(x,y,z\) \[rads\] = (?P<rot_angles>[0-9\.\ -]+))?[\s\n]*'
            r'(Translations \(x,y,z\) \[mm\] = (?P<translations>[0-9\.\ -]+))?[\s\n]*'
            r'Scales \(x,y,z\) = (?P<scales>[0-9\.\ -]+)[\s\n]*'
            r'Skews \(xy,xz,yz\) = (?P<skews>[0-9\.\ -]+)[\s\n]*'
            r'Average scaling = (?P<avg_scaling>[0-9\.-]+)[\s\n]*'
            r'Determinant = (?P<determinant>[0-9\.-]+)[\s\n]*'
            r'Left-Right orientation: (?P<lr_orientation>[A-Za-z]+)[\s\n]*'
            r'Forward half transform =[\s]*\n'
            r'(?P<fwd_half_xfm>[0-9\.\ \n-]+)[\s\n]*'
            r'Backward half transform =[\s]*\n'
            r'(?P<bwd_half_xfm>[0-9\.\ \n-]+)[\s\n]*')
        out = expr.search(runtime.stdout).groupdict()

        def _vector(text):
            # one whitespace-separated row of numbers -> list of floats
            return [float(v) for v in text.split()]

        def _matrix(text):
            # newline-separated rows of numbers -> list of float rows
            return [_vector(row) for row in text.strip().split('\n')]

        outputs = {
            'rotation_translation_matrix': _matrix(out['rot_tran_mat']),
            'scales': _vector(out['scales']),
            'skews': _vector(out['skews']),
            'average_scaling': float(out['avg_scaling'].strip()),
            'determinant': float(out['determinant'].strip()),
            'left_right_orientation_preserved':
                out['lr_orientation'].strip() == 'preserved',
            'forward_half_transform': _matrix(out['fwd_half_xfm']),
            'backward_half_transform': _matrix(out['bwd_half_xfm']),
        }
        if self.inputs.all_param:
            outputs['rot_angles'] = _vector(out['rot_angles'])
            outputs['translations'] = _vector(out['translations'])
        self._results = outputs
        return runtime

    def _list_outputs(self):
        """Return the dict populated by _run_interface."""
        return self._results
class OverlayInputSpec(FSLCommandInputSpec):
    """Inputs for FSL's overlay command (see :class:`Overlay`)."""
    transparency = traits.Bool(desc='make overlay colors semi-transparent',
                               position=1, argstr='%s', usedefault=True,
                               default_value=True)
    out_type = traits.Enum('float', 'int', position=2, usedefault=True,
                           argstr='%s',
                           desc='write output with float or int')
    use_checkerboard = traits.Bool(desc='use checkerboard mask for overlay',
                                   argstr='-c', position=3)
    background_image = File(exists=True, position=4, mandatory=True,
                            argstr='%s', desc='image to use as background')
    # exactly one of these three background-range options must be chosen
    _xor_inputs = ('auto_thresh_bg', 'full_bg_range', 'bg_thresh')
    auto_thresh_bg = traits.Bool(
        desc=('automatically threshold the background image'),
        argstr='-a', position=5,
        xor=_xor_inputs, mandatory=True)
    full_bg_range = traits.Bool(desc='use full range of background image',
                                argstr='-A', position=5, xor=_xor_inputs,
                                mandatory=True)
    bg_thresh = traits.Tuple(
        traits.Float, traits.Float, argstr='%.3f %.3f',
        position=5,
        desc='min and max values for background intensity',
        xor=_xor_inputs, mandatory=True)
    stat_image = File(exists=True, position=6, mandatory=True, argstr='%s',
                      desc='statistical image to overlay in color')
    stat_thresh = traits.Tuple(traits.Float, traits.Float, position=7,
                               mandatory=True, argstr='%.2f %.2f',
                               desc=('min and max values for the statistical '
                                     'overlay'))
    # show_negative_stats re-uses stat_image with negated thresholds, so it
    # conflicts with supplying a second stat image (and vice versa)
    show_negative_stats = traits.Bool(desc=('display negative statistics in '
                                            'overlay'), xor=['stat_image2'],
                                      argstr='%s', position=8)
    stat_image2 = File(exists=True, position=9, xor=['show_negative_stats'],
                       argstr='%s',
                       desc='second statistical image to overlay in color')
    stat_thresh2 = traits.Tuple(traits.Float, traits.Float, position=10,
                                desc=('min and max values for second '
                                      'statistical overlay'),
                                argstr='%.2f %.2f')
    out_file = File(desc='combined image volume',
                    position=-1, argstr='%s', genfile=True, hash_files=False)
class OverlayOutputSpec(TraitedSpec):
    """Outputs for overlay: the combined background + stats volume."""
    out_file = File(exists=True, desc='combined image volume')
class Overlay(FSLCommand):
    """ Use FSL's overlay command to combine background and statistical images
    into one volume
    Examples
    --------
    >>> from nipype.interfaces import fsl
    >>> combine = fsl.Overlay()
    >>> combine.inputs.background_image = 'mean_func.nii.gz'
    >>> combine.inputs.auto_thresh_bg = True
    >>> combine.inputs.stat_image = 'zstat1.nii.gz'
    >>> combine.inputs.stat_thresh = (3.5, 10)
    >>> combine.inputs.show_negative_stats = True
    >>> res = combine.run() #doctest: +SKIP
    """
    _cmd = 'overlay'
    input_spec = OverlayInputSpec
    output_spec = OverlayOutputSpec

    def _format_arg(self, name, spec, value):
        """Translate boolean/enum traits into overlay's numeric flags."""
        if name == 'transparency':
            return '1' if value else '0'
        if name == 'out_type':
            return '0' if value == 'float' else '1'
        if name == 'show_negative_stats':
            # re-emit stat_image with negated thresholds as a second overlay
            return '%s %.2f %.2f' % (self.inputs.stat_image,
                                     self.inputs.stat_thresh[0] * -1,
                                     self.inputs.stat_thresh[1] * -1)
        return super(Overlay, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Resolve out_file, naming it after the stat image(s)."""
        outputs = self._outputs().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            show_neg = (isdefined(self.inputs.show_negative_stats) and
                        self.inputs.show_negative_stats)
            if isdefined(self.inputs.stat_image2) and not show_neg:
                stem = "%s_and_%s" % (
                    split_filename(self.inputs.stat_image)[1],
                    split_filename(self.inputs.stat_image2)[1])
            else:
                stem = split_filename(self.inputs.stat_image)[1]
            out_file = self._gen_fname(stem, suffix='_overlay')
        outputs['out_file'] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._list_outputs()['out_file']
class SlicerInputSpec(FSLCommandInputSpec):
    """Inputs for FSL's slicer command (see :class:`Slicer`)."""
    in_file = File(exists=True, position=1, argstr='%s', mandatory=True,
                   desc='input volume')
    image_edges = File(exists=True, position=2, argstr='%s',
                       desc=('volume to display edge overlay for (useful for '
                             'checking registration'))
    label_slices = traits.Bool(
        position=3, argstr='-L', desc='display slice number',
        usedefault=True, default_value=True)
    colour_map = File(exists=True, position=4, argstr='-l %s',
                      desc=('use different colour map from that stored in '
                            'nifti header'))
    intensity_range = traits.Tuple(traits.Float, traits.Float, position=5,
                                   argstr='-i %.3f %.3f',
                                   desc='min and max intensities to display')
    threshold_edges = traits.Float(position=6, argstr='-e %.3f',
                                   desc='use threshold for edges')
    dither_edges = traits.Bool(position=7, argstr='-t',
                               desc=('produce semi-transparent (dithered) '
                                     'edges'))
    nearest_neighbour = traits.Bool(position=8, argstr='-n',
                                    desc=('use nearest neighbor interpolation '
                                          'for output'))
    show_orientation = traits.Bool(position=9, argstr='%s', usedefault=True,
                                   default_value=True,
                                   desc='label left-right orientation')
    # exactly one slice-selection mode may be chosen
    _xor_options = ('single_slice', 'middle_slices', 'all_axial',
                    'sample_axial')
    single_slice = traits.Enum(
        'x', 'y', 'z', position=10, argstr='-%s',
        xor=_xor_options, requires=['slice_number'],
        desc=('output picture of single slice in the x, y, or z plane'))
    slice_number = traits.Int(position=11, argstr='-%d',
                              desc='slice number to save in picture')
    middle_slices = traits.Bool(position=10, argstr='-a', xor=_xor_options,
                                desc=('output picture of mid-sagittal, axial, '
                                      'and coronal slices'))
    all_axial = traits.Bool(position=10, argstr='-A', xor=_xor_options,
                            requires=['image_width'],
                            desc='output all axial slices into one picture')
    sample_axial = traits.Int(position=10, argstr='-S %d',
                              xor=_xor_options, requires=['image_width'],
                              desc=('output every n axial slices into one '
                                    'picture'))
    image_width = traits.Int(
        position=-2, argstr='%d', desc='max picture width')
    out_file = File(position=-1, genfile=True, argstr='%s',
                    desc='picture to write', hash_files=False)
    scaling = traits.Float(position=0, argstr='-s %f', desc='image scale')
class SlicerOutputSpec(TraitedSpec):
    """Outputs for slicer: the rendered picture file."""
    out_file = File(exists=True, desc='picture to write')
class Slicer(FSLCommand):
    """Use FSL's slicer command to output a png image from a volume.
    Examples
    --------
    >>> from nipype.interfaces import fsl
    >>> from nipype.testing import example_data
    >>> slice = fsl.Slicer()
    >>> slice.inputs.in_file = example_data('functional.nii')
    >>> slice.inputs.all_axial = True
    >>> slice.inputs.image_width = 750
    >>> res = slice.run() #doctest: +SKIP
    """
    _cmd = 'slicer'
    input_spec = SlicerInputSpec
    output_spec = SlicerOutputSpec

    def _format_arg(self, name, spec, value):
        """Map boolean traits onto slicer's flag conventions."""
        if name == 'show_orientation':
            # slicer uses -u to *suppress* orientation labels
            return '' if value else '-u'
        if name == 'label_slices':
            return '-L' if value else ''
        return super(Slicer, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Resolve out_file, defaulting to <in_file>.png."""
        outputs = self._outputs().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            out_file = self._gen_fname(self.inputs.in_file, ext='.png')
        outputs['out_file'] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._list_outputs()['out_file']
class PlotTimeSeriesInputSpec(FSLCommandInputSpec):
    """Inputs for fsl_tsplot (see :class:`PlotTimeSeries`)."""
    in_file = traits.Either(File(exists=True), traits.List(File(exists=True)),
                            mandatory=True, argstr="%s", position=1,
                            desc=("file or list of files with columns of "
                                  "timecourse information"))
    # plot_start/plot_finish are mutually exclusive with plot_range
    plot_start = traits.Int(argstr="--start=%d", xor=("plot_range",),
                            desc="first column from in-file to plot")
    plot_finish = traits.Int(argstr="--finish=%d", xor=("plot_range",),
                             desc="final column from in-file to plot")
    plot_range = traits.Tuple(traits.Int, traits.Int, argstr="%s",
                              xor=("plot_start", "plot_finish"),
                              desc=("first and last columns from the in-file "
                                    "to plot"))
    title = traits.Str(argstr="%s", desc="plot title")
    legend_file = File(exists=True, argstr="--legend=%s", desc="legend file")
    labels = traits.Either(traits.Str, traits.List(traits.Str),
                           argstr="%s", desc="label or list of labels")
    # y_min/y_max are mutually exclusive with y_range
    y_min = traits.Float(argstr="--ymin=%.2f", desc="minumum y value",
                         xor=("y_range",))
    y_max = traits.Float(argstr="--ymax=%.2f", desc="maximum y value",
                         xor=("y_range",))
    y_range = traits.Tuple(traits.Float, traits.Float, argstr="%s",
                           xor=("y_min", "y_max"),
                           desc="min and max y axis values")
    x_units = traits.Int(
        argstr="-u %d", usedefault=True, default_value=1,
        desc=("scaling units for x-axis (between 1 and length of in file)"))
    plot_size = traits.Tuple(traits.Int, traits.Int, argstr="%s",
                             desc="plot image height and width")
    x_precision = traits.Int(argstr="--precision=%d",
                             desc="precision of x-axis labels")
    sci_notation = traits.Bool(argstr="--sci",
                               desc="switch on scientific notation")
    out_file = File(argstr="-o %s", genfile=True,
                    desc="image to write", hash_files=False)
class PlotTimeSeriesOutputSpec(TraitedSpec):
    """Outputs for fsl_tsplot: the rendered plot image."""
    out_file = File(exists=True, desc='image to write')
class PlotTimeSeries(FSLCommand):
    """Use fsl_tsplot to create images of time course plots.
    Examples
    --------
    >>> import nipype.interfaces.fsl as fsl
    >>> plotter = fsl.PlotTimeSeries()
    >>> plotter.inputs.in_file = 'functional.par'
    >>> plotter.inputs.title = 'Functional timeseries'
    >>> plotter.inputs.labels = ['run1', 'run2']
    >>> plotter.run() #doctest: +SKIP
    """
    _cmd = "fsl_tsplot"
    input_spec = PlotTimeSeriesInputSpec
    output_spec = PlotTimeSeriesOutputSpec

    def _format_arg(self, name, spec, value):
        """Render composite traits (lists, tuples) into fsl_tsplot flags."""
        if name == "in_file":
            if isinstance(value, list):
                args = ",".join(value)
                return "-i %s" % args
            else:
                return "-i %s" % value
        elif name == "labels":
            if isinstance(value, list):
                args = ",".join(value)
                return "-a %s" % args
            else:
                return "-a %s" % value
        elif name == "title":
            return "-t \'%s\'" % value
        elif name == "plot_range":
            return "--start=%d --finish=%d" % value
        elif name == "y_range":
            # BUG FIX: y_range is a tuple of Floats, but it was rendered
            # with '%d', silently truncating fractional limits.  Use the
            # same '%.2f' precision as the y_min/y_max argstrs.
            return "--ymin=%.2f --ymax=%.2f" % value
        elif name == "plot_size":
            return "-h %d -w %d" % value
        return super(PlotTimeSeries, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Resolve out_file, defaulting to <first in_file>.png."""
        outputs = self._outputs().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            if isinstance(self.inputs.in_file, list):
                infile = self.inputs.in_file[0]
            else:
                infile = self.inputs.in_file
            out_file = self._gen_fname(infile, ext='.png')
        outputs['out_file'] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name == 'out_file':
            return self._list_outputs()['out_file']
        return None
class PlotMotionParamsInputSpec(FSLCommandInputSpec):
    """Inputs for plotting realignment parameters with fsl_tsplot."""
    in_file = traits.Either(File(exists=True), traits.List(File(exists=True)),
                            mandatory=True, argstr="%s", position=1,
                            desc="file with motion parameters")
    # determines the column layout: FSL writes rotations first, SPM
    # translations first (see PlotMotionParams._format_arg)
    in_source = traits.Enum("spm", "fsl", mandatory=True,
                            desc=("which program generated the motion "
                                  "parameter file - fsl, spm"))
    plot_type = traits.Enum("rotations", "translations", "displacement",
                            argstr="%s", mandatory=True,
                            desc=("which motion type to plot - rotations, "
                                  "translations, displacement"))
    plot_size = traits.Tuple(traits.Int, traits.Int, argstr="%s",
                             desc="plot image height and width")
    out_file = File(argstr="-o %s", genfile=True,
                    desc="image to write", hash_files=False)
class PlotMotionParamsOutputSpec(TraitedSpec):
    """Outputs for the motion-parameter plot: the rendered image."""
    out_file = File(exists=True, desc='image to write')
class PlotMotionParams(FSLCommand):
    """Use fsl_tsplot to plot the estimated motion parameters from a
    realignment program.
    Examples
    --------
    >>> import nipype.interfaces.fsl as fsl
    >>> plotter = fsl.PlotMotionParams()
    >>> plotter.inputs.in_file = 'functional.par'
    >>> plotter.inputs.in_source = 'fsl'
    >>> plotter.inputs.plot_type = 'rotations'
    >>> res = plotter.run() #doctest: +SKIP
    Notes
    -----
    The 'in_source' attribute determines the order of columns that are expected
    in the source file. FSL prints motion parameters in the order rotations,
    translations, while SPM prints them in the opposite order. This interface
    should be able to plot timecourses of motion parameters generated from
    other sources as long as they fall under one of these two patterns. For
    more flexibilty, see the :class:`fsl.PlotTimeSeries` interface.
    """
    _cmd = 'fsl_tsplot'
    input_spec = PlotMotionParamsInputSpec
    output_spec = PlotMotionParamsOutputSpec

    def _format_arg(self, name, spec, value):
        """Build the fsl_tsplot title/column flags for the chosen plot."""
        if name == 'plot_type':
            source = self.inputs.in_source
            if self.inputs.plot_type == 'displacement':
                title = '-t \'MCFLIRT estimated mean displacement (mm)\''
                labels = '-a abs,rel'
                return '%s %s' % (title, labels)
            # Pick the right column window for the source package: FSL
            # stores rotations in columns 1-3, SPM in columns 4-6.
            column_windows = dict(fsl_rot=(1, 3), fsl_tra=(4, 6),
                                  spm_rot=(4, 6), spm_tra=(1, 3))
            start, finish = column_windows['%s_%s' % (source, value[:3])]
            sfstr = '--start=%d --finish=%d' % (start, finish)
            # Format the title properly
            program_names = dict(fsl='MCFLIRT', spm='Realign')
            units = dict(rot='radians', tra='mm')
            title = "'%s estimated %s (%s)'" % (
                program_names[source], value, units[value[:3]])
            return '-t %s %s -a x,y,z' % (title, sfstr)
        if name == 'plot_size':
            return '-h %d -w %d' % value
        if name == 'in_file':
            if isinstance(value, list):
                return '-i %s' % ','.join(value)
            return '-i %s' % value
        return super(PlotMotionParams, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        """Resolve out_file, defaulting to <in_file>_<plottype>.png."""
        outputs = self._outputs().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            if isinstance(self.inputs.in_file, list):
                infile = self.inputs.in_file[0]
            else:
                infile = self.inputs.in_file
            suffix_map = dict(rot='rot', tra='trans', dis='disp')
            plttype = suffix_map[self.inputs.plot_type[:3]]
            out_file = fname_presuffix(
                infile, suffix='_%s.png' % plttype, use_ext=False)
        outputs['out_file'] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._list_outputs()['out_file']
class ConvertXFMInputSpec(FSLCommandInputSpec):
    """Inputs for convert_xfm (see :class:`ConvertXFM`)."""
    in_file = File(exists=True, mandatory=True, argstr="%s", position=-1,
                   desc="input transformation matrix")
    in_file2 = File(
        exists=True, argstr="%s", position=-2,
        desc="second input matrix (for use with fix_scale_skew or concat_xfm)")
    # exactly one operation may be selected at a time
    _options = ["invert_xfm", "concat_xfm", "fix_scale_skew"]
    invert_xfm = traits.Bool(argstr="-inverse", position=-3, xor=_options,
                             desc="invert input transformation")
    concat_xfm = traits.Bool(argstr="-concat", position=-3, xor=_options,
                             requires=["in_file2"],
                             desc=("write joint transformation of two input "
                                   "matrices"))
    fix_scale_skew = traits.Bool(argstr="-fixscaleskew", position=-3,
                                 xor=_options, requires=["in_file2"],
                                 desc=("use secondary matrix to fix scale and "
                                       "skew"))
    out_file = File(genfile=True, argstr="-omat %s", position=1,
                    desc="final transformation matrix", hash_files=False)
class ConvertXFMOutputSpec(TraitedSpec):
    """Outputs for convert_xfm: the resulting transformation matrix."""
    out_file = File(exists=True, desc="output transformation matrix")
class ConvertXFM(FSLCommand):
    """Use the FSL utility convert_xfm to modify FLIRT transformation matrices.
    Examples
    --------
    >>> import nipype.interfaces.fsl as fsl
    >>> invt = fsl.ConvertXFM()
    >>> invt.inputs.in_file = "flirt.mat"
    >>> invt.inputs.invert_xfm = True
    >>> invt.inputs.out_file = 'flirt_inv.mat'
    >>> invt.cmdline
    'convert_xfm -omat flirt_inv.mat -inverse flirt.mat'
    """
    _cmd = "convert_xfm"
    input_spec = ConvertXFMInputSpec
    output_spec = ConvertXFMOutputSpec

    def _list_outputs(self):
        """Resolve out_file from the selected operation.

        The default name depends on the operation: ``<in>_inv.mat`` for
        inversion, ``<in1>_<in2>.mat`` for concatenation, and
        ``<in>_fix.mat`` otherwise (fix_scale_skew).
        """
        outputs = self._outputs().get()
        outfile = self.inputs.out_file
        if not isdefined(outfile):
            _, stem1, _ = split_filename(self.inputs.in_file)
            if self.inputs.invert_xfm:
                outfile = fname_presuffix(stem1,
                                          suffix="_inv.mat",
                                          newpath=os.getcwd(),
                                          use_ext=False)
            elif self.inputs.concat_xfm:
                _, stem2, _ = split_filename(self.inputs.in_file2)
                outfile = fname_presuffix("%s_%s" % (stem1, stem2),
                                          suffix=".mat",
                                          newpath=os.getcwd(),
                                          use_ext=False)
            else:
                outfile = fname_presuffix(stem1,
                                          suffix="_fix.mat",
                                          newpath=os.getcwd(),
                                          use_ext=False)
        outputs["out_file"] = os.path.abspath(outfile)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != "out_file":
            return None
        return self._list_outputs()["out_file"]
class SwapDimensionsInputSpec(FSLCommandInputSpec):
    """Inputs for fslswapdim (see :class:`SwapDimensions`).

    FIX: ``in_file`` previously declared ``position="1"`` (a string).
    Every other spec in this module uses integer positions, and the
    command-line assembler orders/compares positional arguments
    numerically, so a string position is a latent type bug — it is now
    the integer ``1``.
    """
    in_file = File(exists=True, mandatory=True, argstr="%s", position=1,
                   desc="input image")
    # accepted axis codes: signed axis names or NIfTI anatomical labels
    _dims = ["x", "-x", "y", "-y", "z",
             "-z", "RL", "LR", "AP", "PA", "IS", "SI"]
    new_dims = traits.Tuple(traits.Enum(_dims), traits.Enum(_dims),
                            traits.Enum(_dims), argstr="%s %s %s",
                            mandatory=True,
                            desc="3-tuple of new dimension order")
    out_file = File(genfile=True, argstr="%s",
                    desc="image to write", hash_files=False)
class SwapDimensionsOutputSpec(TraitedSpec):
    """Outputs for fslswapdim: the reoriented image."""
    out_file = File(exists=True, desc="image with new dimensions")
class SwapDimensions(FSLCommand):
    """Use fslswapdim to alter the orientation of an image.
    This interface accepts a three-tuple corresponding to the new
    orientation. You may either provide dimension ids in the form of
    (-)x, (-)y, or (-z), or nifti-syle dimension codes
    (RL, LR, AP, PA, IS, SI).
    """
    _cmd = "fslswapdim"
    input_spec = SwapDimensionsInputSpec
    output_spec = SwapDimensionsOutputSpec

    def _list_outputs(self):
        """Resolve out_file, defaulting to <in_file>_newdims."""
        outputs = self._outputs().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file):
            out_file = self._gen_fname(self.inputs.in_file,
                                       suffix='_newdims')
        outputs["out_file"] = os.path.abspath(out_file)
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != "out_file":
            return None
        return self._list_outputs()["out_file"]
class PowerSpectrumInputSpec(FSLCommandInputSpec):
    """Inputs for fslpspec (see :class:`PowerSpectrum`)."""
    # We use position args here as list indices - so a negative number
    # will put something on the end
    in_file = File(exists=True,
                   desc="input 4D file to estimate the power spectrum",
                   argstr='%s', position=0, mandatory=True)
    out_file = File(desc='name of output 4D file for power spectrum',
                    argstr='%s', position=1, genfile=True, hash_files=False)
class PowerSpectrumOutputSpec(TraitedSpec):
    """Outputs for fslpspec: the 4D power-spectrum image."""
    out_file = File(
        exists=True, desc="path/name of the output 4D power spectrum file")
class PowerSpectrum(FSLCommand):
    """Use FSL PowerSpectrum command for power spectrum estimation.
    Examples
    --------
    >>> from nipype.interfaces import fsl
    >>> pspec = fsl.PowerSpectrum()
    >>> pspec.inputs.in_file = 'functional.nii'
    >>> res = pspec.run() # doctest: +SKIP
    """
    _cmd = 'fslpspec'
    input_spec = PowerSpectrumInputSpec
    output_spec = PowerSpectrumOutputSpec

    def _gen_outfilename(self):
        """Return out_file if set, otherwise derive <in_file>_ps."""
        out_file = self.inputs.out_file
        if isdefined(out_file):
            return out_file
        if isdefined(self.inputs.in_file):
            return self._gen_fname(self.inputs.in_file, suffix='_ps')
        return out_file

    def _list_outputs(self):
        """Report the absolute path of the (possibly generated) out_file."""
        outputs = self.output_spec().get()
        outputs['out_file'] = os.path.abspath(self._gen_outfilename())
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._gen_outfilename()
class SigLossInputSpec(FSLCommandInputSpec):
    """Inputs for sigloss (see :class:`SigLoss`)."""
    in_file = File(mandatory=True,
                   exists=True,
                   argstr='-i %s',
                   desc='b0 fieldmap file')
    out_file = File(argstr='-s %s',
                    desc='output signal loss estimate file',
                    genfile=True)
    mask_file = File(exists=True,
                     argstr='-m %s',
                     desc='brain mask file')
    echo_time = traits.Float(argstr='--te=%f',
                             desc='echo time in seconds')
    slice_direction = traits.Enum('x', 'y', 'z',
                                  argstr='-d %s',
                                  desc='slicing direction')
# NOTE(review): class name has a typo ("Ouput"); it is referenced by
# SigLoss.output_spec below, so renaming would break the public interface.
class SigLossOuputSpec(TraitedSpec):
    """Outputs for sigloss: the signal-loss estimate image."""
    out_file = File(exists=True,
                    desc='signal loss estimate file')
class SigLoss(FSLCommand):
    """Estimates signal loss from a field map (in rad/s)
    Examples
    --------
    >>> sigloss = SigLoss()
    >>> sigloss.inputs.in_file = "phase.nii"
    >>> sigloss.inputs.echo_time = 0.03
    >>> res = sigloss.run() # doctest: +SKIP
    """
    input_spec = SigLossInputSpec
    output_spec = SigLossOuputSpec
    _cmd = 'sigloss'

    def _list_outputs(self):
        """Resolve out_file, defaulting to <in_file>_sigloss."""
        outputs = self.output_spec().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file) and isdefined(self.inputs.in_file):
            out_file = self._gen_fname(self.inputs.in_file,
                                       suffix='_sigloss')
        outputs['out_file'] = out_file
        return outputs

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._list_outputs()['out_file']
class Reorient2StdInputSpec(FSLCommandInputSpec):
    """Inputs for fslreorient2std (see :class:`Reorient2Std`)."""
    in_file = File(exists=True, mandatory=True, argstr="%s")
    out_file = File(genfile=True, hash_files=False, argstr="%s")
class Reorient2StdOutputSpec(TraitedSpec):
    """Outputs for fslreorient2std: the reoriented image."""
    out_file = File(exists=True)
class Reorient2Std(FSLCommand):
    """fslreorient2std is a tool for reorienting the image to match the
    approximate orientation of the standard template images (MNI152).
    Examples
    --------
    >>> reorient = Reorient2Std()
    >>> reorient.inputs.in_file = "functional.nii"
    >>> res = reorient.run() # doctest: +SKIP
    """
    _cmd = 'fslreorient2std'
    input_spec = Reorient2StdInputSpec
    output_spec = Reorient2StdOutputSpec

    def _gen_filename(self, name):
        """Generate a default value only for 'out_file'."""
        if name != 'out_file':
            return None
        return self._gen_fname(self.inputs.in_file, suffix="_reoriented")

    def _list_outputs(self):
        """Report out_file, generating a default name when unset."""
        outputs = self.output_spec().get()
        if isdefined(self.inputs.out_file):
            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        else:
            outputs['out_file'] = self._gen_filename('out_file')
        return outputs
class InvWarpInputSpec(FSLCommandInputSpec):
    """Inputs for invwarp (see :class:`InvWarp`).

    FIX: corrected the user-facing help-text typo "deafult=1.0" ->
    "default=1.0" in the ``regularise`` description.
    """
    warp = File(exists=True, argstr='--warp=%s', mandatory=True,
                desc=('Name of file containing warp-coefficients/fields. This '
                      'would typically be the output from the --cout switch of'
                      ' fnirt (but can also use fields, like the output from '
                      '--fout).'))
    reference = File(exists=True, argstr='--ref=%s', mandatory=True,
                     desc=('Name of a file in target space. Note that the '
                           'target space is now different from the target '
                           'space that was used to create the --warp file. It '
                           'would typically be the file that was specified '
                           'with the --in argument when running fnirt.'))
    # auto-named from 'warp' as <warp>_inverse when not set explicitly
    inverse_warp = File(argstr='--out=%s', name_source=['warp'],
                        hash_files=False, name_template='%s_inverse',
                        desc=('Name of output file, containing warps that are '
                              'the "reverse" of those in --warp. This will be '
                              'a field-file (rather than a file of spline '
                              'coefficients), and it will have any affine '
                              'component included as part of the '
                              'displacements.'))
    # absolute/relative interpretation of the input warps is mutually
    # exclusive
    absolute = traits.Bool(argstr='--abs', xor=['relative'],
                           desc=('If set it indicates that the warps in --warp'
                                 ' should be interpreted as absolute, provided'
                                 ' that it is not created by fnirt (which '
                                 'always uses relative warps). If set it also '
                                 'indicates that the output --out should be '
                                 'absolute.'))
    relative = traits.Bool(argstr='--rel', xor=['absolute'],
                           desc=('If set it indicates that the warps in --warp'
                                 ' should be interpreted as relative. I.e. the'
                                 ' values in --warp are displacements from the'
                                 ' coordinates in the --ref space. If set it '
                                 'also indicates that the output --out should '
                                 'be relative.'))
    niter = traits.Int(argstr='--niter=%d',
                       desc=('Determines how many iterations of the '
                             'gradient-descent search that should be run.'))
    regularise = traits.Float(argstr='--regularise=%f',
                              desc='Regularization strength (default=1.0).')
    noconstraint = traits.Bool(argstr='--noconstraint',
                               desc='Do not apply Jacobian constraint')
    jacobian_min = traits.Float(argstr='--jmin=%f',
                                desc=('Minimum acceptable Jacobian value for '
                                      'constraint (default 0.01)'))
    jacobian_max = traits.Float(argstr='--jmax=%f',
                                desc=('Maximum acceptable Jacobian value for '
                                      'constraint (default 100.0)'))
class InvWarpOutputSpec(TraitedSpec):
    """Outputs of the FSL ``invwarp`` interface (see :class:`InvWarp`)."""
    inverse_warp = File(exists=True,
                        desc=('Name of output file, containing warps that are '
                              'the "reverse" of those in --warp.'))
class InvWarp(FSLCommand):
    """
    Use FSL Invwarp to invert a FNIRT warp
    Examples
    --------
    >>> from nipype.interfaces.fsl import InvWarp
    >>> invwarp = InvWarp()
    >>> invwarp.inputs.warp = "struct2mni.nii"
    >>> invwarp.inputs.reference = "anatomical.nii"
    >>> invwarp.inputs.output_type = "NIFTI_GZ"
    >>> invwarp.cmdline
    'invwarp --out=struct2mni_inverse.nii.gz --ref=anatomical.nii --warp=struct2mni.nii'
    >>> res = invwarp.run() # doctest: +SKIP
    """
    input_spec = InvWarpInputSpec
    output_spec = InvWarpOutputSpec
    # The FSL executable wrapped by this interface.
    _cmd = 'invwarp'
class ComplexInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`Complex` (FSL ``fslcomplex``).

    Exactly one of the ``_conversion`` boolean flags selects the operation;
    the ``xor`` lists built from ``_ofs``/``_conversion`` slices keep
    mutually exclusive inputs and outputs from being set together.
    """
    complex_in_file = File(exists=True, argstr="%s", position=2)
    complex_in_file2 = File(exists=True, argstr="%s", position=3)
    real_in_file = File(exists=True, argstr="%s", position=2)
    imaginary_in_file = File(exists=True, argstr="%s", position=3)
    magnitude_in_file = File(exists=True, argstr="%s", position=2)
    phase_in_file = File(exists=True, argstr='%s', position=3)
    # Names of all output-file traits (used for xor/skip bookkeeping).
    _ofs = ['complex_out_file',
            'magnitude_out_file', 'phase_out_file',
            'real_out_file', 'imaginary_out_file']
    # Names of the mutually exclusive conversion-mode flags.
    _conversion = ['real_polar', 'real_cartesian',
                   'complex_cartesian', 'complex_polar',
                   'complex_split', 'complex_merge', ]
    complex_out_file = File(genfile=True, argstr="%s", position=-3,
                            xor=_ofs + _conversion[:2])
    magnitude_out_file = File(genfile=True, argstr="%s", position=-4,
                              xor=_ofs[:1] + _ofs[3:] + _conversion[1:])
    phase_out_file = File(genfile=True, argstr="%s", position=-3,
                          xor=_ofs[:1] + _ofs[3:] + _conversion[1:])
    real_out_file = File(genfile=True, argstr="%s", position=-4,
                         xor=_ofs[:3] + _conversion[:1] + _conversion[2:])
    imaginary_out_file = File(genfile=True, argstr="%s", position=-3,
                              xor=_ofs[:3] + _conversion[:1] + _conversion[2:])
    start_vol = traits.Int(position=-2, argstr='%d')
    end_vol = traits.Int(position=-1, argstr='%d')
    real_polar = traits.Bool(
        argstr='-realpolar', xor=_conversion, position=1,)
    # requires=['complex_in_file','magnitude_out_file','phase_out_file'])
    real_cartesian = traits.Bool(
        argstr='-realcartesian', xor=_conversion, position=1,)
    # requires=['complex_in_file','real_out_file','imaginary_out_file'])
    complex_cartesian = traits.Bool(
        argstr='-complex', xor=_conversion, position=1,)
    # requires=['real_in_file','imaginary_in_file','complex_out_file'])
    complex_polar = traits.Bool(
        argstr='-complexpolar', xor=_conversion, position=1,)
    # requires=['magnitude_in_file','phase_in_file',
    # 'magnitude_out_file','phase_out_file'])
    complex_split = traits.Bool(
        argstr='-complexsplit', xor=_conversion, position=1,)
    # requires=['complex_in_file','complex_out_file'])
    complex_merge = traits.Bool(
        argstr='-complexmerge', xor=_conversion + ['start_vol', 'end_vol'],
        position=1,)
    # requires=['complex_in_file','complex_in_file2','complex_out_file'])
class ComplexOuputSpec(TraitedSpec):
    """Outputs of ``fslcomplex``; which are populated depends on the mode.

    NOTE(review): the class name misspells "Output"; it is kept as-is
    because :class:`Complex` references it under this name.
    """
    magnitude_out_file = File()
    phase_out_file = File()
    real_out_file = File()
    imaginary_out_file = File()
    complex_out_file = File()
class Complex(FSLCommand):
    """fslcomplex is a tool for converting complex data
    Examples
    --------
    >>> cplx = Complex()
    >>> cplx.inputs.complex_in_file = "complex.nii"
    >>> cplx.real_polar = True
    >>> res = cplx.run() # doctest: +SKIP
    """
    _cmd = 'fslcomplex'
    input_spec = ComplexInputSpec
    output_spec = ComplexOuputSpec
    def _parse_inputs(self, skip=None):
        # Skip the output-file traits that do not apply to the selected
        # conversion so they are neither auto-generated nor put on the
        # command line (_ofs order: complex, magnitude, phase, real, imag).
        if skip is None:
            skip = []
        if self.inputs.real_cartesian:
            skip += self.inputs._ofs[:3]
        elif self.inputs.real_polar:
            skip += self.inputs._ofs[:1] + self.inputs._ofs[3:]
        else:
            skip += self.inputs._ofs[1:]
        return super(Complex, self)._parse_inputs(skip)
    def _gen_filename(self, name):
        # Derive a default output filename (with a mode-specific suffix)
        # from the input file that drives the selected conversion.
        if name == 'complex_out_file':
            if self.inputs.complex_cartesian:
                in_file = self.inputs.real_in_file
            elif self.inputs.complex_polar:
                in_file = self.inputs.magnitude_in_file
            elif self.inputs.complex_split or self.inputs.complex_merge:
                in_file = self.inputs.complex_in_file
            else:
                return None
            return self._gen_fname(in_file, suffix="_cplx")
        elif name == 'magnitude_out_file':
            return self._gen_fname(self.inputs.complex_in_file, suffix="_mag")
        elif name == 'phase_out_file':
            return self._gen_fname(
                self.inputs.complex_in_file, suffix="_phase")
        elif name == 'real_out_file':
            return self._gen_fname(self.inputs.complex_in_file, suffix="_real")
        elif name == 'imaginary_out_file':
            return self._gen_fname(self.inputs.complex_in_file, suffix="_imag")
        return None
    def _get_output(self, name):
        # Prefer the user-provided filename; fall back to a generated one.
        output = getattr(self.inputs, name)
        if not isdefined(output):
            output = self._gen_filename(name)
        return os.path.abspath(output)
    def _list_outputs(self):
        # Only report the outputs produced by the selected conversion mode.
        outputs = self.output_spec().get()
        if self.inputs.complex_cartesian or self.inputs.complex_polar or \
                self.inputs.complex_split or self.inputs.complex_merge:
            outputs['complex_out_file'] = self._get_output('complex_out_file')
        elif self.inputs.real_cartesian:
            outputs['real_out_file'] = self._get_output('real_out_file')
            outputs['imaginary_out_file'] = self._get_output(
                'imaginary_out_file')
        elif self.inputs.real_polar:
            outputs['magnitude_out_file'] = self._get_output(
                'magnitude_out_file')
            outputs['phase_out_file'] = self._get_output('phase_out_file')
        return outputs
class WarpUtilsInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`WarpUtils` (FSL ``fnirtfileutils``)."""
    in_file = File(
        exists=True, argstr='--in=%s', mandatory=True,
        desc=('Name of file containing warp-coefficients/fields. This '
              'would typically be the output from the --cout switch of '
              'fnirt (but can also use fields, like the output from '
              '--fout).'))
    reference = File(exists=True, argstr='--ref=%s', mandatory=True,
                     desc=('Name of a file in target space. Note that the '
                           'target space is now different from the target '
                           'space that was used to create the --warp file. It '
                           'would typically be the file that was specified '
                           'with the --in argument when running fnirt.'))
    out_format = traits.Enum(
        'spline', 'field', argstr='--outformat=%s',
        desc=('Specifies the output format. If set to field (default) '
              'the output will be a (4D) field-file. If set to spline '
              'the format will be a (4D) file of spline coefficients.'))
    warp_resolution = traits.Tuple(
        traits.Float, traits.Float, traits.Float,
        argstr='--warpres=%0.4f,%0.4f,%0.4f',
        desc=('Specifies the resolution/knot-spacing of the splines pertaining'
              ' to the coefficients in the --out file. This parameter is only '
              'relevant if --outformat is set to spline. It should be noted '
              'that if the --in file has a higher resolution, the resulting '
              'coefficients will pertain to the closest (in a least-squares'
              ' sense) file in the space of fields with the --warpres'
              ' resolution. It should also be noted that the resolution '
              'will always be an integer multiple of the voxel '
              'size.'))
    knot_space = traits.Tuple(
        traits.Int, traits.Int, traits.Int,
        argstr='--knotspace=%d,%d,%d',
        desc=('Alternative (to --warpres) specification of the resolution of '
              'the output spline-field.'))
    out_file = File(
        argstr='--out=%s', position=-1, name_source=['in_file'],
        output_name='out_file',
        desc=('Name of output file. The format of the output depends on what '
              'other parameters are set. The default format is a (4D) '
              'field-file. If the --outformat is set to spline the format '
              'will be a (4D) file of spline coefficients.'))
    write_jacobian = traits.Bool(
        False, mandatory=True, usedefault=True,
        desc='Switch on --jac flag with automatically generated filename')
    out_jacobian = File(
        argstr='--jac=%s',
        desc=('Specifies that a (3D) file of Jacobian determinants '
              'corresponding to --in should be produced and written to '
              'filename.'))
    with_affine = traits.Bool(
        False, argstr='--withaff',
        desc=('Specifies that the affine transform (i.e. that which was '
              'specified for the --aff parameter in fnirt) should be '
              'included as displacements in the --out file. That can be '
              'useful for interfacing with software that cannot decode '
              'FSL/fnirt coefficient-files (where the affine transform is '
              'stored separately from the displacements).'))
class WarpUtilsOutputSpec(TraitedSpec):
    """Outputs of the ``fnirtfileutils`` interface."""
    out_file = File(
        desc=('Name of output file, containing the warp as field or '
              'coefficients.'))
    out_jacobian = File(
        desc=('Name of output file, containing the map of the determinant of '
              'the Jacobian'))
class WarpUtils(FSLCommand):
    """Use FSL `fnirtfileutils <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/fnirt/warp_utils.html>`_
    to convert field->coefficients, coefficients->field, coefficients->other_coefficients etc
    Examples
    --------
    >>> from nipype.interfaces.fsl import WarpUtils
    >>> warputils = WarpUtils()
    >>> warputils.inputs.in_file = "warpfield.nii"
    >>> warputils.inputs.reference = "T1.nii"
    >>> warputils.inputs.out_format = 'spline'
    >>> warputils.inputs.warp_resolution = (10,10,10)
    >>> warputils.inputs.output_type = "NIFTI_GZ"
    >>> warputils.cmdline # doctest: +ELLIPSIS
    'fnirtfileutils --in=warpfield.nii --outformat=spline --ref=T1.nii --warpres=10.0000,10.0000,10.0000 --out=warpfield_coeffs.nii.gz'
    >>> res = warputils.run() # doctest: +SKIP
    """
    input_spec = WarpUtilsInputSpec
    output_spec = WarpUtilsOutputSpec
    _cmd = 'fnirtfileutils'
    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        # Name the auto-generated output after the chosen format:
        # "<in>_coeffs" for spline output, "<in>_field" otherwise.
        suffix = 'field'
        if (isdefined(self.inputs.out_format) and
                self.inputs.out_format == 'spline'):
            suffix = 'coeffs'
        trait_spec = self.inputs.trait('out_file')
        trait_spec.name_template = "%s_" + suffix
        if self.inputs.write_jacobian:
            if not isdefined(self.inputs.out_jacobian):
                # Auto-generate "<in>_jac" when no filename was given.
                jac_spec = self.inputs.trait('out_jacobian')
                jac_spec.name_source = ['in_file']
                jac_spec.name_template = '%s_jac'
                jac_spec.output_name = 'out_jacobian'
        else:
            skip += ['out_jacobian']
        # write_jacobian is a pure switch; it never appears on the cmdline.
        skip += ['write_jacobian']
        return super(WarpUtils, self)._parse_inputs(skip=skip)
class ConvertWarpInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`ConvertWarp` (FSL ``convertwarp``)."""
    reference = File(
        exists=True, argstr='--ref=%s', mandatory=True, position=1,
        desc='Name of a file in target space of the full transform.')
    out_file = File(
        argstr='--out=%s', position=-1, name_source=['reference'],
        name_template='%s_concatwarp', output_name='out_file',
        desc=('Name of output file, containing warps that are the combination '
              'of all those given as arguments. The format of this will be a '
              'field-file (rather than spline coefficients) with any affine '
              'components included.'))
    premat = File(exists=True, argstr='--premat=%s',
                  desc='filename for pre-transform (affine matrix)')
    warp1 = File(exists=True, argstr='--warp1=%s',
                 desc='Name of file containing initial '
                 'warp-fields/coefficients (follows premat). This could '
                 'e.g. be a fnirt-transform from a subjects structural '
                 'scan to an average of a group of subjects.')
    midmat = File(exists=True, argstr="--midmat=%s",
                  desc="Name of file containing mid-warp-affine transform")
    warp2 = File(
        exists=True, argstr='--warp2=%s',
        desc='Name of file containing secondary warp-fields/coefficients '
        '(after warp1/midmat but before postmat). This could e.g. be a '
        'fnirt-transform from the average of a group of subjects to some '
        'standard space (e.g. MNI152).')
    postmat = File(
        exists=True, argstr='--postmat=%s',
        desc='Name of file containing an affine transform (applied last). It '
        'could e.g. be an affine transform that maps the MNI152-space '
        'into a better approximation to the Talairach-space (if indeed '
        'there is one).')
    shift_in_file = File(
        exists=True, argstr='--shiftmap=%s',
        desc='Name of file containing a "shiftmap", a non-linear transform '
        'with displacements only in one direction (applied first, before '
        'premat). This would typically be a fieldmap that has been '
        'pre-processed using fugue that maps a subjects functional (EPI) '
        'data onto an undistorted space (i.e. a space that corresponds '
        'to his/her true anatomy).')
    shift_direction = traits.Enum(
        'y-', 'y', 'x', 'x-', 'z', 'z-',
        argstr="--shiftdir=%s", requires=['shift_in_file'],
        desc='Indicates the direction that the distortions from '
        '--shiftmap goes. It depends on the direction and '
        'polarity of the phase-encoding in the EPI sequence.')
    cons_jacobian = traits.Bool(
        False, argstr='--constrainj',
        desc='Constrain the Jacobian of the warpfield to lie within specified '
        'min/max limits.')
    jacobian_min = traits.Float(argstr='--jmin=%f',
                                desc='Minimum acceptable Jacobian value for '
                                'constraint (default 0.01)')
    jacobian_max = traits.Float(argstr='--jmax=%f',
                                desc='Maximum acceptable Jacobian value for '
                                'constraint (default 100.0)')
    abswarp = traits.Bool(
        argstr='--abs', xor=['relwarp'],
        desc='If set it indicates that the warps in --warp1 and --warp2 should'
        ' be interpreted as absolute. I.e. the values in --warp1/2 are '
        'the coordinates in the next space, rather than displacements. '
        'This flag is ignored if --warp1/2 was created by fnirt, which '
        'always creates relative displacements.')
    relwarp = traits.Bool(
        argstr='--rel', xor=['abswarp'],
        desc='If set it indicates that the warps in --warp1/2 should be '
        'interpreted as relative. I.e. the values in --warp1/2 are '
        'displacements from the coordinates in the next space.')
    out_abswarp = traits.Bool(
        argstr='--absout', xor=['out_relwarp'],
        desc='If set it indicates that the warps in --out should be absolute, '
        'i.e. the values in --out are displacements from the coordinates '
        'in --ref.')
    out_relwarp = traits.Bool(
        argstr='--relout', xor=['out_abswarp'],
        desc='If set it indicates that the warps in --out should be relative, '
        'i.e. the values in --out are displacements from the coordinates '
        'in --ref.')
class ConvertWarpOutputSpec(TraitedSpec):
    """Outputs of the ``convertwarp`` interface."""
    out_file = File(
        exists=True,
        desc='Name of output file, containing the warp as field or '
        'coefficients.')
class ConvertWarp(FSLCommand):
    """Use FSL `convertwarp <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/fnirt/warp_utils.html>`_
    for combining multiple transforms into one.
    Examples
    --------
    >>> from nipype.interfaces.fsl import ConvertWarp
    >>> warputils = ConvertWarp()
    >>> warputils.inputs.warp1 = "warpfield.nii"
    >>> warputils.inputs.reference = "T1.nii"
    >>> warputils.inputs.relwarp = True
    >>> warputils.inputs.output_type = "NIFTI_GZ"
    >>> warputils.cmdline # doctest: +ELLIPSIS
    'convertwarp --ref=T1.nii --rel --warp1=warpfield.nii --out=T1_concatwarp.nii.gz'
    >>> res = warputils.run() # doctest: +SKIP
    """
    input_spec = ConvertWarpInputSpec
    output_spec = ConvertWarpOutputSpec
    # The FSL executable that concatenates the transforms.
    _cmd = 'convertwarp'
class WarpPointsBaseInputSpec(CommandLineInputSpec):
    """Inputs shared by the img2imgcoord/img2stdcoord point-warping tools."""
    in_coords = File(exists=True, position=-1, argstr='%s', mandatory=True,
                     desc='filename of file containing coordinates')
    xfm_file = File(exists=True, argstr='-xfm %s', xor=['warp_file'],
                    desc='filename of affine transform (e.g. source2dest.mat)')
    warp_file = File(exists=True, argstr='-warp %s', xor=['xfm_file'],
                     desc='filename of warpfield (e.g. '
                     'intermediate2dest_warp.nii.gz)')
    coord_vox = traits.Bool(True, argstr='-vox', xor=['coord_mm'],
                            desc='all coordinates in voxels - default')
    coord_mm = traits.Bool(False, argstr='-mm', xor=['coord_vox'],
                           desc='all coordinates in mm')
    out_file = File(name_source='in_coords',
                    name_template='%s_warped', output_name='out_file',
                    desc='output file name')
class WarpPointsInputSpec(WarpPointsBaseInputSpec):
    """Adds the source/destination volumes required by ``img2imgcoord``."""
    src_file = File(exists=True, argstr='-src %s', mandatory=True,
                    desc='filename of source image')
    dest_file = File(exists=True, argstr='-dest %s', mandatory=True,
                     desc='filename of destination image')
class WarpPointsOutputSpec(TraitedSpec):
    """Output of the point-warping interfaces."""
    # NOTE(review): this desc looks copy-pasted from the warp-field specs;
    # the file actually contains the warped point coordinates.
    out_file = File(
        exists=True,
        desc='Name of output file, containing the warp as field or '
        'coefficients.')
class WarpPoints(CommandLine):
    """Use FSL `img2imgcoord <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/flirt/overview.html>`_
    to transform point sets. Accepts plain text files and vtk files.
    .. Note:: transformation of TrackVis trk files is not yet implemented
    Examples
    --------
    >>> from nipype.interfaces.fsl import WarpPoints
    >>> warppoints = WarpPoints()
    >>> warppoints.inputs.in_coords = 'surf.txt'
    >>> warppoints.inputs.src_file = 'epi.nii'
    >>> warppoints.inputs.dest_file = 'T1.nii'
    >>> warppoints.inputs.warp_file = 'warpfield.nii'
    >>> warppoints.inputs.coord_mm = True
    >>> warppoints.cmdline # doctest: +ELLIPSIS
    'img2imgcoord -mm -dest T1.nii -src epi.nii -warp warpfield.nii surf.txt'
    >>> res = warppoints.run() # doctest: +SKIP
    """
    input_spec = WarpPointsInputSpec
    output_spec = WarpPointsOutputSpec
    _cmd = 'img2imgcoord'
    _terminal_output = 'stream'

    def __init__(self, command=None, **inputs):
        # Bookkeeping filled in by _parse_inputs:
        #   _tmpfile   -- path (str) of a temporary .txt file used when the
        #                 input coordinates come as vtk/trk
        #   _in_file   -- input coordinates path without its extension
        #   _outformat -- extension (without the dot) of the input file
        self._tmpfile = None
        self._in_file = None
        self._outformat = None
        super(WarpPoints, self).__init__(command=command, **inputs)

    def _format_arg(self, name, trait_spec, value):
        # 'out_file' is not a real CLI flag: img2imgcoord prints the warped
        # coordinates on stdout, which _run_interface captures.
        if name == 'out_file':
            return ''
        return super(WarpPoints, self)._format_arg(name, trait_spec, value)

    def _parse_inputs(self, skip=None):
        fname, ext = op.splitext(self.inputs.in_coords)
        setattr(self, '_in_file', fname)
        setattr(self, '_outformat', ext[1:])
        first_args = super(WarpPoints, self)._parse_inputs(
            skip=['in_coords', 'out_file'])
        second_args = fname + '.txt'
        if ext in ['.vtk', '.trk']:
            # Non-text inputs are converted to a temporary text file that is
            # handed to the command line instead of the original file.
            if self._tmpfile is None:
                self._tmpfile = tempfile.NamedTemporaryFile(
                    suffix='.txt', dir=os.getcwd(), delete=False).name
            second_args = self._tmpfile
        return first_args + [second_args]

    def _vtk_to_coords(self, in_file, out_file=None):
        """Extract the point array of a vtk mesh into a plain text file."""
        from ..vtkbase import tvtk
        from ...interfaces import vtkbase as VTKInfo
        if VTKInfo.no_tvtk():
            raise ImportError(
                'TVTK is required and tvtk package was not found')
        reader = tvtk.PolyDataReader(file_name=in_file + '.vtk')
        reader.update()
        mesh = VTKInfo.vtk_output(reader)
        points = mesh.points
        if out_file is None:
            # BUG FIX: was ``out_file, _ = op.splitext(in_file) + '.txt'``,
            # which raises TypeError (tuple + str) and mis-unpacks.
            out_file = op.splitext(in_file)[0] + '.txt'
        np.savetxt(out_file, points)
        return out_file

    def _coords_to_vtk(self, points, out_file):
        """Write *points* back into a copy of the input vtk mesh."""
        from ..vtkbase import tvtk
        from ...interfaces import vtkbase as VTKInfo
        if VTKInfo.no_tvtk():
            raise ImportError(
                'TVTK is required and tvtk package was not found')
        # BUG FIX: the input trait is named ``in_coords``; there is no
        # ``in_file`` trait on WarpPointsBaseInputSpec.
        reader = tvtk.PolyDataReader(file_name=self.inputs.in_coords)
        reader.update()
        mesh = VTKInfo.vtk_output(reader)
        mesh.points = points
        writer = tvtk.PolyDataWriter(file_name=out_file)
        VTKInfo.configure_input_data(writer, mesh)
        writer.write()

    def _trk_to_coords(self, in_file, out_file=None):
        """Dump TrackVis streamlines to a plain text file."""
        from nibabel.trackvis import TrackvisFile
        trkfile = TrackvisFile.from_file(in_file)
        streamlines = trkfile.streamlines
        if out_file is None:
            out_file, _ = op.splitext(in_file)
        # BUG FIX: np.savetxt takes (fname, array); arguments were swapped.
        np.savetxt(out_file + '.txt', streamlines)
        return out_file + '.txt'

    def _coords_to_trk(self, points, out_file):
        raise NotImplementedError('trk files are not yet supported')

    def _overload_extension(self, value, name):
        # Only ever invoked for 'out_file' (the only name_source trait):
        # keep the original input extension on the generated output name.
        if name == 'out_file':
            return '%s.%s' % (value, getattr(self, '_outformat'))

    def _run_interface(self, runtime):
        fname = getattr(self, '_in_file')
        outformat = getattr(self, '_outformat')
        tmpfile = None
        if outformat == 'vtk':
            tmpfile = self._tmpfile
            self._vtk_to_coords(fname, out_file=tmpfile)
        elif outformat == 'trk':
            tmpfile = self._tmpfile
            self._trk_to_coords(fname, out_file=tmpfile)
        runtime = super(WarpPoints, self)._run_interface(runtime)
        # img2imgcoord prints a header line followed by whitespace-separated
        # coordinate values on stdout.
        newpoints = np.fromstring(
            '\n'.join(runtime.stdout.split('\n')[1:]), sep=' ')
        if tmpfile is not None:
            try:
                # BUG FIX: ``tmpfile`` is already a path string; the old
                # ``os.remove(tmpfile.name)`` raised AttributeError (silently
                # swallowed by a bare except), leaking the temporary file.
                os.remove(tmpfile)
            except OSError:
                pass
        out_file = self._filename_from_source('out_file')
        if outformat == 'vtk':
            self._coords_to_vtk(newpoints, out_file)
        elif outformat == 'trk':
            self._coords_to_trk(newpoints, out_file)
        else:
            np.savetxt(out_file, newpoints.reshape(-1, 3))
        return runtime
class WarpPointsToStdInputSpec(WarpPointsBaseInputSpec):
    """Adds image/standard-space volumes required by ``img2stdcoord``."""
    img_file = File(exists=True, argstr='-img %s', mandatory=True,
                    desc=('filename of input image'))
    std_file = File(exists=True, argstr='-std %s', mandatory=True,
                    desc=('filename of destination image'))
    premat_file = File(exists=True, argstr='-premat %s',
                       desc=('filename of pre-warp affine transform '
                             '(e.g. example_func2highres.mat)'))
class WarpPointsToStd(WarpPoints):
    """
    Use FSL `img2stdcoord <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/flirt/overview.html>`_
    to transform point sets to standard space coordinates. Accepts plain text
    files and vtk files.
    .. Note:: transformation of TrackVis trk files is not yet implemented
    Examples
    --------
    >>> from nipype.interfaces.fsl import WarpPointsToStd
    >>> warppoints = WarpPointsToStd()
    >>> warppoints.inputs.in_coords = 'surf.txt'
    >>> warppoints.inputs.img_file = 'T1.nii'
    >>> warppoints.inputs.std_file = 'mni.nii'
    >>> warppoints.inputs.warp_file = 'warpfield.nii'
    >>> warppoints.inputs.coord_mm = True
    >>> warppoints.cmdline # doctest: +ELLIPSIS
    'img2stdcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt'
    >>> res = warppoints.run() # doctest: +SKIP
    """
    input_spec = WarpPointsToStdInputSpec
    output_spec = WarpPointsOutputSpec
    _cmd = 'img2stdcoord'
    # Note: uses a different terminal-output mode than the parent class.
    _terminal_output = 'file_split'
class WarpPointsFromStdInputSpec(CommandLineInputSpec):
    """Inputs for :class:`WarpPointsFromStd` (``std2imgcoord``)."""
    img_file = File(exists=True, argstr='-img %s', mandatory=True,
                    desc='filename of a destination image')
    std_file = File(exists=True, argstr='-std %s', mandatory=True,
                    desc='filename of the image in standard space')
    in_coords = File(exists=True, position=-2, argstr='%s', mandatory=True,
                     desc='filename of file containing coordinates')
    xfm_file = File(exists=True, argstr='-xfm %s', xor=['warp_file'],
                    desc='filename of affine transform (e.g. source2dest.mat)')
    warp_file = File(exists=True, argstr='-warp %s', xor=['xfm_file'],
                     desc='filename of warpfield (e.g. '
                     'intermediate2dest_warp.nii.gz)')
    coord_vox = traits.Bool(True, argstr='-vox', xor=['coord_mm'],
                            desc='all coordinates in voxels - default')
    coord_mm = traits.Bool(False, argstr='-mm', xor=['coord_vox'],
                           desc='all coordinates in mm')
class WarpPointsFromStd(CommandLine):
    """
    Use FSL `std2imgcoord <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/flirt/overview.html>`_
    to transform point sets to standard space coordinates. Accepts plain text coordinates
    files.
    Examples
    --------
    >>> from nipype.interfaces.fsl import WarpPointsFromStd
    >>> warppoints = WarpPointsFromStd()
    >>> warppoints.inputs.in_coords = 'surf.txt'
    >>> warppoints.inputs.img_file = 'T1.nii'
    >>> warppoints.inputs.std_file = 'mni.nii'
    >>> warppoints.inputs.warp_file = 'warpfield.nii'
    >>> warppoints.inputs.coord_mm = True
    >>> warppoints.cmdline # doctest: +ELLIPSIS
    'std2imgcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt'
    >>> res = warppoints.run() # doctest: +SKIP
    """
    input_spec = WarpPointsFromStdInputSpec
    output_spec = WarpPointsOutputSpec
    _cmd = 'std2imgcoord'
    def _list_outputs(self):
        # NOTE(review): assumes the base class captures the command's stdout
        # into 'stdout.nipype' in the working directory -- confirm against
        # the CommandLine terminal-output handling.
        outputs = self.output_spec().get()
        outputs['out_file'] = op.abspath('stdout.nipype')
        return outputs
class MotionOutliersInputSpec(FSLCommandInputSpec):
    """Inputs for :class:`MotionOutliers` (``fsl_motion_outliers``)."""
    in_file = File(
        exists=True, mandatory=True, desc="unfiltered 4D image",
        argstr="-i %s")
    out_file = File(argstr="-o %s", name_source='in_file',
                    name_template='%s_outliers.txt',
                    keep_extension=True, desc='output outlier file name',
                    hash_files=False)
    mask = File(
        exists=True, argstr="-m %s", desc="mask image for calculating metric")
    metric = traits.Enum(
        'refrms', ['refrms', 'dvars', 'refmse', 'fd', 'fdrms'], argstr="--%s",
        desc='metrics: refrms - RMS intensity difference to reference volume '
        'as metric [default metric], refmse - Mean Square Error version '
        'of refrms (used in original version of fsl_motion_outliers), '
        'dvars - DVARS, fd - frame displacement, fdrms - FD with RMS '
        'matrix calculation')
    threshold = traits.Float(argstr="--thresh=%g",
                             desc=("specify absolute threshold value "
                                   "(otherwise use box-plot cutoff = P75 + "
                                   "1.5*IQR)"))
    no_motion_correction = traits.Bool(
        argstr="--nomoco",
        desc="do not run motion correction (assumed already done)")
    dummy = traits.Int(
        argstr="--dummy=%d",
        desc='number of dummy scans to delete (before running anything and '
        'creating EVs)')
    out_metric_values = File(
        argstr="-s %s", name_source='in_file', name_template='%s_metrics.txt',
        keep_extension=True,
        desc='output metric values (DVARS etc.) file name', hash_files=False)
    out_metric_plot = File(
        argstr="-p %s", name_source='in_file', name_template='%s_metrics.png',
        hash_files=False, keep_extension=True,
        desc='output metric values plot (DVARS etc.) file name')
class MotionOutliersOutputSpec(TraitedSpec):
    """Outputs of ``fsl_motion_outliers``."""
    out_file = File(exists=True)
    out_metric_values = File(exists=True)
    out_metric_plot = File(exists=True)
class MotionOutliers(FSLCommand):
    """
    Use FSL fsl_motion_outliers`http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FSLMotionOutliers`_ to find outliers in timeseries (4d) data.
    Examples
    --------
    >>> from nipype.interfaces.fsl import MotionOutliers
    >>> mo = MotionOutliers()
    >>> mo.inputs.in_file = "epi.nii"
    >>> mo.cmdline # doctest: +ELLIPSIS
    'fsl_motion_outliers -i epi.nii -o epi_outliers.txt -p epi_metrics.png -s epi_metrics.txt'
    >>> res = mo.run() # doctest: +SKIP
    """
    input_spec = MotionOutliersInputSpec
    output_spec = MotionOutliersOutputSpec
    # The FSL script wrapped by this interface.
    _cmd = 'fsl_motion_outliers'
| [
"noreply@github.com"
] | salma1601.noreply@github.com |
672a3b9815fa0172265ae6714397a102bf675799 | 26907a78e55f4436cfcf2b0a18935d79dcc50c2f | /2020/test_02.py | c2bf7c5a80e9be076f1b66a0c13a96cdcf0b281e | [] | no_license | singingknight/adventOfCode | 7f55d9b06c48fa75c7558d4ed2256e841858bf6f | 13d3ffb95d4a2e2c2269a95b1cb8a27b2ac3e3b3 | refs/heads/master | 2022-11-30T19:21:32.212258 | 2020-12-25T11:15:54 | 2020-12-25T11:15:54 | 225,067,214 | 0 | 1 | null | 2022-11-10T08:12:32 | 2019-11-30T20:39:36 | TypeScript | UTF-8 | Python | false | false | 236 | py | from day02 import validPwdCount, validPwdCount2
# Sample password-policy lines shared by both Advent of Code day-2 tests.
testList = [
    "1-3 a: abcde",
    "1-3 b: cdefg",
    "2-9 c: ccccccccc",
]
def test_part1():
    # Part 1: two of the three sample passwords satisfy their policy.
    assert validPwdCount(testList) == 2
def test_part2():
    # Part 2: only one sample password satisfies the positional policy.
    assert validPwdCount2(testList) == 1
| [
"per.jakobsen@gmail.com"
] | per.jakobsen@gmail.com |
c306ee028f03366c34bdca2afb22d77a7303c459 | a6f70134a9bfdcc630e67a6d05c174d35496ada3 | /Sum of Inverse of Numbers^n.py | bb4e17b7740256803b5bc189aaea48aee10de4d2 | [] | no_license | nauman-sakharkar/Python-2.x | 9c0e9d9e5968631e44ab595175ddcbe0a1b615ad | 31df433481d75c7b76a40b2fc372fa6fefbb779f | refs/heads/master | 2022-10-08T17:20:46.387977 | 2020-06-10T07:36:03 | 2020-06-10T07:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | n=int(input("Enter the Number Of Times = "))
def _inverse_power_sum(base, count):
    """Return the partial geometric series sum((1/base)**i for i in 1..count)."""
    total = 0.0
    for i in range(1, count + 1):
        total += (1.0 / base) ** i
    return total

q = int(input("Enter The Number = "))
# ``n`` (number of terms) is read from the user earlier in the script.
# The accumulator was previously a variable named ``sum``, shadowing the
# builtin; the loop now lives in a helper with a non-shadowing name.
print("", _inverse_power_sum(q, n))
| [
"50130960+nauman-sakharkar@users.noreply.github.com"
] | 50130960+nauman-sakharkar@users.noreply.github.com |
344914ff635ee7598f00751b9c5725df745637bf | fe7e55dd4f245b18c95afcfb131787fcb595612f | /lib/python3.6/heapq.py | f572e8e985437c4d8ce7c67d41e92ba4f65515e7 | [] | no_license | ElviraMingazova/rand-IS | a8434ebc34c06eca1fa32171b8e257bdd420c2ee | 9d397f5fb9718e676267135b32f1d9beee7f426e | refs/heads/master | 2018-12-19T23:43:36.667407 | 2018-09-30T10:10:01 | 2018-09-30T10:10:01 | 107,152,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | /home/elvira/anaconda3/lib/python3.6/heapq.py | [
"mingazova.elvira@gmail.com"
] | mingazova.elvira@gmail.com |
1d3352d9893bcc8b31c4ddb0277479b962b41ff5 | 15a1d05d48953ddaa835b94c8fb93ca69c02488d | /catkin_ws/src/make_plan/src/make_plan_caller.py | 5ca21a8f98eb974de97b5ea1e21186320fc9dea9 | [] | no_license | ok-kewei/ROS-Navigation | 81b895cccbe65287be98da6e62967ae44dee4ab6 | 5218fe366db3638efc179cc6e55fe368842a1c20 | refs/heads/master | 2023-01-06T15:08:19.822330 | 2020-11-05T20:41:03 | 2020-11-05T20:41:03 | 304,449,087 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #! /usr/bin/env python
import rospy
from nav_msgs.srv import GetPlan, GetPlanRequest
import sys
# Minimal ROS client for move_base's path-planning service (Python 2).
rospy.init_node('service_client')
# Block until move_base advertises the service.
rospy.wait_for_service('/move_base/make_plan')
make_plan_service = rospy.ServiceProxy('/move_base/make_plan', GetPlan)
msg = GetPlanRequest()
# Start pose: map origin.
# NOTE(review): an all-zero quaternion is not a valid (normalized)
# orientation; planners usually expect w = 1 -- confirm intended behavior.
msg.start.header.frame_id = 'map'
msg.start.pose.position.x = 0
msg.start.pose.position.y = 0
msg.start.pose.position.z = 0
msg.start.pose.orientation.x = 0
msg.start.pose.orientation.y = 0
msg.start.pose.orientation.z = 0
msg.start.pose.orientation.w = 0
# Goal pose: (1, 2) in the map frame, same (zero) orientation.
msg.goal.header.frame_id = 'map'
msg.goal.pose.position.x = 1
msg.goal.pose.position.y = 2
msg.goal.pose.position.z = 0
msg.goal.pose.orientation.x = 0
msg.goal.pose.orientation.y = 0
msg.goal.pose.orientation.z = 0
msg.goal.pose.orientation.w = 0
# Request the plan and print the resulting nav_msgs/Path.
result = make_plan_service(msg)
print result
"43628709+ok-kewei@users.noreply.github.com"
] | 43628709+ok-kewei@users.noreply.github.com |
dacef40ee0d77541e89664963c507877e18a2ff9 | ad6cbaa1f137dc86ad92295b9ac9b5eabbd32b3f | /stt_service_pb2_grpc.py | 8891e52a73ca1e4dcd981ceae9b40b1f130e5db5 | [] | no_license | ping2lai2/speech_to_text | b67360ad74883cfa29a743c01d49cc5c3bef7b6b | 80056dddd6e4ecc15eeacef21a83f40776f59cf3 | refs/heads/master | 2021-02-15T21:19:21.350641 | 2019-03-05T13:44:32 | 2020-03-04T15:25:58 | 244,932,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import stt_service_pb2 as stt__service__pb2
# NOTE: generated by the gRPC Python protocol compiler -- regenerate from
# the .proto file instead of editing by hand.
class SttServiceStub(object):
  """Client-side stub for the Yandex Cloud speech-to-text streaming service."""
  # missing associated documentation comment in .proto file
  pass
  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Bidirectional streaming RPC: audio chunks in, recognition results out.
    self.StreamingRecognize = channel.stream_stream(
        '/yandex.cloud.ai.stt.v2.SttService/StreamingRecognize',
        request_serializer=stt__service__pb2.StreamingRecognitionRequest.SerializeToString,
        response_deserializer=stt__service__pb2.StreamingRecognitionResponse.FromString,
        )
class SttServiceServicer(object):
  """Server-side skeleton; subclasses override StreamingRecognize (generated code)."""
  # missing associated documentation comment in .proto file
  pass
  def StreamingRecognize(self, request_iterator, context):
    # missing associated documentation comment in .proto file
    pass
    # Default implementation reports UNIMPLEMENTED to the client.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_SttServiceServicer_to_server(servicer, server):
  """Register a SttServiceServicer implementation with a grpc.Server (generated code)."""
  rpc_method_handlers = {
      'StreamingRecognize': grpc.stream_stream_rpc_method_handler(
          servicer.StreamingRecognize,
          request_deserializer=stt__service__pb2.StreamingRecognitionRequest.FromString,
          response_serializer=stt__service__pb2.StreamingRecognitionResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'yandex.cloud.ai.stt.v2.SttService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
"eugene.kazak@antidasoftware.com"
] | eugene.kazak@antidasoftware.com |
4ea666bc8e896fbcd40fb73d27e4e967147c0a7b | 3e85618c79a1a934fec543e1327e772ca081a5b9 | /N1226.py | f9cf0945dd8c9496a8325051fcd4c4ce8e6bba04 | [] | no_license | ghdus4185/SWEXPERT | 72d79aa4a668452327a676a644b952bab191c79b | 4dc74ad74df7837450de4ce55526dac7760ce738 | refs/heads/master | 2020-07-16T18:31:22.153239 | 2019-12-20T04:18:30 | 2019-12-20T04:18:30 | 205,843,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import sys
# Redirect stdin so input() reads the puzzle data from a local file
# (common SW Expert Academy / competitive-programming setup).
sys.stdin = open('input.txt', 'r')
def find(x, y):
    """Iterative DFS from cell (x, y) on the 16x16 global ``maze``.

    Sets the global ``possible`` to 1 as soon as a goal cell (value 3) is
    adjacent to a reachable open cell (value 0), and returns ``possible``.
    Visited open cells are overwritten with 1 in ``maze``.
    """
    # Note: the unused ``check`` global declaration was dropped.
    global di, dj, maze, possible
    stack = [[x, y]]
    while stack:
        ci, cj = stack.pop()
        for k in range(4):
            ni = ci + di[k]
            nj = cj + dj[k]
            # Stay inside the 16x16 grid.
            if 0 <= ni < 16 and 0 <= nj < 16:
                if maze[ni][nj] == 3:
                    possible = 1
                    return possible
                if maze[ni][nj] == 0:
                    # Mark when pushing so a cell is never stacked twice
                    # (the original marked after popping, allowing the same
                    # cell to be pushed and expanded repeatedly).
                    maze[ni][nj] = 1
                    stack.append([ni, nj])
    return possible
# Neighbor offsets: up, down, left, right.
di = [-1, 1, 0, 0]
dj = [0, 0, -1, 1]
# Exactly 10 test cases, each: an id line then 16 rows of 16 digits.
for tc in range(1, 11):
    t = int(input())
    maze = [list(map(int, ' '.join(input()).split())) for _ in range(16)]
    # Find the start cell (value 2).
    res = 0
    for i in range(16):
        for j in range(16):
            if maze[i][j] == 2:
                res = 1
                break
        if res == 1:
            break
    check = [[0]*16 for _ in range(16)]
    possible = 0
    # find() sets the global `possible` to 1 if a goal cell is reachable.
    find(i, j)
    if possible == 1:
        print('#{} 1'.format(t))
    else:
        print('#{} 0'.format(t))
"ckdghdus@naver.com"
] | ckdghdus@naver.com |
7ede643951e1f15dbbd488aee63423bae39dbced | 33db9e6d0a73f2353747a4c9d3223d55a38730a8 | /apps/first_app/models.py | 9c9e3168c45565effb1144ef8f0ded356a58890e | [] | no_license | philmccormick23/Likes-and-Books | 94d778df265fe9b1645f783c83358617ca6fe0c0 | 0a9b18ceb7ce33a72334900e7f9f62b10d87a796 | refs/heads/master | 2020-04-02T15:14:46.314382 | 2018-10-24T19:46:10 | 2018-10-24T19:46:10 | 154,559,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    """A site user who can upload books and like the books of others."""
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    email = models.EmailField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
class Books(models.Model):
    """A book uploaded by one user and likeable by many users."""
    name = models.CharField(max_length=255)
    desc = models.CharField(max_length=255)
    # Uploader; PROTECT prevents deleting a user who still owns books.
    upload = models.ForeignKey(User, null=True, related_name="codingdojo", on_delete=models.PROTECT)
    # Bug fix: the original spelled this `ManyToMfanyField`, which raises
    # AttributeError the moment the models module is imported.
    users = models.ManyToManyField(User, related_name="likes")
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
"phillipmccormick@Phillips-MacBook-Pro.local"
] | phillipmccormick@Phillips-MacBook-Pro.local |
0c21269cec3d106c781ee734f3a60a7415c78889 | 1792509a9accac11c837e2a18dcb3d34f1d7e30e | /client/category.py | edfa52d0b3eb42271cc8d9e90fe84f84bc763d38 | [] | no_license | kafura-kafiri/herb | 2d3166b94e5fdacd106d6c4bc21d09f6c9cf568e | 48329a0059e2843c72ad2d85e7bb31379f0042e5 | refs/heads/master | 2020-04-09T09:35:03.720161 | 2018-12-17T11:02:25 | 2018-12-17T11:02:25 | 160,238,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | import requests
# Base endpoint of the local categories service.
url = 'http://localhost:5000/categories/'
headers = {'content-type': 'application/json'}  # NOTE(review): defined but never used below
# Sample payloads: each category is a title plus its ancestor chain.
_categories = [
    {
        'ancestors': ['a', 'b', 'c'],
        'title': 'd'
    }, {
        'ancestors': ['x', 'y'],
        'title': 'z'
    }
]
def fill():
    """Reset the category collection, then insert every sample category.

    POSTs to the service's '*' endpoint first (presumably a reset -- TODO
    confirm against the server), then to '+' once per entry in the
    module-level `_categories`, echoing each response body.
    """
    requests.post(url + '*')
    print()
    print('categories >>')
    for entry in _categories:
        payload = {'json': str(entry)}
        reply = requests.post(url + '+', data=payload)
        print(reply.content)
"kafura.kafiri@gmail.com"
] | kafura.kafiri@gmail.com |
03f1eecc3a67de6f1eafac6c19edfc3223b0f5d0 | 2ad68da0d7c0d0237b9953744891bc1028c6ed91 | /vimba/__init__.py | 443e0ee55945ae04b8c0a07a3066520bcc34e1a7 | [
"BSD-2-Clause"
] | permissive | SunnyAVT/VimbaPython | 8a9dc74ab2d5425b994ceb31779242e33b110f1e | 82571be46e7f6fb1af272d02a96fb6f331d23d33 | refs/heads/master | 2020-09-20T06:00:33.344031 | 2019-11-27T09:17:09 | 2019-11-27T09:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | """BSD 2-Clause License
Copyright (c) 2019, Allied Vision Technologies GmbH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
THE SOFTWARE IS PRELIMINARY AND STILL IN TESTING AND VERIFICATION PHASE AND
IS PROVIDED ON AN “AS IS” AND “AS AVAILABLE” BASIS AND IS BELIEVED TO CONTAIN DEFECTS.
A PRIMARY PURPOSE OF THIS EARLY ACCESS IS TO OBTAIN FEEDBACK ON PERFORMANCE AND
THE IDENTIFICATION OF DEFECT SOFTWARE, HARDWARE AND DOCUMENTATION.
"""
# Suppress 'imported but unused' - Error from static style checker.
# flake8: noqa: F401
# Package version, following semantic versioning.
__version__ = '0.1.0'

# Public API of the vimba package: every name listed here is re-exported
# from the submodules imported at the bottom of this file.
__all__ = [
    # Entry point
    'Vimba',
    # Camera access
    'Camera',
    'CameraChangeHandler',
    'CameraEvent',
    'AccessMode',
    'PersistType',
    # Interface access
    'Interface',
    'InterfaceType',
    'InterfaceChangeHandler',
    'InterfaceEvent',
    # Frames and pixel formats
    'PixelFormat',
    'Frame',
    'FeatureTypes',
    'FrameHandler',
    'FrameStatus',
    'Debayer',
    'intersect_pixel_formats',
    'MONO_PIXEL_FORMATS',
    'BAYER_PIXEL_FORMATS',
    'RGB_PIXEL_FORMATS',
    'RGBA_PIXEL_FORMATS',
    'BGR_PIXEL_FORMATS',
    'BGRA_PIXEL_FORMATS',
    'YUV_PIXEL_FORMATS',
    'YCBCR_PIXEL_FORMATS',
    'COLOR_PIXEL_FORMATS',
    'OPENCV_PIXEL_FORMATS',
    # Exceptions
    'VimbaSystemError',
    'VimbaCameraError',
    'VimbaInterfaceError',
    'VimbaFeatureError',
    'VimbaTimeout',
    # Feature types
    'IntFeature',
    'FloatFeature',
    'StringFeature',
    'BoolFeature',
    'EnumEntry',
    'EnumFeature',
    'CommandFeature',
    'RawFeature',
    # Logging helpers and canned configurations
    'LogLevel',
    'LogConfig',
    'Log',
    'LOG_CONFIG_TRACE_CONSOLE_ONLY',
    'LOG_CONFIG_TRACE_FILE_ONLY',
    'LOG_CONFIG_TRACE',
    'LOG_CONFIG_INFO_CONSOLE_ONLY',
    'LOG_CONFIG_INFO_FILE_ONLY',
    'LOG_CONFIG_INFO',
    'LOG_CONFIG_WARNING_CONSOLE_ONLY',
    'LOG_CONFIG_WARNING_FILE_ONLY',
    'LOG_CONFIG_WARNING',
    'LOG_CONFIG_ERROR_CONSOLE_ONLY',
    'LOG_CONFIG_ERROR_FILE_ONLY',
    'LOG_CONFIG_ERROR',
    'LOG_CONFIG_CRITICAL_CONSOLE_ONLY',
    'LOG_CONFIG_CRITICAL_FILE_ONLY',
    'LOG_CONFIG_CRITICAL',
    # Decorators
    'TraceEnable',
    'ScopedLogEnable',
    'RuntimeTypeCheckEnable'
]
# Import everything exported from the top level module
from .vimba import Vimba
from .camera import AccessMode, PersistType, Camera, CameraChangeHandler, CameraEvent, FrameHandler
from .interface import Interface, InterfaceType, InterfaceChangeHandler, InterfaceEvent
from .frame import PixelFormat, Frame, Debayer, intersect_pixel_formats, MONO_PIXEL_FORMATS, \
BAYER_PIXEL_FORMATS, RGB_PIXEL_FORMATS, RGBA_PIXEL_FORMATS, BGR_PIXEL_FORMATS, \
BGRA_PIXEL_FORMATS, YUV_PIXEL_FORMATS, YCBCR_PIXEL_FORMATS, \
COLOR_PIXEL_FORMATS, OPENCV_PIXEL_FORMATS, FrameStatus, FeatureTypes
from .error import VimbaSystemError, VimbaCameraError, VimbaInterfaceError, VimbaFeatureError, \
VimbaTimeout
from .feature import IntFeature, FloatFeature, StringFeature, BoolFeature, EnumEntry, EnumFeature, \
CommandFeature, RawFeature
from .util import Log, LogLevel, LogConfig, LOG_CONFIG_TRACE_CONSOLE_ONLY, \
LOG_CONFIG_TRACE_FILE_ONLY, LOG_CONFIG_TRACE, LOG_CONFIG_INFO_CONSOLE_ONLY, \
LOG_CONFIG_INFO_FILE_ONLY, LOG_CONFIG_INFO, LOG_CONFIG_WARNING_CONSOLE_ONLY, \
LOG_CONFIG_WARNING_FILE_ONLY, LOG_CONFIG_WARNING, LOG_CONFIG_ERROR_CONSOLE_ONLY, \
LOG_CONFIG_ERROR_FILE_ONLY, LOG_CONFIG_ERROR, LOG_CONFIG_CRITICAL_CONSOLE_ONLY, \
LOG_CONFIG_CRITICAL_FILE_ONLY, LOG_CONFIG_CRITICAL, ScopedLogEnable, \
TraceEnable, RuntimeTypeCheckEnable
| [
"florian.klostermann@alliedvision.com"
] | florian.klostermann@alliedvision.com |
139a3a9e6acd217e8b8f6f1cb1fff6560717ae03 | 50881c040f81dcb6b9dd63d40fc10eba8dfed633 | /python_playground/python_mythical_creatures/test/test_Unicorn.py | 9b076b4365716e9b41af03c323c8c3423c41c019 | [] | no_license | SiCuellar/playground | 1eee2582d69514fbfc9e37041d1f15e34536da97 | c9a40cd9c0cabcbeec0f273e16a576a92471c87b | refs/heads/master | 2020-09-09T22:18:41.850080 | 2019-12-13T05:17:31 | 2019-12-13T05:17:31 | 221,584,612 | 0 | 0 | null | 2019-12-12T03:11:18 | 2019-11-14T01:25:29 | Python | UTF-8 | Python | false | false | 243 | py | import unittest
def test_sum():
assert sum([1, 2, 3]) == 6, "Should be 6"
def test_sum_tuple():
assert sum((1, 2, 2)) == 5, "Should be 6"
if __name__ == "__main__":
test_sum()
test_sum_tuple()
print("Everything passed")
| [
"cuellarsilvestre@gmail.com"
] | cuellarsilvestre@gmail.com |
62da749d8fe1776099720894ebd45484afc28052 | 7a1d09aa5caec2ca7964f6561a3e255de6c1fc78 | /Python Course/dice_game.py | 628b9ad3417fd8b418b6a64209c2106daad2a21f | [] | no_license | TheKingInDaNorth/practical_python_for_beginners | 0d8799678143d839748345754e78006063a5c4d9 | 5e7b6bd8357aa9a18b4816d5ed0a2911395a65e5 | refs/heads/main | 2023-04-03T10:01:21.689997 | 2021-03-31T16:45:42 | 2021-03-31T16:45:42 | 348,765,112 | 0 | 0 | null | 2021-03-31T16:45:43 | 2021-03-17T15:46:08 | Python | UTF-8 | Python | false | false | 495 | py | import random
def roll_dice():
    """Simulate rolling two six-sided dice and return their combined total (2-12)."""
    return random.randint(1, 6) + random.randint(1, 6)
def main():
    """Read two player names, roll two dice for each, and announce the winner (or a tie)."""
    player1 = input("Enter player 1's name: ")
    player2 = input("Enter player 2's name: ")
    roll1 = roll_dice()
    roll2 = roll_dice()
    print(player1, 'rolled', roll1)
    print(player2, 'rolled', roll2)
    # Higher total wins; equal totals tie.
    if roll1 > roll2:
        print(player1, 'wins!')
    elif roll2 > roll1:
        print(player2, 'wins!')
    else:
        print('You tie')

main()
"60161055+TheKingInDaNorth@users.noreply.github.com"
] | 60161055+TheKingInDaNorth@users.noreply.github.com |
35662ad32cc491d6c5f85ea17467bd46906d4f1b | 04a28b3589e6f6a13d9b181b37fb9bf18129b4d6 | /testing.py | aaac232ed66df0c32459a677cbc73c7ddd05786f | [
"MIT"
] | permissive | achen353/facial-landmarks | e31184c8d6060f44b96fbf441ef6c36d254b284d | b49a3a112d9caa66d912e88674ec94dc4c911515 | refs/heads/master | 2023-08-24T22:18:03.269554 | 2021-09-30T15:53:09 | 2021-09-30T15:53:09 | 412,122,019 | 0 | 0 | MIT | 2021-09-30T15:53:09 | 2021-09-30T15:34:55 | null | UTF-8 | Python | false | false | 2,955 | py | '''
This script compare the results for different face detection models
and it's result for facial landmarks
'''
from imutils import face_utils, video, resize
import dlib
import numpy as np
import cv2
import argparse
# Model files: dlib 68-point landmark weights (W) and the Caffe SSD face
# detector definition (P) + weights (M). T is the SSD confidence cutoff.
W = './shape_predictor_68_face_landmarks.dat'
P = '../face_detection/deploy.prototxt.txt'
M = '../face_detection/res10_300x300_ssd_iter_140000.caffemodel'
T = 0.6
predictor = dlib.shape_predictor(W)
image = cv2.imread("/home/keyur-r/image_data/keyur.jpeg")  # NOTE(review): hard-coded local path
# image = resize(image, height=600)
# Converting the image to gray scale (both detectors below run on grayscale).
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# This is based on SSD deep learning pretrained model
dl_detector = cv2.dnn.readNetFromCaffe(P, M)
hog_detector = dlib.get_frontal_face_detector()
# --- Pass 1: HOG face detection, landmarks drawn in blue-ish (205, 92, 92) ---
rects = hog_detector(gray, 0)
# For each detected face, find the landmark.
for (i, rect) in enumerate(rects):
    # Finding points for rectangle to draw on face
    x1, y1, x2, y2, w, h = rect.left(), rect.top(), rect.right() + \
        1, rect.bottom() + 1, rect.width(), rect.height()
    print(x1, y1, x2, y2)
    # Make the prediction and transform it to a numpy array.
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    cv2.rectangle(image, (x1, y1), (x2, y2), (205, 92, 92), 2)
    # Draw all of the found coordinate points (x, y) on our image.
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (205, 92, 92), -1)
# --- Pass 2: SSD (deep learning) face detection, landmarks drawn in red ---
# blobFromImage resizes to 300x300 and subtracts the BGR mean (104, 177, 123):
# https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#ga29f34df9376379a603acd8df581ac8d7
inputBlob = cv2.dnn.blobFromImage(cv2.resize(
    image, (300, 300)), 1, (300, 300), (104, 177, 123))
dl_detector.setInput(inputBlob)
detections = dl_detector.forward()
for i in range(0, detections.shape[2]):
    # Probability of prediction; skip weak detections below threshold T.
    prediction_score = detections[0, 0, i, 2]
    if prediction_score < T:
        continue
    # Finding height and width of frame
    (h, w) = image.shape[:2]
    # compute the (x, y)-coordinates of the bounding box for the
    # object (the network outputs coordinates normalized to [0, 1])
    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
    (x1, y1, x2, y2) = box.astype("int")
    # Manual fudge of the SSD box to better match dlib's expected crop.
    y1, x2 = int(y1 * 1.15), int(x2 * 1.05)
    print(x1, y1, x2, y2)
    # Make the prediction and transform it to a numpy array.
    shape = predictor(gray, dlib.rectangle(left=x1, top=y1, right=x2, bottom=y2))
    shape = face_utils.shape_to_np(shape)
    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    # Draw all of the found coordinate points (x, y) on our image.
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (0, 0, 255), -1)
# Legend in the top-right corner, then show the combined result.
img_height, img_width = image.shape[:2]
cv2.putText(image, "HOG", (img_width - 200, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
            (205, 92, 92), 2)
cv2.putText(image, "DL", (img_width - 100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
            (0, 0, 255), 2)
# show the output frame
cv2.imshow("Facial Landmarks", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"keyur.rathod@zymr.com"
] | keyur.rathod@zymr.com |
55b6f93691b7de5fd7566c9b56cad59dbd44da7d | 8c86ff06a497951d3dce7013d788d6c66d81ab95 | /bot/forms.py | 819243766716eb0a105e982fb307a7918c63cef1 | [] | no_license | JbFiras/text_to_speech_django | 2748277cf970a5b064b838c44bd5778882e2e750 | 5f9bc481600f0524571fad47247c530e96605157 | refs/heads/master | 2023-06-03T06:26:27.405605 | 2021-06-23T17:45:59 | 2021-06-23T17:45:59 | 379,662,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
class CreateUserForm(UserCreationForm):
    """Registration form: Django's stock UserCreationForm plus an email field."""
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
| [
"contact@firas-jebari.tn"
] | contact@firas-jebari.tn |
cb62f6b3b1e63ef61da4306d45b76f864602ac8a | a8cd81b83431445d05ebb01640d19defde627021 | /services/service-pick&drop/src/api/v1/models.py | d5eaf6f929c8bf6f13362276a9f77bb820ab074c | [
"MIT"
] | permissive | Beracah-Group/docker-microservices | 9c19856e34650168df24b02a618ea6faa3e2c251 | 2876b05ba585772e97746a11845b64bd4ede61cb | refs/heads/dashboard-service | 2023-02-08T06:15:40.912190 | 2020-02-28T16:51:04 | 2020-02-28T16:51:04 | 241,168,010 | 1 | 0 | null | 2023-02-02T03:27:38 | 2020-02-17T17:30:41 | Python | UTF-8 | Python | false | false | 1,413 | py | # import modules
from datetime import datetime
from src.api.__init__ import databases
# washing class model with methods
class Pickanddrop(databases.Model):
    """Base service model using single-table polymorphic inheritance.

    The `type` column discriminates between the concrete service kinds
    (pickanddrop / selfdrop / homeservice) defined below.
    """
    __tablename__ = 'Pickanddrop'
    id = databases.Column(databases.Integer, primary_key=True, autoincrement=True)
    name = databases.Column(databases.String(20))
    price = databases.Column(databases.Integer)
    description = databases.Column(databases.String(300))
    # Bug fix: the defaults were written as `datetime.utcnow()` (called once at
    # class-definition time, freezing every row at import time). They must be
    # the callable itself so SQLAlchemy evaluates it per insert/update.
    date_created = databases.Column(databases.DateTime, default=datetime.utcnow)
    date_modified = databases.Column(databases.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    type = databases.Column(databases.String(50))
    __mapper_args__ = {
        'polymorphic_on': type,
        'polymorphic_identity': 'Pickanddrop'
    }
    def save(self):
        """Add this instance to the session and commit immediately."""
        databases.session.add(self)
        databases.session.commit()
    def to_json(self):
        """Serialize the public fields (timestamps deliberately omitted)."""
        return {
            'id': self.id,
            'name': self.name,
            'price': self.price,
            'description': self.description
        }
class Pickdrop(Pickanddrop):
    """Pick-and-drop service variant (discriminator value 'pickanddrop')."""
    __mapper_args__ = {
        'polymorphic_identity': 'pickanddrop'
    }
class Selfdrop(Pickanddrop):
    """Self-drop service variant (discriminator value 'selfdrop')."""
    __mapper_args__ = {
        'polymorphic_identity': 'selfdrop'
    }
class Homeservice(Pickanddrop):
    """Home-service variant (discriminator value 'homeservice')."""
    __mapper_args__ = {
        'polymorphic_identity': 'homeservice'
    }
| [
"businge.scott@andela.com"
] | businge.scott@andela.com |
bfb068a116423f3d92a9952c2c96297fc5b2901b | 49ab2cb193abfdf52ca64370a7dc5aad3537440e | /utilities/util.py | 1608a7bba3013edeb4e2f673c15bc44c3cf29de4 | [] | no_license | savitabadhe/sample | 9134558d0563e5291114039d76792077c50eef11 | c163f11eccbb35e3514c5897110190af5ad60037 | refs/heads/master | 2022-11-17T08:47:21.005387 | 2020-07-10T12:53:16 | 2020-07-10T12:53:16 | 274,578,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | """
@package utilities
Util class implementation
All most commonly used utilities should be implemented in this class
Example:
name = self.util.getUniqueName()
"""
import time
import traceback
import random, string
import xlrd
import utilities.custom_logger as cl
import logging
class Util(object):
    """Commonly used test utilities (see module docstring for usage)."""

    log = cl.customLogger(logging.INFO)

    # Default workbook used when read_excel() is called without a path.
    DEFAULT_EXCEL_PATH = "C:\\Users\\savita.badhe\\python_workspace\\Python_selenium_Ass\\tests\\sample.xls"

    def read_excel(self, file_path=None):
        """Open an Excel workbook and return the text in cell (0, 0) of the first sheet.

        Generalized: the path was hard-coded; it is now a parameter that
        defaults to DEFAULT_EXCEL_PATH, so existing callers are unaffected.

        Parameters:
            file_path: workbook to open; defaults to DEFAULT_EXCEL_PATH.
        Returns:
            The value of cell (0, 0) on the first worksheet.
        """
        if file_path is None:
            file_path = self.DEFAULT_EXCEL_PATH
        book = xlrd.open_workbook(file_path)
        print("The number of worksheets is", book.nsheets)
        print("Worksheet name(s):", book.sheet_names())
        sh = book.sheet_by_index(0)
        text = sh.cell_value(0, 0)
        print(text)
        return text

    def verifyTextMatch(self, actualText, expectedText):
        """
        Verify that two strings match, ignoring case.

        Parameters:
            actualText: Actual text from the application UI
            expectedText: Expected text
        Returns:
            True when the strings match case-insensitively, else False.
        """
        self.log.info("Actual Text From Application Web UI --> :: " + actualText)
        self.log.info("Expected Text From Application Web UI --> :: " + expectedText)
        if actualText.lower() == expectedText.lower():
            self.log.info("### VERIFICATION MATCHED !!!")
            return True
        else:
            self.log.info("### VERIFICATION DOES NOT MATCHED !!!")
            return False
| [
"savita.badhe@nitorinfotech.net"
] | savita.badhe@nitorinfotech.net |
abc23afd2529d451231178fd83d1f7d528a36c60 | 9a1c130a5f1e9957560e22b09e7e3db5af65180b | /hw06/david/G02.py | 73d4a66dbf6da19cddfd367f0e5ee768fb054af2 | [] | no_license | TheRiseOfDavid/NTUTcs_media | 61df3999bb4633d6b4b8f329be107ff76819a5ed | cacdd5bd0a009cedc2c2651d725f789f2a38add0 | refs/heads/master | 2023-05-30T10:51:34.160750 | 2021-06-13T05:21:09 | 2021-06-13T05:21:09 | 344,658,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 18 15:57:30 2021
@author: user
"""
import cv2
import numpy as np
from skimage.feature import hog
from sklearn.datasets import fetch_lfw_people
from sklearn import svm
from scipy.cluster.vq import kmeans, vq
from sklearn.model_selection import train_test_split
import hw06_fn
# Dataset split: 100 images per class, 80 for training, last 20 for testing.
data_amount = 100
train_amount = 80
test_amount = 20
# hw06_fn.read returns a list of images loaded from the numbered files.
dogs = hw06_fn.read("../resize_dog/dog_%.3d.jpg", data_amount)
cats = hw06_fn.read("../resize_cat/cat_%.3d.jpg", data_amount)
#dogs = hw06_fn.read("../Doraemon/images (%d).jpg", data_amount)
#cats = hw06_fn.read("../conan/images (%d).jpg", data_amount)
# SIFT descriptors per image, then bag-of-visual-words features via k-means
# (20 clusters -- presumably a 20-bin histogram per image; confirm in hw06_fn).
sift_dogs = hw06_fn.sift(dogs)
sift_cats = hw06_fn.sift(cats)
train_sift = sift_dogs[:train_amount] + sift_cats[:train_amount]
test_sift = sift_dogs[-test_amount:] + sift_cats[-test_amount:]
train_features = hw06_fn.kmeans_return_features(20, train_sift)
test_features = hw06_fn.kmeans_return_features(20, test_sift)
# Labels: 0 = dog, 1 = cat, in the same order as the feature lists above.
train_target = [0] * train_amount + [1] * train_amount
test_target = [0] * test_amount + [1] * test_amount
# Linear SVM classifier; report accuracy on both splits.
clf = svm.SVC(kernel="linear", C=1, gamma="auto")
clf.fit(train_features, train_target)
print("accuracy")
print("train:", clf.score(train_features, train_target))
print("test:", clf.score(test_features, test_target))
| [
"david53133@gmail.com"
] | david53133@gmail.com |
8ebe95e658fa81d2c57f56149b95418881155e43 | 89001e83b276f8906fd3cc08a9feb6e3a172d178 | /doc/publishconf.py | a7e7ef4ea5ca7bb247d8465c50707871edb7965a | [
"Apache-2.0"
] | permissive | incf-nidash/nidm-specs | 9bab2b16a1c6d75ff40e9ef9671591887ee751d3 | b568043b81e75110d63b994c00aa17275a5f4614 | refs/heads/master | 2023-04-05T15:20:31.467970 | 2023-03-22T15:46:01 | 2023-03-22T15:46:01 | 8,960,650 | 18 | 9 | NOASSERTION | 2023-03-22T15:46:03 | 2013-03-22T20:47:55 | Python | UTF-8 | Python | false | false | 530 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Production overrides on top of pelicanconf: absolute URLs rooted at the
# public site, Atom feeds enabled, and a clean output directory per build.
SITEURL = 'http://nidm.nidash.org'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| [
"nolan.nichols@gmail.com"
] | nolan.nichols@gmail.com |
2dedac0723fde2c7d884eb9ada3ecbbc4786f727 | 1d3258de931d7695498e069e5e06deccadc552d9 | /mpg_eightball/wsgi.py | c273bd2fe3e9fb0c18f91fc9f01c4daed140c23c | [] | no_license | ckz8780/mpg_eightball | cf81cd50dde922c68dc3e4691d32e00f78e2c4cf | db84b70fb5e6ce953d80fcb76f29f78948a6bdc9 | refs/heads/master | 2020-03-21T09:07:41.259791 | 2018-06-25T19:57:05 | 2018-06-25T19:57:05 | 138,383,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for mpg_eightball project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (if not already set) before
# building the WSGI callable that the server will invoke.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpg_eightball.settings")
application = get_wsgi_application()
| [
"ckz8780@gmail.com"
] | ckz8780@gmail.com |
a723eaff6f4ec736d6302d682a44094bb2814bed | 5f4b1457c3f38ab9b3b62b748fc852b0526a668d | /blog/views.py | 27fa09cbbc903ffbb676d054876a77d007be1107 | [] | no_license | AnindhaxNill/django_project2 | 81ae20f89cce259a1e876528996aebc2d39fe12b | 809de48dd7316bca60c44855efe3fbd9c737a07c | refs/heads/master | 2023-05-04T13:36:58.552464 | 2021-05-25T16:32:26 | 2021-05-25T16:32:26 | 370,760,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from django.shortcuts import render,get_object_or_404
from django.contrib.auth.models import User
from .models import *
from django.views.generic import ListView,DetailView,CreateView,UpdateView,DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin,UserPassesTestMixin
def home(request):
    """Render the home page with every post (unpaginated, function-based legacy view)."""
    context = {
        'posts': Post.objects.all()
    }
    return render(request, 'blog/home.html', context)
class PostListView(ListView):
    """Paginated list of all posts, newest first."""
    template_name = 'blog/home.html'
    context_object_name = 'posts'
    model = Post
    ordering= ['-date_posted']
    paginate_by = 5
class UserPostListView(ListView):
    """Paginated list of one author's posts; 404s if the username is unknown."""
    template_name = 'blog/user_posts.html'
    context_object_name = 'posts'
    model = Post
    paginate_by = 5
    def get_queryset(self):
        # Username comes from the URL kwargs; newest posts first.
        user = get_object_or_404(User, username = self.kwargs.get('username'))
        return Post.objects.filter(author = user).order_by('-date_posted')
class PostDetailView(DetailView):
    """Single-post detail page (default template/context conventions)."""
    model = Post
class PostCreateView(LoginRequiredMixin,CreateView):
    """Create a post; the logged-in user is recorded as its author."""
    model = Post
    fields = ['title','content']
    def form_valid(self,form):
        form.instance.author = self.request.user
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
    """Edit a post; only its author passes the permission test."""
    model = Post
    fields = ['title','content']
    def form_valid(self,form):
        form.instance.author = self.request.user
        return super().form_valid(form)
    def test_func(self):
        # UserPassesTestMixin hook: only the author may edit.
        post = self.get_object()
        if self.request.user == post.author:
            return True
        return False
class PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):
    """Delete a post (author only), then redirect to the home page."""
    model = Post
    success_url = '/'
    def test_func(self):
        # UserPassesTestMixin hook: only the author may delete.
        post = self.get_object()
        if self.request.user == post.author:
            return True
        return False
def about(request):
    """Render the static about page."""
    return render(request, 'blog/about.html', {'title': 'About'})
| [
"anindha004@gmail.com"
] | anindha004@gmail.com |
e3d7d971d4d1ab830a03f5af3a1ae0ece4a0d061 | 4c802d3a303fbc8261787ef9d283d3a69a16ab84 | /mocbackend/migrations/0014_auto_20180816_1417.py | 0b1474f0d5a3f3076cfb22b9d4eef3fa6a5a6966 | [] | no_license | Gong-hr/mozaik-veza-backend | 2d68b2c1fdd121dabfd4b0a34f5cb06cc1386334 | 63fc8737b4a98da50afd74adf6bf7b31207f2ef0 | refs/heads/master | 2023-02-23T07:40:34.986041 | 2021-01-25T05:52:58 | 2021-01-25T05:52:58 | 303,319,971 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-16 12:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename UserEntity.backend_user to `owner`
    and rewrite the unique-together constraint to use the new name.
    Do not hand-edit beyond comments -- Django generated this file.
    """
    dependencies = [
        ('mocbackend', '0013_auto_20180816_1417'),
    ]
    operations = [
        migrations.RenameField(
            model_name='userentity',
            old_name='backend_user',
            new_name='owner',
        ),
        migrations.AlterUniqueTogether(
            name='userentity',
            unique_together=set([('owner', 'entity')]),
        ),
    ]
| [
"drutalj@abacusstudio.hr"
] | drutalj@abacusstudio.hr |
006775ffac8c01ec3e6d30f790bdc8716d9a52d4 | 59e0e826aea3dc8b210600ae4fce7709c3ec4e65 | /files/test_base.py | 3b3e97d6733510885542c1da5e33bcb09edc7d25 | [] | no_license | lhy11009/aspectLib | d2be904a4df20445b57525a4cbe573f7d9ebfdcf | d919cadce2b57811351c0615d94da5c6ebfff800 | refs/heads/master | 2023-09-01T19:32:10.422521 | 2023-08-24T09:24:24 | 2023-08-24T09:24:24 | 241,186,934 | 0 | 1 | null | 2021-03-20T18:30:34 | 2020-02-17T19:11:32 | Python | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*-
r"""Test for foo.py
This outputs:
- test results to value of variable "test_dir"
This depends on:
- source files written from the value of "source_dir"
Examples of usage:
- default usage:
python -m pytest test_foo.py
descriptions:
every function is a separate test, combined usage with pytest module
"""
import os  # bug fix: this import was commented out, but os.path.* and os.mkdir below need it
# import pytest
# import filecmp # for compare file contents
# import numpy as np
# import shilofue.Foo as Foo  # import test module
# from shilofue.Utilities import
# from matplotlib import pyplot as plt
# from shutil import rmtree  # for remove directories

# Where this test writes its outputs, and where its fixtures live.
test_dir = ".test"
source_dir = os.path.join(os.path.dirname(__file__), 'fixtures', 'parse')

if not os.path.isdir(test_dir):
    # check we have the directory to store test result
    os.mkdir(test_dir)

def test_foo():
    '''
    Placeholder test (template); replace the trivial assert with real checks.
    Asserts:
        True (always passes)
    '''
    # assert something
    assert(True)

# notes
# to check for error message
# with pytest.raises(SomeError) as _excinfo:
#    foo()
#    assert(r'foo' in str(_excinfo.value))
# assert the contents of file
# assert(filecmp.cmp(out_path, std_path))
| [
"hylli@ucdavis.edu"
] | hylli@ucdavis.edu |
bffefbdb1d03e1802fc30007513c9749facb031c | 7f9beb228b33002a7a31abeb0341f1114f48df37 | /main.py | 0b6fa9355b8e1d1ec6bfca7fb98742ee04ace9ba | [
"Apache-2.0"
] | permissive | finkelsteinj/poker-ai | c4aabe1853ad2e7a876f863caf7e4e035d9fa270 | a2381584187e79b75feb41b1bfc198cb632818da | refs/heads/main | 2023-03-10T10:24:02.981789 | 2021-02-21T17:13:18 | 2021-02-21T17:13:18 | 340,676,038 | 0 | 2 | Apache-2.0 | 2021-02-21T16:46:01 | 2021-02-20T14:38:05 | Python | UTF-8 | Python | false | false | 239 | py | from Poker import Poker
from Player import Player
from Card import Card
def getInputs():
    """Prompt for the player count and store it in the module-global `numPlayers`."""
    global numPlayers
    numPlayers = int(input('How many players are playing? '))

if __name__ == '__main__':
    # Ask for the player count, then start a game with that many players.
    getInputs()
    Poker(numPlayers)
"finkelsteinj11@gmail.com"
] | finkelsteinj11@gmail.com |
28cb89506c201fba276f34362a75f76ce01ffe95 | f6d2385cd8eb896e17c5e72ac75abe6a0ba28659 | /greffe1/essais.py | 9f4aebeaa116aa85140e83a9274bb4de511e3b61 | [] | no_license | pastrouveedespeudo/greffegreffe | fba94c9169c3d021714eabf1a45812ca762cfe9d | 8ebe4d555246aed26e705671014a260a23148a6a | refs/heads/master | 2020-06-12T14:50:17.590418 | 2019-07-04T14:01:25 | 2019-07-04T14:01:25 | 194,335,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from fonction import function
from fonction import ecrire
from fonction import lecture
from fonction import ecrire2
# Fetch the page, persist it, read it back, then write the display copy.
page = 'https://fr.yahoo.com/?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbS8&guce_referrer_sig=AQAAAMdlxFFv1CpIEQ0VuhLMZl4pjm_0Ur2KGpLoKBkg4lBqmzqdwLxulK-E29QEXf815EL1VsURfRYB-M3USUSs2fFR6tT63nGaOfQyk5mY4V9AltWx-EzQiluy32sS5KxDY0lQRsL6YmEXNMq4qWdOpBoyt2T6KtkfK9Bce2Dt8ViB'
page = function(page)
page = ecrire(page)
page_affichage = lecture()
# Bug fix: the original called `ececrire2(...)`, a typo for the imported
# name `ecrire2`, which raised NameError at runtime.
ecrire2(page_affichage)
| [
"noreply@github.com"
] | pastrouveedespeudo.noreply@github.com |
ea0b3a58092fa895ad9e6bad059c2894b58aaedd | 99818c8b0296ff1b15b832f2474d0861b508f53a | /src/util/numpy_util.py | a0362625874bc1c1b73b8025558f12d4f0be2e1f | [
"Apache-2.0"
] | permissive | sqsxwj520/DeepNotebooks | 97ecb81ffc9cace876586127a1dbd72a829b52b2 | 036c807753254504c8546fbdfc91a86cd94e6a3d | refs/heads/master | 2023-04-26T13:22:07.612137 | 2019-09-15T14:59:07 | 2019-09-15T14:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import numpy as np
def find_nearest(array, value):
    """For each entry of `value`, return the index of the nearest element of `array`.

    Both arguments are flattened; the result is an index array with one
    entry per element of `value`.
    """
    columns = np.reshape(array, (-1, 1))
    rows = np.reshape(value, (1, -1))
    distances = np.abs(columns - rows)
    return distances.argmin(axis=0)
| [
"claas@voelcker.net"
] | claas@voelcker.net |
ca7688703ec43c89f230e5fb981e562d285ed52e | f27e1531a14c4f39766dcfcf0cc9813792ad40a6 | /smile_recog.py | dfad86bb31b0128ecedfb72e4926bac44a8e2f70 | [] | no_license | neil98daftary/face-recognition | aeb154e2dc4a0b9a6b308e40e20f18c61c1ca4d8 | 5212a23c902e14b665ad14c77f105421ab6bafb4 | refs/heads/main | 2023-01-21T16:51:15.903959 | 2020-11-28T15:35:17 | 2020-11-28T15:35:17 | 316,658,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 10:28:53 2020
@author: neil
"""
import cv2
#get the haar features
# Haar cascades for faces, eyes, and smiles (XML files expected in the cwd).
face = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye = cv2.CascadeClassifier('haarcascade_eye.xml')
smile = cv2.CascadeClassifier('haarcascade_smile.xml')
# Defining a function that will do the detections
def detect(gray, frame):
    """Draw face (blue), eye (green) and smile (yellow) boxes on `frame`.

    `gray` is the grayscale copy used for detection; eyes and smiles are
    searched only inside each detected face region. Returns the annotated
    frame (mutated in place).
    """
    faces = face.detectMultiScale(gray, 1.3, 5)  # args: image, scale factor, min neighbors
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Region of interest: restrict the eye/smile search to the face box.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        eyes = eye.detectMultiScale(roi_gray, 1.1, 22)
        smiles = smile.detectMultiScale(roi_gray, 1.7, 22)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        for (ex, ey, ew, eh) in smiles:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 255), 2)
    return frame
# Webcam loop: annotate each captured frame until 'q' is pressed.
video_capture = cv2.VideoCapture(0)
while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
"neil98daftary@gmail.com"
] | neil98daftary@gmail.com |
3b7ade9e1d0e877ea632dc23877fbdda42be086b | 8805967dfb30ff6ac4f78211c772a703137439f9 | /files/review/movies2/entertainment_center.py | 7d498bde24f9cffae8b6611e418700c99ee7aafa | [] | no_license | sophieRO1/python_projects | 681d0c733a33b508ae2d7d4012dcd85115145b06 | 6d2877a202cea4f0be6d3a0648f56854587b97aa | refs/heads/master | 2020-03-22T03:06:04.647429 | 2018-07-02T08:29:25 | 2018-07-02T08:29:25 | 139,413,349 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | import media
import fresh_tomatoes
# Movie instances: media.Movie(title, storyline, poster URL, trailer URL).
cindrella = media.Movie('cindrella',
                        'a poor girl who making the prnce fallinng in love with her',
                        'file:///C:/Users/Bonjour/Desktop/assepoester-1.20170303020003.jpg',
                        'https://www.youtube.com/watch?v=20DF6U1HcGQ')
black_panthar = media.Movie('black panthar',
                            'a cool movie is it a sequence of marva',
                            'file:///E:/MY%20LIFE/girl.jpg',
                            'https://www.youtube.com/watch?v=xjDjIWPwcPU')
game_night = media.Movie('game night',
                         ' a couple who gone through a lot of facts in thier game night ',
                         'file:///E:/MY%20LIFE/pexels-photo.jpg',
                         'https://www.youtube.com/watch?v=qmxMAdV6s4U')
the_little_mermaid = media.Movie('the little mermaid',
                                 'a triditional story of the mermaid',
                                 'file:///E:/MY%20LIFE/pexels-photo-54203.jpg',
                                 'https://www.youtube.com/watch?v=e4LfNLtVQqE')
# print game_night.show_trailer()
# Build the movie list and generate/open the static gallery page.
movies = [cindrella,black_panthar, game_night,the_little_mermaid ]
fresh_tomatoes.open_movies_page(movies)
| [
"hwauie23@gmail.com"
] | hwauie23@gmail.com |
2f069e6b51314ab6cffe53700121d67f9d5748e8 | 67dfe88ed93ea8d08e62cf06ff5a32153f507cb6 | /libs/utils/__init__.py | 43a258525804099409d3071f637ef9fe02d7ec4f | [
"MIT"
] | permissive | yashkhem1/SPNet | b2e23103b6a9688e3466a07cdcc3bb20e2264859 | 50d18b569d2be9ed440849baf2d85b78cfe05944 | refs/heads/master | 2023-02-14T21:55:49.182285 | 2021-01-03T10:42:43 | 2021-01-03T10:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from .loss import CrossEntropyLoss2d
from .metric import scores
from .metric2 import scores_gzsl
from .crf import dense_crf
| [
"subhc4@gmail.com"
] | subhc4@gmail.com |
4c4db37d9d99aadea067f84ef5a02562e139dd31 | 41cc7b614a4d8435de36a936582c2165da4163b5 | /session 8/bài tập 19.py | 8214e51f1d750115af6d41fd88da94a455f6ed50 | [] | no_license | dgvu/NguyenHuyDuongVu-T4T10 | ab55d11b4900d63a571d186f73a35bc9746648a7 | a12e99a59f47b1c9ceb2e3437049e34d2967ce79 | refs/heads/master | 2020-04-07T02:46:23.603237 | 2019-05-10T16:29:53 | 2019-05-10T16:29:53 | 157,989,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | a = [45, 67, 56, 78]
for i in range(len(a)):
print(i + 1, ", ", a[i])
x = input("Enter your new score ? ")
a.append(x)
for i in range(len(a)):
print(i + 1, ", ", a[i]) | [
"duongvu090903@gmail.com"
] | duongvu090903@gmail.com |
e72e6cb7adb3aad553507b8048e09da634d379a9 | a8d7e94a1178ac59bb60524421d39c80a5f3080a | /testing/client_app/pages/login/login_factory.py | f1ff5a8c5f7e36f7e37bffe08f8442bd5d3c4218 | [
"MIT"
] | permissive | griffinmilsap/gigantum-client | 5d22a3e3690437916f1f670f3ea341c7fb12db13 | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | refs/heads/master | 2021-07-11T21:41:11.420668 | 2021-06-16T13:33:44 | 2021-06-16T13:33:44 | 249,494,372 | 0 | 0 | MIT | 2020-03-23T17:10:21 | 2020-03-23T17:10:20 | null | UTF-8 | Python | false | false | 704 | py | from client_app.pages.login.log_in_page import AuthLogInPage
from client_app.pages.login.ldap_login_page import LdapLogInPage
from client_app.pages.login.internal_login_page import InternalLogInPage
class LoginFactory:
""" A factory class that creates the LoginPage based on the login type provided"""
def load_login_page(self, login_type, driver):
if login_type == 'auth0':
login_page = AuthLogInPage(driver)
elif login_type == 'ldap':
login_page = LdapLogInPage(driver)
elif login_type == 'internal':
login_page = InternalLogInPage(driver)
else:
raise Exception("Invalid login type")
return login_page
| [
"noreply@github.com"
] | griffinmilsap.noreply@github.com |
79258c9426d558486274c453e5f1e7bd0cbb4a0a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/validPalindrome_20200803230103.py | 15299f1055653cb18098fa47a7ef7af4c4238410 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import re
def palindrome(str):
if len(str) == 0:
return True
actualStr = str.lower()
str = str.lower()
cleanStr = re.sub(r"[,.;:@#?!&$]+",' ',str)
print('cleanStr',cleanStr)
str = str.split(" ")
str.reverse()
newArr = []
print(actualStr)
for i in str:
newArr.append(i[::-1])
print(newArr)
palindrome("A man, a plan, a canal: Panama")
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
918da380b47298fcdb7e1bfbb695a97994dd52ab | 87e5bee17b54293c3c1cdbd313fc6b00ac07b581 | /poll_manager/handler/urls.py | 11f337d1e2cb583f1859b950919a9d60d06528a3 | [] | no_license | smapl/poll_manager | 952e97926099656656a3972960983b24026f01bd | b738b130a57022f3e8160b321385b973942da8ff | refs/heads/main | 2023-02-17T12:07:51.519248 | 2021-01-10T21:07:13 | 2021-01-10T21:07:13 | 328,409,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from django.urls import path, include
from .views import active_polls, get_poll, send_poll_answer, get_passed_polls
# URL routes for the poll API; each endpoint is handled by the matching
# view callable imported above.
urlpatterns = [
    path("api/active_polls", active_polls, name="active_polls"),
    path("api/get_poll", get_poll, name="get_poll"),
    path("api/send_poll_answer", send_poll_answer, name="send_poll_answer"),
    path("api/get_passed_polls", get_passed_polls, name="get_passed_polls"),
]
| [
"a.mustafin00@mail.ru"
] | a.mustafin00@mail.ru |
64e49a9a1d5aac4d8e593274c3aaa18adc45a81d | f8062ca099ed4817b575abd7962077d574b0dbf9 | /dataarugentation/main3.py | 3a82b080b0fef1c0061cc0c663151fa8fcb4eb66 | [] | no_license | GRSEB9S/ai.challenge | 4c861ee98ea169a34129e0b72667a06436f7182d | f15f5ef36318dfefdb60c9c0479586717c18fb1a | refs/heads/master | 2021-08-23T19:49:58.393485 | 2017-12-06T08:53:53 | 2017-12-06T08:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,301 | py | import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
import cv2
ia.seed(1)
images = np.empty(shape=[2]);
# Example batch of images.
# The array has shape (32, 64, 64, 3) and dtype uint8.
f1 = '/home/frelam/ai_challenger_scene_train_20170904/caffe_train.txt'
import os
def seq_setting():
    """Build the imgaug augmentation pipeline used for training images.

    The augmenters are applied in the fixed order listed below
    (``random_order=False``).
    """
    augmenters = [
        iaa.Fliplr(0.5),                       # mirror horizontally with p=0.5
        iaa.Crop(percent=(0, 0.3)),            # random crop up to 30% per side
        # Blur roughly half of the images with a small random sigma.
        iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),
        # Strengthen or weaken the contrast of each image.
        iaa.ContrastNormalization((0.75, 1.5)),
        # Gaussian noise; for half the images the noise is sampled per
        # channel as well, which can shift colours, not just brightness.
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
        # Brighten/darken; per-channel sampling 20% of the time.
        iaa.Multiply((0.8, 1.2), per_channel=0.2),
        # Affine transforms: zoom, translate, rotate and shear.
        iaa.Affine(
            scale={"x": (0.6, 1.2), "y": (0.6, 1.2)},
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
            rotate=(-25, 25),
            shear=(-8, 8)
        ),
    ]
    return iaa.Sequential(augmenters, random_order=False)
def load_image_dir(image_txt):
    """Parse a caffe-style listing file of '<image>.jpg <label>' lines.

    image_txt -- path to a whitespace-separated text file whose first column
                 is an image file name and second column its class label.

    Returns (image_dir, image_label): two parallel numpy string arrays with
    the '.jpg' suffix stripped from the names and the raw label strings.

    Blank or malformed lines are skipped; the original readline() loop would
    have raised IndexError on a trailing blank line.
    """
    image_dir_list = []
    image_label_list = []
    with open(image_txt, 'r') as listing:
        for line in listing:
            fields = line.split()
            if len(fields) < 2:
                # Skip empty/truncated lines instead of crashing.
                continue
            image_dir_list.append(fields[0].replace('.jpg', ''))
            image_label_list.append(fields[1])
    return np.array(image_dir_list), np.array(image_label_list)
def load_batch_image(image_batch_dir):
    """Read one image from disk with OpenCV.

    image_batch_dir -- path to an image file.
    Returns the BGR ndarray, or None when cv2 cannot read the file
    (cv2.imread returns None on failure rather than raising).
    """
    images = cv2.imread(image_batch_dir)
    return images
def process_batch_image(dataset_root,dataarumation_root,
                        duplicate_num_per_image,image_dir_batch_array,
                        image_label_batch_array,seq):
    """Generator yielding one ia.Batch of duplicated images for augmentation.

    Each input image name is repeated duplicate_num_per_image times; the
    corresponding files are read from dataset_root and packed into a single
    imgaug Batch.  NOTE(review): label_for_save_array, dir_for_save and
    dir_for_write_image are built but never used or returned from this
    function, and the `seq` argument is unused here.
    """
    # Labels repeated once per duplicate (currently unused below).
    label_for_save = []
    for label in image_label_batch_array:
        for j in range(duplicate_num_per_image):
            label_for_save.append(label)
    label_for_save_array = np.array(label_for_save)
    # Output file names '<name>_<j>.jpg' (currently unused below).
    dir_for_save = []
    for dir in image_dir_batch_array:
        for j in range(duplicate_num_per_image):
            dir_for_save.append(dir + '_' + str(j)+'.jpg')
    # Source paths to read, one entry per duplicate.
    dir_for_read_image = []
    for dir in image_dir_batch_array:
        for j in range(duplicate_num_per_image):
            dir_for_read_image.append(dataset_root + dir +'.jpg')
    # Read all source images (cv2.imread yields None for unreadable files --
    # np.array below would then fail; presumably all inputs exist).
    imagess = [cv2.imread(dir) for dir in dir_for_read_image]
    # Destination paths for augmented output (currently unused below).
    dir_for_write_image = []
    for dir in image_dir_batch_array:
        for j in range(duplicate_num_per_image):
            dir_for_write_image.append(dataarumation_root + dir + '_' + str(j) + '.jpg')
    # Single batch containing every duplicated image, handed to BatchLoader.
    batch = ia.Batch(
        images=np.array(imagess, dtype=np.uint8)
    )
    yield batch
def main():
    """Drive the augmentation run: chunk the dataset listing into batches,
    feed each chunk through an imgaug BatchLoader and write an output
    listing of augmented file names and labels.

    NOTE(review): this function is broken as written -- `dirs` and `labels`
    (appended inside the batch loop) are never defined, so the loop raises
    NameError on its first iteration, and the BatchLoader results are never
    consumed.  Python 2 only (`xrange`, `print i`).
    """
    batch_size = 100
    output_txt = '/media/frelam/683cd494-d120-4dbd-81a4-eb3a90330106/ai_challange_scene_dataset_lmdb/aichallenger_arug20171116/dataarymentation/train_arug2.txt'
    dataset_root = '/home/frelam/ai_challenger_scene_train_20170904/train/scene_train_images_20170904/'
    dataarumation_root = '/media/frelam/683cd494-d120-4dbd-81a4-eb3a90330106/ai_challange_scene_dataset_lmdb/aichallenger_arug20171116/dataarymentation/scene_train_images_arug20171117/'
    duplicate_num_per_image = 10
    seq = seq_setting()
    #input:
    #    image_dir_array
    #    image_label_array
    image_dir_array,image_label_array = load_image_dir(f1)
    # Split names and labels into aligned chunks of batch_size entries.
    dir_chunk_array = [image_dir_array[x:x+batch_size] for x in xrange(0,len(image_dir_array),batch_size)]
    label_chunk_array = [image_label_array[x:x+batch_size] for x in xrange(0,len(image_label_array),batch_size)]
    dir_for_save = []
    label_for_save = []
    #batch process
    for i in range(len(dir_chunk_array)):
        batch_loader = ia.BatchLoader(process_batch_image(dataset_root,dataarumation_root,
                                    duplicate_num_per_image,dir_chunk_array[i],
                                    label_chunk_array[i],seq))
        dir_for_save.append(dirs)   # NOTE(review): `dirs` is undefined -> NameError
        label_for_save.append(labels)   # NOTE(review): `labels` is undefined
        print i
    # Write '<augmented name> <label>' lines for everything collected above.
    with open(output_txt, 'w') as output:
        for i in range(len(dir_for_save)):
            for j in range(len(dir_for_save[i])):
                output.write(dir_for_save[i][j] + ' ' + label_for_save[i][j] + '\n')
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | GRSEB9S.noreply@github.com |
64798f56924cb3a6122c1d8f1dcda32e5a4d0248 | cf540df0f31fdfa06eed34572d65261641988d63 | /feature_extractors.py | 3f446a2762de31336e7ea9ccd95138282c54c9b9 | [] | no_license | cmilke/HH4b_vbf_analysis | bcc0e45e511b538ce5a6c262b1cd88b1d52ff97e | 660219fa95f88351b14eb4c1624ca5f2c9036b42 | refs/heads/master | 2022-11-19T01:57:05.561780 | 2020-07-22T17:34:03 | 2020-07-22T17:34:03 | 279,408,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | import itertools
from uproot_methods import TLorentzVector as LV
# Branch names of the per-candidate four-vector components read from a row.
_fourvec_names = [ f'vbf_candidates_{v}' for v in ['pT', 'eta', 'phi', 'E'] ]
# Build one TLorentzVector per candidate jet by zipping the component arrays.
make_vector_list = lambda datarow: [ LV.from_ptetaphie(*vec) for vec in zip(*datarow[_fourvec_names]) ]
def valid_vbf(datarow):
    """Return True when the jet pair with the largest invariant mass has
    |delta-eta| > 3 (basic VBF topology requirement).

    Raises ValueError when the row holds fewer than two jets (max of an
    empty sequence), matching the original behaviour.
    """
    jets = make_vector_list(datarow)
    best_pair = max(
        ((a + b).mass, abs(a.eta - b.eta))
        for a, b in itertools.combinations(jets, 2)
    )
    return best_pair[1] > 3
def get_features_mjj_deta(datarow):
    """Return [m_jj, |delta-eta|] for the highest-invariant-mass jet pair,
    or [-1, -1] when the row holds fewer than two jets."""
    jets = make_vector_list(datarow)
    candidates = [
        ((a + b).mass, abs(a.eta - b.eta))
        for a, b in itertools.combinations(jets, 2)
    ]
    if not candidates:
        return [-1, -1]
    best_mjj, best_deta = max(candidates)
    return [best_mjj, best_deta]
def get_features_mjj_deta_fw(datarow):
    """Return [m_jj, |delta-eta|] of the highest-mass jet pair followed by
    the seven Fox-Wolfram moments FW1..FW7; nine -1 values when fewer than
    two jets are available."""
    jets = make_vector_list(datarow)
    candidates = [
        ((a + b).mass, abs(a.eta - b.eta))
        for a, b in itertools.combinations(jets, 2)
    ]
    if not candidates:
        return [-1] * 9
    best_mjj, best_deta = max(candidates)
    features = [best_mjj, best_deta]
    features += [datarow[f'FoxWolfram{fwi}'] for fwi in range(1, 8)]
    return features
def get_features_mjj_deta_fw_cent(datarow):
    """Return [m_jj, |delta-eta|, FW1..FW7, centrality] (10 values).

    m_jj/|delta-eta| come from the highest-mass jet pair; centrality is
    computed from the two leading-m_jj pairs plus the highest-pT extra jet
    and is -1 when fewer than three jets exist.  All 10 entries are -1 when
    the row holds fewer than two jets.
    NOTE(review): if the second-best pair shares both indices with the best
    pair, possible_additions is empty and indexing [0] below raises.
    """
    vector_list = make_vector_list(datarow)
    if len(vector_list) > 1:
        pair_list = [ (i,j) for i,j in itertools.combinations(vector_list, 2) ]
        mjj_deta_pair_list = [ ( (i+j).mass, abs(i.eta-j.eta) ) for i,j in pair_list]
        # Descending sort on (mass, deta) tuples; entry 0 is the best pair.
        mjj_deta_pair_list.sort(reverse=True)
        feature_list = [
            mjj_deta_pair_list[0][0],
            mjj_deta_pair_list[0][1]
        ]
        feature_list += [ datarow[f'FoxWolfram{fwi}'] for fwi in range(1,8) ]
        centrality = -1
        if len(vector_list) > 2:
            # Rank index pairs by invariant mass to locate the two best pairs.
            mjj_pairs = [ ( (vector_list[i]+vector_list[j]).mass, (i,j) ) for i,j in itertools.combinations(range(len(vector_list)), 2) ]
            mjj_pairs.sort(reverse=True)
            # Start from the best pair, then add the highest-pT jet from the
            # second-best pair that is not already chosen.
            chosen_jets = { i:vector_list[i] for i in mjj_pairs[0][1] }
            possible_additions = [ (i,vector_list[i]) for i in mjj_pairs[1][1] if i not in chosen_jets ]
            possible_additions.sort(key=lambda t: t[1].pt, reverse=True)
            chosen_jets[ possible_additions[0][0] ] = possible_additions[0][1]
            # Centrality: how close the middle jet's eta sits to the edge of
            # the eta interval spanned by the outer two jets.
            etas = sorted([ jet.eta for jet in chosen_jets.values() ])
            centrality = abs(2*(etas[1] - etas[0]) / (etas[2] - etas[0]) - 1)
        feature_list.append(centrality)
    else:
        feature_list = [-1]*10
    return feature_list
def get_features_mjjLSL_deta_cent_fw(datarow):
    """Return [m_jj, |deta|, subleading m_jj, subleading |deta|, centrality,
    FW1..FW7] (12 values).

    The leading entries come from the highest-mass pair; the subleading pair
    and centrality require at least three jets (otherwise -1).  All 12
    entries are -1 when the row holds fewer than two jets.
    NOTE(review): same caveat as the _fw_cent variant -- an empty
    possible_additions list would raise on the [0] index below.
    """
    vector_list = make_vector_list(datarow)
    if len(vector_list) > 1:
        pair_list = [ (i,j) for i,j in itertools.combinations(vector_list, 2) ]
        mjj_deta_pair_list = [ ( (i+j).mass, abs(i.eta-j.eta) ) for i,j in pair_list]
        # Descending sort on (mass, deta); entry 0 best, entry 1 subleading.
        mjj_deta_pair_list.sort(reverse=True)
        feature_list = [
            mjj_deta_pair_list[0][0],
            mjj_deta_pair_list[0][1]
        ]
        if len(vector_list) > 2:
            feature_list += [
                mjj_deta_pair_list[1][0],
                mjj_deta_pair_list[1][1]
            ]
            # Same three-jet selection and centrality as the _fw_cent variant.
            mjj_pairs = [ ( (vector_list[i]+vector_list[j]).mass, (i,j) ) for i,j in itertools.combinations(range(len(vector_list)), 2) ]
            mjj_pairs.sort(reverse=True)
            chosen_jets = { i:vector_list[i] for i in mjj_pairs[0][1] }
            possible_additions = [ (i,vector_list[i]) for i in mjj_pairs[1][1] if i not in chosen_jets ]
            possible_additions.sort(key=lambda t: t[1].pt, reverse=True)
            chosen_jets[ possible_additions[0][0] ] = possible_additions[0][1]
            etas = sorted([ jet.eta for jet in chosen_jets.values() ])
            centrality = abs(2*(etas[1] - etas[0]) / (etas[2] - etas[0]) - 1)
            feature_list.append(centrality)
        else:
            feature_list += [-1,-1,-1]
        feature_list += [ datarow[f'FoxWolfram{fwi}'] for fwi in range(1,8) ]
    else:
        feature_list = [-1]*12
    return feature_list
# Registry mapping feature-set names to their extractor callables; callers
# look up an extractor by name and apply it per data row.
Extractors = {
    'mjj-Deta': get_features_mjj_deta,
    'mjj-Deta-FW': get_features_mjj_deta_fw,
    'mjj-Deta-FW-Cent': get_features_mjj_deta_fw_cent,
    'mjjLSL-Deta-Cent-FW': get_features_mjjLSL_deta_cent_fw,
}
| [
"chrisdmilke@gmail.com"
] | chrisdmilke@gmail.com |
ec4f6c602cdd8f3760547226ea935b15fad574b7 | 499c97e3456d89ef6a73de7594f80af69703c153 | /code/code-py/Chap2_2_3_Pie Chart.py | 169d929e63fcc638c78118854f2dce69dfbc297a | [] | no_license | whatEpisteme/python | 8bffd9d07c8209e0a4200c68d30ea91bec2a663a | 81fdab986c02c6d1a4b2a8c46aeeaf3d3158bf92 | refs/heads/main | 2023-06-15T18:27:27.317388 | 2021-07-12T02:11:34 | 2021-07-12T02:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | #!/usr/bin/env python
# coding: utf-8
### 라이브러리 호출
import pandas as pd
import numpy as np
### 데이터 호출
# 상위 폴더로 이동 후 data 폴더로 이동
path = '../data/'
# 데이터 호출
df = pd.read_csv(path + 'Sales data/Data.csv')
### 데이터 변수 생성 및 정렬
# 연도, 월 변수 생성
df['year'] = df['OrderDate'].str.slice(start = 0, stop = 4)
df['month'] = df['OrderDate'].str.slice(start = 5, stop = 7)
# 데이터 정렬
df = df.sort_values(by = ['Region','Channel','Category','Item Type','year','month','Gender'])
#### 소수점 출력 설정
# display 옵션을 이용하여 실수(소수점 3자리) 설정 - 지수표현식 해제
pd.options.display.float_format = '{:.2f}'.format
### 시각화를 위한 데이터 가공
# 2020년도 연령별 매출액 비교
df_g = df[df['year'] == '2020'].copy()
# 연령별 매출 합계 산출
df_g1 = df_g.loc[:,['AgeGroup','Revenue']].groupby(by = ['AgeGroup'], as_index = False).sum()
df_g1
# https://plotly.com/python/pie-charts/
import plotly.graph_objects as go
#
trace = go.Pie(labels = df_g1['AgeGroup'],
values = df_g1['Revenue']
)
data = [trace]
layout = go.Layout(title = 'Chapter 2.3 - Pie Chart')
fig = go.Figure(data, layout)
fig.show()
#
trace = go.Pie(labels = df_g1['AgeGroup'],
values = df_g1['Revenue'],
pull = [0, 0, 0.2, 0, 0] # label 순서와 동일 (0~1 범위)
)
data = [trace]
layout = go.Layout(title = 'Chapter 2.3 - Pie Chart Split')
fig = go.Figure(data, layout)
fig.show()
#
trace = go.Pie(labels = df_g1['AgeGroup'],
values = df_g1['Revenue'],
textinfo = 'label+percent', # text 값
insidetextorientation = 'tangential', # testinfo 타입 (tangential / auto / horizontal / radial)
hole = 0.4, # 원 중심부 구멍 크기
)
data = [trace]
layout = go.Layout(title = 'Chapter 2.3 - Pie Chart Hole')
fig = go.Figure(data, layout)
fig.show()
| [
"hsj2864@gmail.com"
] | hsj2864@gmail.com |
4e4e407a30aae6656e79b437e51a28957fafe3a1 | 0676fa55160ece68c1316f997b3609c7532b9238 | /soap/core_send.py | 50f4d312f24d5b6fa7a19cac21754854996e504e | [] | no_license | mamdouhmalek/gccid | f13f98e38e6d924b4e22ac9513cdde71007f7d97 | f20191bf5166c5e5f8a3f7e5b73112e25240e723 | refs/heads/master | 2016-09-05T14:20:16.983877 | 2014-05-23T09:58:53 | 2014-05-23T09:58:53 | 18,293,484 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,770 | py | from configuration import GetConfig, LogInput
from suds.client import Client
from suds.transport.http import HttpAuthenticated
from suds.transport.https import HttpTransport
from urllib2 import HTTPBasicAuthHandler, build_opener, install_opener, urlopen, BaseHandler
import xml
from xml.dom import minidom
# Pick the live or test SOAP endpoint section based on configuration.
# NOTE(review): if SOAP_CONNECTION is neither "LIVE" nor "TEST", `connctions`
# is never bound and the GetConfig(connctions) lookups below raise NameError.
if GetConfig('SOAP_CONNECTION') == "LIVE":
    connctions = "CSL"
elif GetConfig('SOAP_CONNECTION') == "TEST":
    connctions = "CST"
# Credentials and WSDL URL for the selected endpoint section.
userid = GetConfig(connctions).get('user')
passwd = GetConfig(connctions).get('password')
url = GetConfig(connctions).get('url')
######################### Basic Authentication
import base64
class HTTPSudsPreprocessor(BaseHandler):
    """urllib2 handler that stamps SOAP content-type and HTTP Basic-auth
    headers onto every outgoing request before suds sends it."""

    def http_request(self, req):
        req.add_header('Content-Type', 'text/xml; charset=utf-8')
        req.add_header('WWW-Authenticate', 'Basic realm="Control Panel"')
        # Encode the configured credentials automatically.  b64encode (unlike
        # the deprecated base64.encodestring) never embeds newlines, so the
        # old '\012'-stripping is no longer needed.
        cred = userid + ':' + passwd
        if cred is not None:
            req.add_header('Authorization', 'Basic ' + base64.b64encode(cred))
        return req
    # HTTPS requests get the same preprocessing as HTTP ones.
    https_request = http_request
http = HttpTransport()
opener = build_opener(HTTPSudsPreprocessor)
http.urlopener = opener
######################### For Basic Authentication #################################
client = Client(url, location=url.replace('?wsdl',''), transport=http, cache=None, timeout=90, faults=False, retxml=True)
#####################################################################################
# Message codes whose send call is wrapped in try/except: on any failure
# the dispatcher returns False instead of letting the exception propagate
# (mirrors the per-branch behaviour of the original if/elif chain).
_GUARDED_CODES = frozenset(('NpRequest', 'NpExecuteComplete'))

# All message codes this dispatcher understands; each maps onto the SOAP
# operation 'Send' + code exposed by the module-level suds ``client``.
_KNOWN_CODES = frozenset((
    'NpRequest', 'NpRequestAccept', 'NpRequestReject', 'NpRequestCancel',
    'NpExecute', 'NpExecuteComplete', 'NpDeactivate', 'NpDeactivateComplete',
    'NpQuery', 'NpBillingResolution', 'NpBillingResolutionReceived',
    'NpBillingResolutionEnd', 'NpBillingResolutionAlert',
    'NpBillingResolutionAlertReceived',
))


def SendSOAPMessage(code, request):
    """Dispatch *request* to the SOAP operation matching *code*.

    code    -- NP message code, e.g. 'NpRequest'; the operation invoked is
               'Send' + code on ``client.service``.
    request -- dict of keyword arguments forwarded to the SOAP operation.

    Returns the raw suds response.  For the guarded codes ('NpRequest',
    'NpExecuteComplete') any exception is swallowed and False is returned,
    exactly as in the original per-branch try/except.  Unknown codes return
    None, matching the original chain falling through without a return.
    """
    if code not in _KNOWN_CODES:
        return None
    # Every branch of the original chain called client.service.Send<code>,
    # so a single getattr dispatch replaces the 14-way if/elif ladder.
    sender = getattr(client.service, 'Send' + code)
    if code in _GUARDED_CODES:
        try:
            return sender(**request)
        except Exception:
            return False
    return sender(**request)
def myparser(InString):
    """Normalise a SOAP response into a plain dict.

    InString -- either the (status, params) tuple suds returns with
    faults=False, or a raw XML string (retxml=True path).

    Returns a dict of the common NP fields, extended with per-message
    fields for NpRequestAck / NpDeactivateAck responses, or False when the
    expected tags cannot be found.
    NOTE(review): the bare ``except:`` clauses also swallow unrelated
    errors, and tag prefixes 'com:'/'npc:' are assumed fixed -- confirm
    against the live WSDL.
    """
    if isinstance(InString,tuple):
        #res[0] is the error code 500 due to non correct input
        #res[1] is containg all the parameters
        g=[i for i in InString[1]]
        #g[0][0] is an ErrorNotification string, it's not the message code
        d=g[0][1]
        #convert the resulting content to a dictionary
        Dict=dict(d)
        #We need to define some exception to raise an error
        return Dict
    if isinstance(InString,str):
        res=str(InString)
        #print res
        convert=minidom.parseString(res)
        Dict={}
        # flag records which namespace prefix matched: 1 -> 'com:', 2 -> 'npc:'
        flag=0
        n='npc:'
        c='com:'
        # Fields common to every NP message.
        Marray=['ServiceType','MessageCode','PortID','OriginationID','DestinationID']
        for m in Marray:
            try:
                Dict[m]=convert.getElementsByTagName(c+m)[0].firstChild.data
                flag=1
            except:
                try:
                    Dict[m]=convert.getElementsByTagName(n+m)[0].firstChild.data
                    flag=2
                except:
                    #make some logging
                    #LogInput('Cannot parse the XML in myparser function')
                    return False
        if flag==1:
            h=c
        elif flag==2:
            h=n
        else:
            #LogInput('Un known XML Tag name !!!')
            return False
        # Message-specific extra fields.
        if convert.getElementsByTagName(h+'MessageCode')[0].firstChild.data=='NpRequestAck':
            params=['Number','SubmissionID','DonorID','RecipientID']
            for m in params:
                Dict[m]=convert.getElementsByTagName(h+m)[0].firstChild.data
        if convert.getElementsByTagName(h+'MessageCode')[0].firstChild.data == u"NpDeactivateAck":
            params=['Number','SubscriptionNetworkID','BlockID']
            for m in params:
                Dict[m]=convert.getElementsByTagName(h+m)[0].firstChild.data
        return Dict
| [
"mamdouh.malek@4gtss.com"
] | mamdouh.malek@4gtss.com |
41f8b79db905bc0e33ade9d4c202458c71c6d178 | ca72eb47bd82f87e029f14fb16dbd80340f9f0f7 | /model_zoo/YoloV3/api.py | 2b8f6bbf345e4521bcb6b49f91d4d37b0d25ec82 | [
"Apache-2.0"
] | permissive | sinferwu/YoloAll | b905ebf178d984e8e262599e7101bf9dfb7b235c | 38cd486d1e2ee83c05ee19aab45076b7a7759852 | refs/heads/main | 2023-07-25T10:19:16.551172 | 2021-08-29T02:46:02 | 2021-08-29T02:46:02 | 400,935,791 | 1 | 0 | Apache-2.0 | 2021-08-29T02:48:22 | 2021-08-29T02:48:22 | null | UTF-8 | Python | false | false | 2,378 | py | #! /usr/bin/env python3
from __future__ import division
import os
import argparse
import tqdm
import random
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from pytorchyolo.models import load_model
from pytorchyolo.utils.utils import load_classes, rescale_boxes, non_max_suppression, print_environment_info
from pytorchyolo.utils.datasets import ImageFolder
from pytorchyolo.utils.transforms import Resize, DEFAULT_TRANSFORMS
from common_utils import vis
model = None
device = 'cpu'
img_size = 416
conf_thres =0.5
nms_thres=0.4
def get_support_models():
    """Return the YOLOv3 model names available next to this file.

    A model is any '*.weights' file whose name contains 'yolov3'; the
    '.weights' suffix is stripped from the returned names.
    """
    weights_dir = os.path.dirname(os.path.realpath(__file__))
    return [
        str(entry).replace('.weights', '')
        for entry in os.listdir(weights_dir)
        if str(entry).endswith('.weights') and 'yolov3' in str(entry)
    ]
def create_model(model_name='yolov3-tiny', dev='cpu'):
    """Load the named Darknet model into the module-level ``model`` global.

    model_name -- base name shared by 'config/<name>.cfg' and '<name>.weights'
                  next to this file.
    dev        -- device string stored in the ``device`` global ('cpu'/'cuda').
    The model is put into eval mode; nothing is returned.
    """
    global model
    global device
    # Drop any previously loaded model before loading the new one.
    model = None
    device = dev
    model_cfg = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config/%s.cfg'%(model_name))
    model_weight = os.path.join(os.path.dirname(os.path.realpath(__file__)), '%s.weights'%(model_name))
    model = load_model(model_cfg, device, model_weight)
    model.eval()
def inference(img_array):
    """Run YOLOv3 detection on one image and draw the results onto it.

    img_array -- HxWxC image array (modified in place by ``vis``).
    Returns {'type': 'img', 'result': img_array} with boxes drawn.
    Requires ``create_model`` to have been called first (uses the module
    globals model/device/img_size/conf_thres/nms_thres).
    """
    global model
    global device
    global img_size
    global conf_thres
    global nms_thres
    map_result = {'type':'img'}
    # Letterbox/resize to the network input size and add a batch dimension;
    # the dummy zeros array stands in for (unused) ground-truth boxes.
    img_tensor = transforms.Compose([
        DEFAULT_TRANSFORMS,
        Resize(img_size)])(
            (img_array, np.zeros((1, 5))))[0].unsqueeze(0)
    if device == "cuda":
        img_tensor = img_tensor.cuda()
    with torch.no_grad():
        detections = model(img_tensor)
        detections = non_max_suppression(detections, conf_thres, nms_thres)
        # Map boxes from network coordinates back onto the original image.
        detections = rescale_boxes(detections[0], img_size, img_array.shape[:2])
    valid_pred = detections.cpu()
    # Columns: 0-3 box coords, 4 confidence, 5 class index.
    boxes = valid_pred[:,0:4]
    cls = valid_pred[:, 5]
    scores = valid_pred[:, 4]
    #x_rate = img_array.shape[1] / size[0]
    #y_rate = img_array.shape[0] / size[1]
    #boxes[:,0:4:2] = boxes[:,0:4:2] * x_rate
    #boxes[:,1:4:2] = boxes[:,1:4:2] * y_rate
    # Draw detections above confidence 0.5 directly on the input image.
    vis(img_array, boxes, scores, cls, conf=0.5)
    map_result['result'] = img_array
    return map_result
| [
"408958731@qq.com"
] | 408958731@qq.com |
ca7095ab3d8c8f9a438a75a24c6495f62b664b90 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_geologies.py | 84e56d430f9355cad6a66d3b9a709b593d67b684 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.nouns._geology import _GEOLOGY
#calss header
class _GEOLOGIES(_GEOLOGY, ):
    """Plural noun entry "geologies": inherits behaviour from _GEOLOGY and
    only overrides the identifying metadata set in __init__."""
    def __init__(self,):
        _GEOLOGY.__init__(self)
        self.name = "GEOLOGIES"  # canonical upper-case identifier
        self.specie = 'nouns'    # word category within the word base
        self.basic = "geology"   # singular/basic form this entry maps to
        self.jsondata = {}       # extra data; empty for this derived entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
290a914b6bae37cef63319ce03ef2b923eec6203 | df752e629cd20f9706876fe65dee9d178c1331aa | /Python/mrtfuncs.py | 0943217d51e87cae92470e58b77347cef384d741 | [] | no_license | UPennEoR/MiniRadioTelescope | 3a27d5d6405f2a2d92d0edc3c6146b77365fcb4a | 4fa076bb94cb12864083e96f908d0bc25d386ef9 | refs/heads/master | 2023-02-21T22:33:06.543694 | 2022-02-07T21:37:32 | 2022-02-07T21:37:32 | 63,442,562 | 307 | 36 | null | 2023-02-22T22:13:15 | 2016-07-15T18:23:12 | Jupyter Notebook | UTF-8 | Python | false | false | 3,860 | py | import os
import sys
import serial
def arduinoPort(baudrate, auto = True, debug = False):
    """Locate the serial port an Arduino is attached to and return its path.

    baudrate -- baud rate used when probing candidate ports.
    auto     -- when True, scan /dev and auto-select a working port (asking
                the user only when several respond); when False, always
                prompt the user.
    debug    -- print diagnostic information while scanning.

    Blocks until a port is found or selected; when auto-detection finds
    nothing it waits for the user to press Enter and rescans.

    Fixes over the original: dead code after `return` in the port test,
    index validation in manual entry (the old code checked an always-empty
    list, rejecting every valid choice), and a `selection`/`selectionIndex`
    typo that could return the wrong port after an invalid input.
    """

    def port_list(port_directory='/dev'):
        """Return candidate device nodes for the current OS."""
        if sys.platform.startswith('linux'):
            prefix = 'tty'
        elif sys.platform.startswith('darwin'):
            prefix = 'cu.usbmodem'
        else:
            prefix = None
        ports = []
        if prefix is not None:
            ports = [os.path.join(port_directory, f)
                     for f in os.listdir(port_directory)
                     if f.startswith(prefix)]
        if debug:
            print('DEBUG: The following are possible Arduino ports: ')
            print('DEBUG: ' + str(ports))
        return ports

    def test_port(port):
        """Return True when *port* can be opened at *baudrate*."""
        try:
            serial.Serial(port, baudrate).close()
            if debug:
                print('DEBUG: Serial device found on ' + port)
            return True
        except Exception:
            if debug:
                print('DEBUG: Unable to start ' + port)
            return False

    def prompt_index(count, allow_manual):
        """Ask for an index in [0, count); when allow_manual is True the
        index == count means 'manual entry'.  Re-prompts until valid."""
        limit = count + 1 if allow_manual else count
        while True:
            try:
                selection = int(input('Please select port: '))
                if 0 <= selection < limit:
                    return selection
            except ValueError:
                pass
            print('Please make a valid selection.')

    def manual_port_entry():
        """List candidate ports and let the user pick one or type a path."""
        ports = port_list()
        print('Possible Ports: ')
        for index, port in enumerate(ports):
            print(str(index) + ' | ' + port)
        print(str(len(ports)) + ' | [Manual Input]')
        choice = prompt_index(len(ports), allow_manual=True)
        if choice == len(ports):
            return input('Please manually enter port: ')
        return ports[choice]

    def manual_port_selection(ports):
        """Let the user choose among several verified ports."""
        print('Detected Ports: ')
        for index, port in enumerate(ports):
            print(str(index) + ' | ' + port)
        return ports[prompt_index(len(ports), allow_manual=False)]

    if not auto:
        return manual_port_entry()

    while True:
        # Probe every candidate port and keep only those that open cleanly.
        verified = [p for p in port_list() if test_port(p)]
        if debug:
            for p in verified:
                print('DEBUG: Verified port: ' + p)
        if not verified:
            print('Please verify Arduino connection.')
            input("Press Enter to retry...")
            continue
        if len(verified) > 1:
            if debug:
                print('DEBUG: More than 1 possible port detected.')
            return manual_port_selection(verified)
        return verified[0]
| [
"jaguirre@sas.upenn.edu"
] | jaguirre@sas.upenn.edu |
377de16c937260d90c622d656d7aa16574214d81 | 618b6945d639b03e72cfcdf835f1d89025d3d666 | /kitchen.cs.cmu.edu-2012/main_CRF.py | 9ee415b8e855a6deef873ee2620ce76b35aa71d8 | [] | no_license | TAB9/Masterarbeit-Tamara-Becker | f138a2411757691113a371606e9b5807bd57727e | 884cc4fe943148a01571049dc84f8854a6f2b709 | refs/heads/master | 2020-03-24T00:16:24.935064 | 2018-07-25T10:42:11 | 2018-07-25T10:42:11 | 142,277,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,389 | py | import glob
import matplotlib.pyplot as plt
import numpy as np
import pickle
import scipy.stats
import sklearn_crfsuite
import random
from confusionMetrics import plot_confusion_matrix
from ffmpeginput import input
from math import log, exp
from numpy import prod
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
from sklearn.metrics import precision_recall_fscore_support, recall_score, precision_score, classification_report, confusion_matrix, f1_score, accuracy_score
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import make_scorer
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import RandomizedSearchCV
from timeRemarks import outputSteps
from pprint import pprint
# using module pickle for loading data from text file
with open("data_random_heuristic.txt", 'rb') as data:
ground_truth_sampled = pickle.load(data)
#--------------seperate random, heuristic sequence for evaluation----------------
y_random = ground_truth_sampled[-2]
y_heuristic = ground_truth_sampled[-1]
# training data
ground_truth_dim = ground_truth_sampled[:13]
# common action set
step_set = set.intersection(*map(set, ground_truth_dim))
# number of streams
number_files = len(ground_truth_dim)
#array for average calcualtion of confusion matrix
average_cm = np.zeros((len(step_set),len(step_set)))
#-------------------------------------------------------------------------------
# define as array for cross validation
X = np.array(ground_truth_sampled)
X_random = X[-2]
X_heuristic = X[-1]
X = X[:13]
#-------------------------Conditional Random Field------------------------------
# use sklearn-crfsuite python wrapper for CRF++ implementation of CRF
# sciki-learn fct for LeaveOneOut
loo = LeaveOneOut()
precision_crf, recall_crf, f1_crf, accuracy_crf = [],[],[],[]
X_train_to_feat = []
prob_results, prob_results_random, prob_results_heuristic = [], [], []
# splitting the data into testset and trainingset
for train, test in loo.split(X):
print("%s %s" % (train, test))
# crossvalidation test und training set
X_train, X_test = X[train], X[test]
# ground_truth for evaluation
y_test = ground_truth_sampled[np.asscalar(np.array([test]))]
# training data
for i in train:
X_train_to_feat.append(X[np.asscalar(np.array([i]))])
# definition of feature functions
def get_verb(action):
verb = action.split('-')
return verb[0]
# get first to characters of verb
def pos(word):
return word[:2]
# get the noun of the action
def get_noun(action):
verb = action.split('-')
for i in verb:
if i in nouns:
break
return i
# Training data to features output dict
def action2features(seq, i):
action = seq[i]
features = {
#'action': word,
'verb': get_verb(action),
#'noun': get_noun(action),
#'pos': pos(get_verb(action)),
}
if i > 0:
action1 = seq[i-1]
features.update({
'-1.verb': get_verb(action1),
#'-1.noun': get_noun(action1)
#'-1.pos': pos(get_verb(action1)),
})
else:
features['BOS'] = True
if i < len(seq)-1:
action1 = seq[i+1]
features.update({
'+1.verb': get_verb(action1),
#'+1.noun': get_noun(action1)
#'+1:pos': pos(get_verb(action1)),
})
else:
features['EOS'] = True
return features
def seq2features(sent):
return [action2features(sent, i) for i in range(len(sent))]
# training data format list of list dict
X_train = [seq2features(s) for s in X_train_to_feat]
y_train = X_train_to_feat
# test data
X_test = [seq2features(y_test)]
y_test = [y_test]
# random, heuristic sequence in format of lists of dicts
X_random = [seq2features(y_random)]
X_heuristic = [seq2features(y_heuristic)]
# taget names for evaluation
target_names = list(step_set)
target_names = sorted(target_names)
# CRF model
model_crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
c1=0.25,
c2=0.01,
max_iterations=100,
all_possible_transitions=True
)
# train the model
model_crf.fit(X_train, y_train)
labels = list(model_crf.classes_)
# preditcion of the labels of the test sequence
y_pred = model_crf.predict(X_test)
# print output time remarks
outputSteps(y_pred[0])
# calculate probability
def get_prob(y_prob):
    """Product, over every action of every sequence in *y_prob*, of the
    probability of that action's most likely label.

    `x[max(x, key=x.get)]` in the original is exactly the maximum of the
    marginal dict's values, so `max(...)` over the values is used here.
    """
    best_marginals = [
        max(marginals.values())
        for sequence in y_prob
        for marginals in sequence
    ]
    return prod(best_marginals)
# Probability to model sequence
y_prob = model_crf.predict_marginals(X_test)
prob_results.append(get_prob(y_prob))
# Probability to model random sequence
y_prob_random_seq = model_crf.predict_marginals(X_random)
prob_results_random.append(get_prob(y_prob_random_seq))
# Probability to model heuristic sequence
y_prob_heuristic_seq = model_crf.predict_marginals(X_heuristic)
prob_results_heuristic.append(get_prob(y_prob_heuristic_seq))
# confusion matrix
cnf_matrix = confusion_matrix(y_test[0], y_pred[0])
normalize = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
average_cm = average_cm + normalize
# -----------------------------evaluation CRF-------------------------------
print('\n\n\n')
precision_crf.append(sklearn_crfsuite.metrics.flat_precision_score(y_test, y_pred, average='weighted'))
recall_crf.append(sklearn_crfsuite.metrics.flat_recall_score(y_test, y_pred, average='weighted'))
f1_crf.append(sklearn_crfsuite.metrics.flat_f1_score(y_test, y_pred, average='weighted'))
accuracy_crf.append(sklearn_crfsuite.metrics.flat_accuracy_score(y_test, y_pred))
print('Probability X_test')
pprint((prob_results))
print('Probability Random Sequence')
pprint((prob_results_random))
print('Probability Heuristic Sequence')
pprint((prob_results_heuristic))
print()
print('Precision')
pprint(np.mean(precision_crf))
print('Recall')
pprint(np.mean(recall_crf))
print('F1-score')
pprint(np.mean(f1_crf))
print('Accuracy')
pprint(np.mean(accuracy_crf))
# calculate and plot average confusion matrix
average_cm = average_cm / number_files
plt.figure()
plot_confusion_matrix(average_cm, classes=target_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
# Hyperparameter Optimization
'''
params_space = {
'c1': scipy.stats.expon(scale=0.5),
'c2': scipy.stats.expon(scale=0.05),
}
# use the same metric for evaluation
f1_scorer = make_scorer(metrics.flat_f1_score,
average='weighted', labels=target_names)
# search
rs = RandomizedSearchCV(model_crf, params_space,
cv=3,
verbose=1,
n_jobs=-1,
n_iter=50,
scoring=f1_scorer)
rs.fit(X_train, y_train)
# crf = rs.best_estimator_
print('best params:', rs.best_params_)
print('best CV score:', rs.best_score_)
print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
'''
| [
"noreply@github.com"
] | TAB9.noreply@github.com |
6311a8a8847f13f56bf6c925a32dacf30f781bd9 | a45a97455322545bbc040c40fb669e5b2475e651 | /FirstPygame.py | 81f467f0789da285b455d22bd935c427d1167d33 | [] | no_license | BreadBro/GameDesign | e6d2e67a85ce2394656b96a48d0e3b6d880df841 | f7b8db91d6c74283acc893b72986fe36d4497e9b | refs/heads/master | 2023-01-29T14:09:07.837882 | 2020-12-12T17:02:33 | 2020-12-12T17:02:33 | 290,260,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,938 | py | import pygame
# --- window and asset setup -------------------------------------------------
pygame.init()
WIDTH = 1000
HEIGHT = 800
screen = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("First Game")
# Nine-frame walk cycles (right- and left-facing) for the crab sprite.
walkRight = [pygame.image.load('Crabbo Pictures\png\Walk (1).png'), pygame.image.load('Crabbo Pictures\png\Walk (2).png'), pygame.image.load('Crabbo Pictures\png\Walk (3).png'), pygame.image.load('Crabbo Pictures\png\Walk (4).png'), pygame.image.load('Crabbo Pictures\png\Walk (5).png'), pygame.image.load('Crabbo Pictures\png\Walk (6).png'), pygame.image.load('Crabbo Pictures\png\Walk (7).png'), pygame.image.load('Crabbo Pictures\png\Walk (8).png'), pygame.image.load('Crabbo Pictures\png\Walk (9).png')]
walkLeft = [pygame.image.load('Crabbo Pictures\png\WalkL (1).png'), pygame.image.load('Crabbo Pictures\png\WalkL (2).png'), pygame.image.load('Crabbo Pictures\png\WalkL (3).png'), pygame.image.load('Crabbo Pictures\png\WalkL (4).png'), pygame.image.load('Crabbo Pictures\png\WalkL (5).png'), pygame.image.load('Crabbo Pictures\png\WalkL (6).png'), pygame.image.load('Crabbo Pictures\png\WalkL (7).png'), pygame.image.load('Crabbo Pictures\png\WalkL (8).png'), pygame.image.load('Crabbo Pictures\png\WalkL (9).png')]
# Starting background and idle sprite.
currentBackground = pygame.image.load('Crabbo Pictures\crabbo beach.png')
character = pygame.image.load('Crabbo Pictures\png\Walk (1).png')
# Background music (currently disabled).
#pygame.mixer.init()
#pygame.mixer.music.load("Crabbo Pictures\CrabRave.mp3")
#pygame.mixer.music.play(99999)
#pygame.mixer.music.set_volume(0.1)
# Player state: starting position, sprite size, horizontal speed (px/tick).
x = 100
y = 300
width = 40
height = 60
speed = 5
#to control the frames
clock = pygame.time.Clock()
# Jump state: `high` is the remaining impulse of the parabolic jump.
Jump = False
high = 10
#control left and right move
left = False
right = False
#control my list
walkCount = 0
def redrawGameWindow():
    """Draw the current background and the appropriate crab frame, then flip.

    Uses the module-level movement flags (left/right), position (x, y) and
    the shared animation counter walkCount (9 frames shown 3 ticks each).
    """
    global walkCount  # shared frame counter lives at module level
    screen.blit(currentBackground, (0, 0))
    if walkCount + 1 >= 27:
        walkCount = 0
    frames = walkLeft if left else (walkRight if right else None)
    if frames is None:
        # Standing still: idle sprite, animation restarts from frame 0.
        screen.blit(character, (x, y))
        walkCount = 0
    else:
        screen.blit(frames[walkCount // 3], (x, y))
        walkCount += 1
    pygame.display.update()
# --- main loop --------------------------------------------------------------
# BUG FIX: the original compared `currentBackground` against a freshly
# loaded image (`currentBackground == pygame.image.load(...)`).  pygame
# Surfaces have no value equality, so every such comparison is identity
# on a brand-new object and is always False -- no screen transition could
# ever fire.  (The last transition also forgot to assign the loaded image.)
# The background is therefore tracked by its file path in a plain string.
currentBackgroundPath = 'Crabbo Pictures\crabbo beach.png'

def switchBackground(newPath):
    """Load and activate the background at newPath, respawning the player."""
    global currentBackground, currentBackgroundPath, x
    currentBackgroundPath = newPath
    currentBackground = pygame.image.load(newPath)
    x = 100

run = True
while run:
    clock.tick(27)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False

    # Horizontal movement, clamped to the window.
    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT] and x > speed:
        x -= speed
        left, right = True, False
    elif keys[pygame.K_RIGHT] and x < WIDTH - speed - width:
        x += speed
        left, right = False, True
    else:
        left = right = False
        walkCount = 0

    if not Jump:
        if keys[pygame.K_SPACE]:
            Jump = True
            left = right = False
            walkCount = 0
    else:
        # Simple parabolic jump: decelerate going up, accelerate coming down.
        if high >= -10:
            y -= (high * abs(high)) * 0.5
            high -= 1
        else:
            high = 10
            Jump = False

    # Screen transitions: walking off the right edge (x > 900) or the left
    # edge (x < 20) moves to the neighbouring background.
    # Map: ocean <- beach <-> forest <-> forest2, ocean wraps back to beach.
    if x > 900:
        if currentBackgroundPath == 'Crabbo Pictures\crabbo beach.png':
            switchBackground('Crabbo Pictures\\forest.png')
        elif currentBackgroundPath == 'Crabbo Pictures\\forest.png':
            switchBackground('Crabbo Pictures\\forest2.png')
        elif currentBackgroundPath == 'Crabbo Pictures\crabbo ocean.png':
            switchBackground('Crabbo Pictures\crabbo beach.png')
    elif x < 20:
        if currentBackgroundPath == 'Crabbo Pictures\crabbo beach.png':
            switchBackground('Crabbo Pictures\crabbo ocean.png')
        elif currentBackgroundPath == 'Crabbo Pictures\\forest2.png':
            switchBackground('Crabbo Pictures\\forest.png')
        elif currentBackgroundPath == 'Crabbo Pictures\\forest.png':
            switchBackground('Crabbo Pictures\crabbo beach.png')

    redrawGameWindow()
pygame.quit()
pygame.quit()
| [
"noreply@github.com"
] | BreadBro.noreply@github.com |
f85db437a5027e96641b973b28ae8143d8ef91c3 | db9fdd33a8f7277aa0b210341ca5f3223cf84592 | /smithwilson/tests/test_core.py | aafe3fa1e9064c6e1e7e105c77f6b4488d1bb097 | [
"MIT"
] | permissive | simicd/smith-wilson-py | f42f2e41e9a5bdcace48d558ac7b2044e337d350 | e8e9e0ed6233ded23196ddcddf151ff0159cdaf6 | refs/heads/main | 2022-02-10T18:23:37.230530 | 2022-01-30T12:47:15 | 2022-01-30T12:47:15 | 191,021,279 | 15 | 6 | MIT | 2022-01-30T12:47:16 | 2019-06-09T15:11:35 | Python | UTF-8 | Python | false | false | 14,047 | py |
import unittest
import smithwilson as sw
import numpy as np
class TestSmithWilson(unittest.TestCase):
    """Unit tests for the Smith-Wilson yield-curve fitting package.

    Covers the intermediate building blocks (UFR discount factors,
    zero-coupon prices, Wilson kernel matrices, fitted parameters) as
    well as end-to-end curve fits validated against officially published
    EIOPA term structures.
    """

    def test_ufr_discount_factor(self):
        """Test creation of UFR discount factor vector"""
        # Input
        ufr = 0.029
        t = np.array([0.25, 1.0, 5.0, 49.5, 125.0])

        # Expected Output
        expected = np.array([0.992878614, 0.971817298, 0.866808430, 0.242906395, 0.028059385])

        # Actual Output
        actual = sw.ufr_discount_factor(ufr=ufr, t=t)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="UFR discount factors not matching")

    def test_calculate_prices(self):
        """Test calculation of zero-coupon bond price vector"""
        # Input
        r = np.array([0.02, 0.025, -0.033, 0.01, 0.0008])
        t = np.array([0.25, 1.0, 5.0, 49.5, 125.0])

        # Expected Output
        expected = np.array([0.995061577, 0.975609756, 1.182681027, 0.611071456, 0.904873593])

        # Actual Output
        actual = sw.calculate_prices(rates=r, t=t)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Prices not matching")

    def test_wilson_function_symmetric(self):
        """Test creation of a symmetric Wilson-function matrix (t1 = t2)"""
        # Input
        t = np.array([0.25, 1.0, 5.0, 49.5, 125.0]).reshape((-1, 1))
        ufr = 0.029
        alpha = 0.2

        # Expected Output
        expected = np.array([[0.00238438, 0.00872884, 0.02719467, 0.01205822, 0.00139298],
                             [0.00872884, 0.03320614, 0.10608305, 0.04720974, 0.00545372],
                             [0.02719467, 0.10608305, 0.42652097, 0.2105409 , 0.02432211],
                             [0.01205822, 0.04720974, 0.2105409 , 0.55463306, 0.06747646],
                             [0.00139298, 0.00545372, 0.02432211, 0.06747646, 0.01928956]])

        # Actual Output
        actual = sw.wilson_function(t1=t, t2=t, ufr=ufr, alpha=alpha)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Wilson functions not matching")

    def test_wilson_function_asymmetric_t1_lt_t2(self):
        """Test creation of a symmetric Wilson-function matrix (t1 != t2) with length of t1 > length of t2"""
        # Input
        t_obs = np.array([0.25, 1.0, 5.0, 49.5, 125.0]).reshape((-1, 1))
        t_target = np.array([0.25, 0.5, 1.0, 2.0, 2.5, 3.5, 5.0, 10.0, 20.0, 49.5, 125.0]).reshape((-1, 1))
        ufr = 0.029
        alpha = 0.2

        # Expected Output
        expected = np.array([[0.00238438, 0.00872884, 0.02719467, 0.01205822, 0.00139298],
                             [0.00463874, 0.01723526, 0.0539627 , 0.0239447 , 0.00276612],
                             [0.00872884, 0.03320614, 0.10608305, 0.04720974, 0.00545372],
                             [0.015444  , 0.05969492, 0.20375322, 0.0917584 , 0.01060004],
                             [0.01817438, 0.07046799, 0.24880429, 0.11307011, 0.013062  ],
                             [0.02260267, 0.08794588, 0.33012767, 0.15383656, 0.01777143],
                             [0.02719467, 0.10608305, 0.42652097, 0.2105409 , 0.02432211],
                             [0.03225016, 0.12614043, 0.54769846, 0.36498556, 0.04216522],
                             [0.02751232, 0.10770227, 0.47881259, 0.54833094, 0.06336226],
                             [0.01205822, 0.04720974, 0.2105409 , 0.55463306, 0.06747646],
                             [0.00139298, 0.00545372, 0.02432211, 0.06747646, 0.01928956]])

        # Actual Output
        actual = sw.wilson_function(t1=t_target, t2=t_obs, ufr=ufr, alpha=alpha)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Wilson functions not matching")

    def test_wilson_function_asymmetric_t2_lt_t1(self):
        """Test creation of a symmetric Wilson-function matrix (t1 != t2) with length of t2 > length of t1"""
        # Input
        t_target = np.array([0.50, 1.5, 7.0, 22.5]).reshape((-1, 1))
        t_obs = np.array([0.25, 1.0, 2.0, 2.5, 5.0, 10.0, 20.0]).reshape((-1, 1))
        ufr = 0.032
        alpha = 0.15

        # Expected Output
        expected = np.array([[0.00263839, 0.00990704, 0.01791847, 0.02129457, 0.03324991, 0.04184617, 0.03736174],
                             [0.00714378, 0.02751832, 0.05096578, 0.06087744, 0.09600535, 0.12138299, 0.1085669 ],
                             [0.01939785, 0.07563626, 0.14568738, 0.17843321, 0.31674624, 0.45088288, 0.42190812],
                             [0.01768861, 0.06909389, 0.13384921, 0.16464728, 0.3035725 , 0.51271549, 0.69668792]])

        # Actual Output
        actual = sw.wilson_function(t1=t_target, t2=t_obs, ufr=ufr, alpha=alpha)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Wilson functions not matching")

    def test_fit_parameters(self):
        """Test estimation of Smith-Wilson parameter vector ζ"""
        # Input
        r = np.array([0.02, 0.025, -0.033, 0.01, 0.0008]).reshape((-1, 1))
        t = np.array([0.25, 1.0, 5.0, 49.5, 125.0]).reshape((-1, 1))
        ufr = 0.029
        alpha = 0.2

        # Expected Output
        expected = np.array([-42.78076209, 23.4627511, -3.96498616, 8.92604195, -75.22418515]).reshape((-1, 1))

        # Actual Output
        actual = sw.fit_parameters(rates=r, t=t, ufr=ufr, alpha=alpha)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Parameter not matching")

    def test_fit_smithwilson_rates_actual(self):
        """Test estimation of yield curve fitted with the Smith-Wilson algorithm.

        This example uses an actual example from EIOPA. Deviations must be less than 1bps (0.01%).
        Source: https://eiopa.europa.eu/Publications/Standards/EIOPA_RFR_20190531.zip
                EIOPA_RFR_20190531_Term_Structures.xlsx; Tab: RFR_spot_no_VA; Switzerland
        """
        # Input
        r = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
                      -0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
                      -0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
                      0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
                      0.00271, 0.00274, 0.0028, 0.00291, 0.00309]).reshape((-1, 1))
        t = np.array([float(y + 1) for y in range(len(r))]).reshape((-1, 1))  # 1.0, 2.0, ..., 25.0
        ufr = 0.029
        alpha = 0.128562
        t_target = np.array([float(y + 1) for y in range(65)]).reshape((-1, 1))

        # Expected Output
        expected = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
                             -0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
                             -0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
                             0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
                             0.00271, 0.00274, 0.0028, 0.00291, 0.00309,
                             0.00337, 0.00372, 0.00412, 0.00455, 0.00501,
                             0.00548, 0.00596, 0.00644, 0.00692, 0.00739,
                             0.00786, 0.00831, 0.00876, 0.00919, 0.00961,
                             0.01002, 0.01042, 0.01081, 0.01118, 0.01154,
                             0.01189, 0.01223, 0.01255, 0.01287, 0.01318,
                             0.01347, 0.01376, 0.01403, 0.0143, 0.01456,
                             0.01481, 0.01505, 0.01528, 0.01551, 0.01573,
                             0.01594, 0.01615, 0.01635, 0.01655, 0.01673]).reshape((-1, 1))

        # Actual Output
        actual = sw.fit_smithwilson_rates(rates_obs=r, t_obs=t, t_target=t_target, ufr=ufr, alpha=alpha)

        # Assert - precision of 4 decimal places corresponds to a deviation of less than 1bps
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=4, err_msg="Fitted rates not matching")

    def test_fit_smithwilson_rates_incl_convergence(self):
        """Test estimation of yield curve without known convergence factor alpha.

        This example uses an actual example from EIOPA. Deviations must be less than 1bps (0.01%).
        Source: https://eiopa.europa.eu/Publications/Standards/EIOPA_RFR_20190531.zip
                EIOPA_RFR_20190531_Term_Structures.xlsx; Tab: RFR_spot_no_VA; Switzerland
        """
        # Input
        r = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
                      -0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
                      -0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
                      0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
                      0.00271, 0.00274, 0.0028, 0.00291, 0.00309]).reshape((-1, 1))
        t = np.array([float(y + 1) for y in range(len(r))]).reshape((-1, 1))  # 1.0, 2.0, ..., 25.0
        ufr = 0.029
        # NOTE(review): alpha is deliberately NOT passed below -- this test
        # exercises the internal convergence-parameter estimation.
        alpha = 0.128562
        t_target = np.array([float(y + 1) for y in range(65)]).reshape((-1, 1))

        # Expected Output
        expected = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
                             -0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
                             -0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
                             0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
                             0.00271, 0.00274, 0.0028, 0.00291, 0.00309,
                             0.00337, 0.00372, 0.00412, 0.00455, 0.00501,
                             0.00548, 0.00596, 0.00644, 0.00692, 0.00739,
                             0.00786, 0.00831, 0.00876, 0.00919, 0.00961,
                             0.01002, 0.01042, 0.01081, 0.01118, 0.01154,
                             0.01189, 0.01223, 0.01255, 0.01287, 0.01318,
                             0.01347, 0.01376, 0.01403, 0.0143, 0.01456,
                             0.01481, 0.01505, 0.01528, 0.01551, 0.01573,
                             0.01594, 0.01615, 0.01635, 0.01655, 0.01673]).reshape((-1, 1))

        # Actual Output
        actual = sw.fit_smithwilson_rates(rates_obs=r, t_obs=t, t_target=t_target, ufr=ufr)

        # Assert - precision of 4 decimal places corresponds to a deviation of less than 1bps
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=4, err_msg="Fitted rates not matching")

    def test_fit_smithwilson_rates_random(self):
        """Test estimation of yield curve fitted with the Smith-Wilson algorithm using random data points."""
        # Input
        r = np.array([0.02, 0.025, -0.033, 0.01, 0.0008]).reshape((-1, 1))
        t = np.array([0.25, 1.0, 5.0, 20.0, 25.0]).reshape((-1, 1))
        ufr = 0.029
        alpha = 0.12
        t_target = np.array([0.25, 0.5, 1.0, 2.0, 2.5, 3.5, 5.0, 10.0, 20.0, 49.5, 125.0]).reshape((-1, 1))

        # Expected Output
        expected = np.array([0.02, 0.02417656, 0.025, 0.00361999, -0.00733027,
                             -0.02345319, -0.033, -0.01256218, 0.01, 0.00715949, 0.02015626]).reshape((-1, 1))

        # Actual Output
        actual = sw.fit_smithwilson_rates(rates_obs=r, t_obs=t, t_target=t_target, ufr=ufr, alpha=alpha)

        # Assert
        self.assertEqual(type(actual), type(expected), "Returned types not matching")
        self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
        np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Fitted rates not matching")

    def test_fit_alpha(self):
        """Test estimation of convergence factor alpha.

        This example uses an actual example from EIOPA. Deviations must be less than 0.001.
        Source: https://eiopa.europa.eu/Publications/Standards/EIOPA_RFR_20190531.zip
                EIOPA_RFR_20190531_Term_Structures.xlsx; Tab: RFR_spot_no_VA; Switzerland
        """
        # Input
        r = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
                      -0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
                      -0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
                      0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
                      0.00271, 0.00274, 0.0028, 0.00291, 0.00309]).reshape((-1, 1))
        t = np.array([float(y + 1) for y in range(len(r))]).reshape((-1, 1))  # 1.0, 2.0, ..., 25.0
        ufr = 0.029

        # Expected Output
        alpha_expected = 0.128562

        # Actual Output
        alpha_actual = sw.fit_convergence_parameter(rates_obs=r, t_obs=t, ufr=ufr)

        # Assert - alpha must match the published value to within 0.001
        self.assertEqual(type(alpha_actual), type(alpha_expected), "Returned types not matching")
        self.assertAlmostEqual(alpha_actual, alpha_expected, msg="Alpha not matching", delta=0.001)
| [
"10134699+simicd@users.noreply.github.com"
] | 10134699+simicd@users.noreply.github.com |
47fdd194eeaf4bba70ad20cd0abe5d247a08db82 | f3209b50bd154be6ab3de5ed6ab639b99e157272 | /geoprocessing_cpp/probabilityLayerMultiPriority/rasterAgreementTable.py | 94dc6594e0d4d20a960ca4943312acd75fa881ab | [] | no_license | andriybun/bun-various-projects | 7e300cb2a663771b279df4210eed1b436cbdd1f8 | 79914097cc47929af39642cf4d971b53f99b3598 | refs/heads/master | 2016-09-05T13:47:30.932095 | 2014-12-01T12:38:26 | 2014-12-01T12:38:26 | 32,204,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,867 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 08 22:16:02 2010
@author: Andriy Bun
@name: rasterAgreementTable
"""
from utils import *
def invertPriorities(priorityVector):
    """Mirror a priority vector: reflect every entry around max(vector) + 1,
    so the smallest priority becomes the largest and vice versa."""
    pivot = max(priorityVector) + 1
    return [pivot - value for value in priorityVector]
class rasterAgreementTable():
    """Agreement table between several rasters ranked by two priority vectors.

    Every present/absent combination of the input rasters is enumerated,
    scored with two weighted sums (one per priority vector), sorted by
    those sums, and assigned a resulting agreement class.
    """

    def __init__(self, priorityValues1, priorityValues2=None):
        """Build the full agreement table.

        priorityValues1 -- primary priority per raster
        priorityValues2 -- secondary priority per raster; defaults to all 1s
        Raises Exception when the two vectors differ in length.
        """
        self.priorityValues1 = priorityValues1
        # Default secondary priorities: every raster equally important.
        # (None replaces the original mutable-default argument [].)
        if not priorityValues2:
            priorityValues2 = [1 for _ in range(len(priorityValues1))]
        self.priorityValues2 = priorityValues2
        # Verify inputs.  BUG FIX: the original compared priorityValues1
        # against itself, so mismatched lengths were never detected.
        if len(priorityValues1) != len(priorityValues2):
            raise Exception('Priority vectors must have the same size!')
        self.numRasters = len(self.priorityValues1)
        self.weights = self.GetWeights(self.priorityValues1)
        self.GetClasses()
        self.SortTable(['classes1', 'classes2'])
        self.SetResultingClasses()

    #===========================================================================
    # Method to parse rasters' priorities to weights
    #===========================================================================
    def GetWeights(self, priorityValues):
        """Translate priorities into integer weights so that a raster with a
        strictly higher priority outweighs any combination of lower ones."""
        priorityValuesSorted = sorted(priorityValues)
        priorityCumSum = [1]
        idx = 0
        for i in range(1, len(priorityValues)):
            priorityCumSum.append(priorityCumSum[i - 1] + priorityCumSum[idx])
            if priorityValuesSorted[i] != priorityValuesSorted[i - 1]:
                idx = i
        # Map each original priority back onto its cumulative weight.
        # list.index returns the first matching position (the behaviour of
        # the project helper findFirst); every value is guaranteed present.
        return [max(1, priorityCumSum[priorityValuesSorted.index(val)])
                for val in priorityValues]

    #===========================================================================
    # Enumerate weight classes (recursive)
    #===========================================================================
    def GetClasses(self):
        """Enumerate all 2**numRasters raster combinations and compute both
        weighted sums (classes1, classes2) for each row of the table."""
        self.classes1 = []
        self.classes2 = []
        self.agreementTable = []
        self.data = []
        self.curRow = 0
        for _ in range(2 ** self.numRasters):
            self.agreementTable.append([])
        self.GenerateAgreementTable(self.weights, 0)
        for i in range(2 ** self.numRasters):
            sum1 = 0
            sum2 = 0
            for j in range(self.numRasters):
                sum1 += self.agreementTable[i][j] * self.weights[j]
                sum2 += self.agreementTable[i][j] * self.priorityValues2[j]
            self.classes1.append(sum1)
            self.classes2.append(sum2)
            self.data.append(dict(resultingClass=-1,
                                  classes1=sum1,
                                  classes2=sum2,
                                  agreementTable=self.agreementTable[i]))
        self.classes1.sort()

    def GenerateAgreementTable(self, weights, weightsSum):
        """Depth-first recursion filling self.agreementTable with every 0/1
        combination; the '1' (raster present) branch is emitted first."""
        if len(weights) > 1:
            self.agreementTable[self.curRow].append(1)
            self.GenerateAgreementTable(weights[1:], weightsSum + weights[0])
            # Copy the shared prefix of the sibling row built just above.
            for bit in self.agreementTable[self.curRow - 1][0:-len(weights)]:
                self.agreementTable[self.curRow].append(bit)
            self.agreementTable[self.curRow].append(0)
            self.GenerateAgreementTable(weights[1:], weightsSum)
        else:
            # Base case closes two rows: one ending in 1, one ending in 0.
            for bit in self.agreementTable[self.curRow]:
                self.agreementTable[self.curRow + 1].append(bit)
            self.agreementTable[self.curRow].append(1)
            self.agreementTable[self.curRow + 1].append(0)
            self.curRow += 2

    #===========================================================================
    # Sort table by multiple fields
    #===========================================================================
    def SortTable(self, fields):
        """Stable sort of self.data by the given field names, primary field
        first.  (The original reversed the caller's list in place.)"""
        for field in reversed(fields):
            self.data = self.BubbleSort(self.data, field)

    #===========================================================================
    # Row lookup
    #===========================================================================
    def FindFirst(self, val1, val2):
        """Index of the first row whose class sums equal (val1, val2).
        Raises IndexError when no such row exists (as the original did)."""
        for i, row in enumerate(self.data):
            if row['classes1'] == val1 and row['classes2'] == val2:
                return i
        raise IndexError('no row with classes (%s, %s)' % (val1, val2))

    def FindClass(self, val1, val2):
        """Resulting class of the first row matching (val1, val2)."""
        return self.data[self.FindFirst(val1, val2)]['resultingClass']

    #===========================================================================
    # Stable single-field sort (name kept for backwards compatibility)
    #===========================================================================
    def BubbleSort(self, tab, field):
        """Return tab sorted stably by one field.  Uses the built-in stable
        sort instead of the original O(n^2) bubble sort."""
        return sorted(tab, key=lambda row: row[field])

    #===========================================================================
    # Set resulting classes
    #===========================================================================
    def SetResultingClasses(self):
        """Assign class numbers to the sorted rows.  A new class is opened
        only when BOTH sums differ from the previous row's sums."""
        currentClass = 1
        self.data[0]['resultingClass'] = currentClass
        for idx in range(1, len(self.data)):
            if self.data[idx]['classes1'] != self.data[idx - 1]['classes1'] and \
               self.data[idx]['classes2'] != self.data[idx - 1]['classes2']:
                currentClass += 1
            self.data[idx]['resultingClass'] = currentClass

    #===========================================================================
    # Print table
    #===========================================================================
    def Print(self, gui=None, listOfRasters=None):
        """Print the table to stdout, or to `gui` via gui.PrintText()."""
        import os

        def emit(text):
            # Route every output line to the console or to the GUI widget.
            if gui is None:
                print(text)
            else:
                gui.PrintText(text)

        tableCaption = 'Raster agreement table'
        headerSeparator = '-------------------------------'
        for val in self.data[0]['agreementTable']:
            headerSeparator += '------'
        header0 = '%-12s| | || ' % (' ')
        header1 = '%-12s| SumW1 | SumW2 || Rasters' % (' Cell class')
        header2 = '%-12s| | || ' % (' ')
        for i in range(0, self.numRasters):
            header2 += '%2d | ' % (i + 1)
        header2 = header2[0:-2]
        emit('\n')
        emit(tableCaption)
        emit(headerSeparator)
        emit(header0)
        emit(header1)
        emit(header2)
        emit(headerSeparator)
        rowIdx = 0  # BUG FIX: the original reused the header loop variable i
        for row in self.data:
            outString = ' %4d | %3d | %3d || ' % (rowIdx, row['classes1'], row['classes2'])
            for val in row['agreementTable']:
                outString += str(val) + ' | '
            outString = outString[0:-3]
            rowIdx += 1
            emit(outString)
        emit(headerSeparator)
        emit('\n')
        if listOfRasters is not None:
            emit('Rasters:')
            for n, rasterName in enumerate(listOfRasters, start=1):
                emit('%d. - %s' % (n, os.path.basename(rasterName)))

    #===========================================================================
    # Print table to an open file handle
    #===========================================================================
    def PrintToFile(self, file, listOfRasters=None):
        """Write the table (plus both inverted priority vectors) to `file`."""
        import os
        file.write('Priority values 1:\n')
        file.write(str(invertPriorities(self.priorityValues1)) + '\n')
        file.write('Priority values 2:\n')
        file.write(str(invertPriorities(self.priorityValues2)) + '\n')
        tableCaption = 'Raster agreement table\n'
        headerSeparator = '-------------------------------'
        for val in self.data[0]['agreementTable']:
            headerSeparator += '------'
        header0 = '%-12s| | || Rasters' % (' ')
        header1 = '%-12s| SumW1 | SumW2 || ' % (' Cell class')
        header2 = '%-12s| | || ' % (' ')
        for i in range(0, self.numRasters):
            header2 += '%2d | ' % (i + 1)
        header2 = header2[0:-2]
        file.write('\n')
        file.write(tableCaption + '\n')
        file.write(headerSeparator + '\n')
        file.write(header0 + '\n')
        file.write(header1 + '\n')
        file.write(header2 + '\n')
        file.write(headerSeparator + '\n')
        rowIdx = 0  # BUG FIX: the original reused the header loop variable i
        for row in self.data:
            outString = ' %4d | %3d | %3d || ' % (rowIdx, row['classes1'], row['classes2'])
            for val in row['agreementTable']:
                outString += str(val) + ' | '
            outString = outString[0:-3]
            rowIdx += 1
            file.write(outString + '\n')
        file.write(headerSeparator + '\n')
        file.write('\n\n')
        if listOfRasters is not None:
            file.write('Rasters:\n')
            for n, rasterName in enumerate(listOfRasters, start=1):
                file.write('%d. - %s' % (n, os.path.basename(rasterName)) + '\n')
if __name__ == "__main__":
    # Quick smoke test: five rasters, the first one with a higher priority.
    RAT = rasterAgreementTable([2, 1, 1, 1, 1])
    RAT.Print()
    print str(RAT.FindFirst(8, 4))
"andr.bun@8e31e031-ea9d-cf67-1966-e0f2b5892f1d"
] | andr.bun@8e31e031-ea9d-cf67-1966-e0f2b5892f1d |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.