text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def makevFunc(self,solution):
'''
Creates the value function for this period, defined over market resources
m and persistent income p. self must have the attribute EndOfPrdvFunc in
order to execute.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
vFuncNow : ValueFunc
A representation of the value function for this period, defined over
market resources m and persistent income p: v = vFuncNow(m,p).
'''
mSize = self.aXtraGrid.size
pSize = self.pLvlGrid.size
# Compute expected value and marginal value on a grid of market resources
pLvl_temp = np.tile(self.pLvlGrid,(mSize,1)) # Tile pLvl across m values
# mLvl grid: lower bound mLvlMinNow(p) plus aXtraGrid scaled by each p level
mLvl_temp = np.tile(self.mLvlMinNow(self.pLvlGrid),(mSize,1)) + np.tile(np.reshape(self.aXtraGrid,(mSize,1)),(1,pSize))*pLvl_temp
cLvlNow = solution.cFunc(mLvl_temp,pLvl_temp)
aLvlNow = mLvl_temp - cLvlNow
vNow = self.u(cLvlNow) + self.EndOfPrdvFunc(aLvlNow,pLvl_temp)
vPnow = self.uP(cLvlNow)
# Calculate pseudo-inverse value and its first derivative (wrt mLvl)
vNvrs = self.uinv(vNow) # value transformed through inverse utility
# Chain rule: d(uinv(v))/dm = v'(m)*uinv'(v), using v'(m) = u'(c) -- NOTE(review): presumably the envelope condition; confirm
vNvrsP = vPnow*self.uinvP(vNow)
# Add data at the lower bound of m
mLvl_temp = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pSize)),mLvl_temp),axis=0)
# Pseudo-inverse value is zero at the m lower bound; slope reuses the first interior row
vNvrs = np.concatenate((np.zeros((1,pSize)),vNvrs),axis=0)
vNvrsP = np.concatenate((np.reshape(vNvrsP[0,:],(1,vNvrsP.shape[1])),vNvrsP),axis=0)
# Add data at the lower bound of p
# At p=0 the pseudo-inverse value is taken as linear in m with slope MPCminNvrs -- TODO confirm against model docs
MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA))
m_temp = np.reshape(mLvl_temp[:,0],(mSize+1,1))
mLvl_temp = np.concatenate((m_temp,mLvl_temp),axis=1)
vNvrs = np.concatenate((MPCminNvrs*m_temp,vNvrs),axis=1)
vNvrsP = np.concatenate((MPCminNvrs*np.ones((mSize+1,1)),vNvrsP),axis=1)
# Construct the pseudo-inverse value function
vNvrsFunc_list = []
for j in range(pSize+1):
pLvl = np.insert(self.pLvlGrid,0,0.0)[j]
# Each 1D cubic spline is defined over m net of the lower bound mLvlMinNow(pLvl)
vNvrsFunc_list.append(CubicInterp(mLvl_temp[:,j]-self.mLvlMinNow(pLvl),vNvrs[:,j],vNvrsP[:,j],MPCminNvrs*self.hLvlNow(pLvl),MPCminNvrs))
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_list,np.insert(self.pLvlGrid,0,0.0)) # Value function "shifted"
vNvrsFuncNow = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow)
# "Re-curve" the pseudo-inverse value function into the value function
vFuncNow = ValueFunc2D(vNvrsFuncNow,self.CRRA)
return vFuncNow |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def makeCubiccFunc(self,mLvl,pLvl,cLvl):
'''
Makes a quasi-cubic spline interpolation of the unconstrained consumption
function for this period. Function is cubic splines with respect to mLvl,
but linear in pLvl.
Parameters
----------
mLvl : np.array
Market resource points for interpolation.
pLvl : np.array
Persistent income level points for interpolation.
cLvl : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : CubicInterp
The unconstrained consumption function for this period.
'''
# Calculate the MPC at each gridpoint
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(self.vPPfuncNext(self.mLvlNext,self.pLvlNext)*self.ShkPrbs_temp,axis=0)
# cLvl[1:,1:] skips the first row and column; replacement edge values are re-attached below -- NOTE(review): presumably these are grid-bound points; confirm caller layout
dcda = EndOfPrdvPP/self.uPP(np.array(cLvl[1:,1:]))
# Convert the implicit slope dc/da into the MPC dc/dm = dcda/(dcda+1)
MPC = dcda/(dcda+1.)
MPC = np.concatenate((np.reshape(MPC[:,0],(MPC.shape[0],1)),MPC),axis=1) # Stick an extra MPC value at bottom; MPCmax doesn't work
MPC = np.concatenate((self.MPCminNow*np.ones((1,self.aXtraGrid.size+1)),MPC),axis=0)
# Make cubic consumption function with respect to mLvl for each persistent income level
cFunc_by_pLvl_list = [] # list of consumption functions for each pLvl
for j in range(pLvl.shape[0]):
pLvl_j = pLvl[j,0]
m_temp = mLvl[j,:] - self.BoroCnstNat(pLvl_j)
c_temp = cLvl[j,:] # Make a cubic consumption function for this pLvl
MPC_temp = MPC[j,:]
if pLvl_j > 0:
cFunc_by_pLvl_list.append(CubicInterp(m_temp,c_temp,MPC_temp,lower_extrap=True,slope_limit=self.MPCminNow,intercept_limit=self.MPCminNow*self.hLvlNow(pLvl_j)))
else: # When pLvl=0, cFunc is linear
cFunc_by_pLvl_list.append(LinearInterp(m_temp,c_temp,lower_extrap=True))
pLvl_list = pLvl[:,0]
cFuncUncBase = LinearInterpOnInterp1D(cFunc_by_pLvl_list,pLvl_list) # Combine all linear cFuncs
cFuncUnc = VariableLowerBoundFunc2D(cFuncUncBase,self.BoroCnstNat) # Re-adjust for lower bound of natural borrowing constraint
return cFuncUnc |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self):
    '''
    Solve one period of the consumption-saving problem with risky income,
    tracking persistent income explicitly as a state variable.

    Parameters
    ----------
    None

    Returns
    -------
    solution : ConsumerSolution
        The solution to the one period problem, including a consumption
        function over (m,p), a marginal value function, bounding MPCs, and
        human wealth as a function of persistent income. May also include a
        value function and marginal marginal value function, depending on
        the vFuncBool and CubicBool options.
    '''
    aLvl, pLvl = self.prepareToCalcEndOfPrdvP()
    EndOfPrdvP = self.calcEndOfPrdvP()
    if self.vFuncBool:
        self.makeEndOfPrdvFunc(EndOfPrdvP)
    # Cubic interpolation of cFunc needs end-of-period marginal marginal value
    interpolator = self.makeCubiccFunc if self.CubicBool else self.makeLinearcFunc
    solution = self.addMPCandHumanWealth(
        self.makeBasicSolution(EndOfPrdvP, aLvl, pLvl, interpolator))
    if self.vFuncBool:
        solution.vFunc = self.makevFunc(solution)
    if self.CubicBool:
        solution = self.addvPPfunc(solution)
    return solution
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def installRetirementFunc(self):
    '''
    Install the special retirement transition pLvlNextFuncRet into element
    T_retire of self.pLvlNextFunc. Does nothing when T_retire is zero or
    pLvlNextFuncRet does not exist. Should only be called from within
    updatepLvlNextFunc, which ensures that time is flowing forward.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # Check the attribute first so T_retire is never read when no retirement
    # function was provided (mirrors the original short-circuit order).
    if not hasattr(self, 'pLvlNextFuncRet'):
        return
    if self.T_retire == 0:
        return
    self.pLvlNextFunc[self.T_retire] = self.pLvlNextFuncRet
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getStates(self):
    '''
    Update each agent's persistent income level and market resources.
    Uses pLvlNow, aLvlNow, PermShkNow, TranShkNow.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    assets_prev = self.aLvlNow
    R_now = self.getRfree()
    # New persistent income: apply each period's transition function to the
    # agents currently in that period of their cycle.
    p_new = np.zeros_like(assets_prev)
    for t in range(self.T_cycle):
        in_period = t == self.t_cycle
        # Index t-1: presumably t_cycle has already been advanced for this
        # period, so period-t agents use the prior transition -- TODO confirm
        p_new[in_period] = self.pLvlNextFunc[t-1](self.pLvlNow[in_period])*self.PermShkNow[in_period]
    self.pLvlNow = p_new                     # updated persistent income level
    self.bLvlNow = R_now*assets_prev         # bank balances before labor income
    self.mLvlNow = self.bLvlNow + self.TranShkNow*self.pLvlNow
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updatepLvlNextFunc(self):
    '''
    Construct the time-varying attribute pLvlNextFunc as a list of linear
    functions through the origin, p_next = PermGroFac[t]*p, implying constant
    expected permanent income growth at every income level. Restores the
    original direction of time flow before returning.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    was_forward = self.time_flow
    self.timeFwd()
    # One linear transition per period: passes through (0,0) and (1, PermGroFac[t])
    self.pLvlNextFunc = [
        LinearInterp(np.array([0., 1.]), np.array([0., self.PermGroFac[t]]))
        for t in range(self.T_cycle)
    ]
    self.addToTimeVary('pLvlNextFunc')
    if not was_forward:
        self.timeRev()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updatepLvlNextFunc(self):
    '''
    Construct the pLvlNextFunc attribute as a sequence of AR1-style
    persistent income transition functions, drawing on PermGroFac and
    PrstIncCorr. If cycles=0, the product of PermGroFac across all periods
    must be 1.0, otherwise this method is invalid. Restores the original
    direction of time flow before returning.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    was_forward = self.time_flow
    self.timeFwd()
    transition_fns = []
    log_mean = self.pLvlInitMean  # running mean of log persistent income
    for t in range(self.T_cycle):
        transition_fns.append(pLvlFuncAR1(log_mean, self.PermGroFac[t], self.PrstIncCorr))
        # Advance the mean of log income by this period's growth factor
        log_mean += np.log(self.PermGroFac[t])
    self.pLvlNextFunc = transition_fns
    self.addToTimeVary('pLvlNextFunc')
    if not was_forward:
        self.timeRev()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drawDiscrete(N,P=[1.0],X=[0.0],exact_match=False,seed=0):
    '''
    Simulates N draws from a discrete distribution with probabilities P and outcomes X.

    Parameters
    ----------
    N : int
        Number of draws to simulate.
    P : np.array or list
        Probabilities of the discrete outcomes (should sum to 1).
    X : np.array or list
        The discrete outcomes themselves.
    exact_match : boolean
        Whether the draws should "exactly" match the discrete distribution (as
        closely as possible given finite draws). When True, returned draws are
        a random permutation of the N-length list that best fits the discrete
        distribution. When False (default), each draw is independent from the
        others and the result could deviate from the input.
    seed : int
        Seed for random number generator.

    Returns
    -------
    draws : np.array
        An array of draws from the discrete distribution; each element is a value in X.
    '''
    # Normalize inputs to arrays up front: the exact_match branch relies on
    # array semantics (P.size, fancy indexing into X), which crashes when the
    # list defaults (or caller-supplied lists) are used. The defaults are
    # never mutated, so the shared-list pitfall does not apply.
    P = np.asarray(P)
    X = np.asarray(X)
    # Set up the RNG
    RNG = np.random.RandomState(seed)
    if exact_match:
        # Cutoff points between discrete outcomes in a best-fit N-length sample
        cutoffs = np.round(np.cumsum(P)*N).astype(int)
        # Build a list of event indices that closely matches the distribution
        top = 0
        event_list = []
        for j in range(P.size):
            bot = top
            top = cutoffs[j]
            event_list += (top-bot)*[j]
        # Randomly permute the event indices and map them to outcomes
        event_draws = RNG.permutation(event_list)
        draws = X[event_draws]
    else:
        # Independent draws: invert the CDF at uniform variates
        base_draws = RNG.uniform(size=N)
        cum_dist = np.cumsum(P)
        indices = cum_dist.searchsorted(base_draws)
        draws = X[indices]
    return draws
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def solveFashion(solution_next,DiscFac,conformUtilityFunc,punk_utility,jock_utility,switchcost_J2P,switchcost_P2J,pGrid,pEvolution,pref_shock_mag):
'''
Solves a single period of the fashion victim model.
Parameters
----------
solution_next: FashionSolution
A representation of the solution to the subsequent period's problem.
DiscFac: float
The intertemporal discount factor.
conformUtilityFunc: function
Utility as a function of the proportion of the population who wears the
same style as the agent.
punk_utility: float
Direct utility from wearing the punk style this period.
jock_utility: float
Direct utility from wearing the jock style this period.
switchcost_J2P: float
Utility cost of switching from jock to punk this period.
switchcost_P2J: float
Utility cost of switching from punk to jock this period.
pGrid: np.array
1D array of "proportion of punks" states spanning [0,1], representing
the fraction of agents *currently* wearing punk style.
pEvolution: np.array
2D array representing the distribution of next period's "proportion of
punks". The pEvolution[i,:] contains equiprobable values of p for next
period if p = pGrid[i] today.
pref_shock_mag: float
Standard deviation of T1EV preference shocks over style.
Returns
-------
solution_now: FashionSolution
A representation of the solution to this period's problem.
'''
# Unpack next period's solution
VfuncPunkNext = solution_next.VfuncPunk
VfuncJockNext = solution_next.VfuncJock
# Calculate end-of-period expected value for each style at points on the pGrid
# Mean over axis=1: the pEvolution columns are equiprobable next-period states
EndOfPrdVpunk = DiscFac*np.mean(VfuncPunkNext(pEvolution),axis=1)
EndOfPrdVjock = DiscFac*np.mean(VfuncJockNext(pEvolution),axis=1)
# Get current period utility flow from each style (without switching cost)
Upunk = punk_utility + conformUtilityFunc(pGrid)
Ujock = jock_utility + conformUtilityFunc(1.0 - pGrid)
# Calculate choice-conditional value for each combination of current and next styles (at each)
V_J2J = Ujock + EndOfPrdVjock
V_J2P = Upunk - switchcost_J2P + EndOfPrdVpunk
V_P2J = Ujock - switchcost_P2J + EndOfPrdVjock
V_P2P = Upunk + EndOfPrdVpunk
# Calculate the beginning-of-period expected value of each p-state when punk
# T1EV shocks imply a logit: subtract the columnwise max before exponentiating
# for numerical stability, then recover log-sum-exp scaled by pref_shock_mag.
Vboth_P = np.vstack((V_P2J,V_P2P))
Vbest_P = np.max(Vboth_P,axis=0)
Vnorm_P = Vboth_P - np.tile(np.reshape(Vbest_P,(1,pGrid.size)),(2,1))
ExpVnorm_P = np.exp(Vnorm_P/pref_shock_mag)
SumExpVnorm_P = np.sum(ExpVnorm_P,axis=0)
V_P = np.log(SumExpVnorm_P)*pref_shock_mag + Vbest_P
# Row 0 of Vboth_P is V_P2J, so switch_P is the probability a punk switches to jock
switch_P = ExpVnorm_P[0,:]/SumExpVnorm_P
# Calculate the beginning-of-period expected value of each p-state when jock
Vboth_J = np.vstack((V_J2J,V_J2P))
Vbest_J = np.max(Vboth_J,axis=0)
Vnorm_J = Vboth_J - np.tile(np.reshape(Vbest_J,(1,pGrid.size)),(2,1))
ExpVnorm_J = np.exp(Vnorm_J/pref_shock_mag)
SumExpVnorm_J = np.sum(ExpVnorm_J,axis=0)
V_J = np.log(SumExpVnorm_J)*pref_shock_mag + Vbest_J
# Row 1 of Vboth_J is V_J2P, so switch_J is the probability a jock switches to punk
switch_J = ExpVnorm_J[1,:]/SumExpVnorm_J
# Make value and policy functions for each style
VfuncPunkNow = LinearInterp(pGrid,V_P)
VfuncJockNow = LinearInterp(pGrid,V_J)
switchFuncPunkNow = LinearInterp(pGrid,switch_P)
switchFuncJockNow = LinearInterp(pGrid,switch_J)
# Make and return this period's solution
solution_now = FashionSolution(VfuncJock=VfuncJockNow,
VfuncPunk=VfuncPunkNow,
switchFuncJock=switchFuncJockNow,
switchFuncPunk=switchFuncPunkNow)
return solution_now |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def calcPunkProp(sNow):
'''
Calculates the proportion of punks in the population, given data from each type.
Parameters
----------
sNow : [np.array]
List of arrays of binary data, representing the fashion choice of each
agent in each type of this market (0=jock, 1=punk).
Returns
-------
(unnamed) : FashionMarketInfo
Object reporting the overall proportion of punks pNow across all types.
'''
sNowX = np.asarray(sNow).flatten()
pNow = np.mean(sNowX)
return FashionMarketInfo(pNow) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calcFashionEvoFunc(pNow):
    '''
    Fit a new approximate dynamic rule for the evolution of the proportion
    of punks: a linear regression of p_{t+1} on p_t plus a "shock width".

    Parameters
    ----------
    pNow : [float]
        List describing the history of the proportion of punks in the population.

    Returns
    -------
    (unnamed) : FashionEvoFunc
        A new rule for the evolution of the population punk proportion, based
        on the history in input pNow.
    '''
    history = np.array(pNow)
    # Drop the first 100 observations as burn-in, then form (p_t, p_{t+1}) pairs
    p_today = history[100:-1]
    p_tomorrow = history[101:]
    slope, intercept, _r, _pval, _stderr = stats.linregress(p_today, p_tomorrow)
    # Shock width is twice the RMSE of the fitted linear rule
    fitted = intercept + slope*p_today
    rmse = np.sqrt(np.mean((fitted - p_tomorrow)**2))
    print(str(intercept) + ', ' + str(slope) + ', ' + str(rmse))
    return FashionEvoFunc(intercept, slope, 2*rmse)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateEvolution(self):
    '''
    Rebuild the "population punk proportion" evolution array. Fashion victims
    believe next period's punk proportion is a linear function of this
    period's proportion, subject to a uniform shock of half-width pNextWidth.
    Uses attributes pNextIntercept, pNextSlope, pNextCount, pNextWidth, and
    pGrid to fill pEvolution with a discrete approximation of next-period
    states for each current state in pGrid.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    evolution = np.zeros((self.pCount, self.pNextCount))
    for row in range(self.pCount):
        p_now = self.pGrid[row]
        center = self.pNextIntercept + self.pNextSlope*p_now
        # approxUniform returns (probabilities, values); keep the values
        evolution[row, :] = approxUniform(N=self.pNextCount,
                                          bot=center - self.pNextWidth,
                                          top=center + self.pNextWidth)[1]
    self.pEvolution = evolution
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
    '''
    Prepare this agent type for a new simulation run: re-seed the random
    number generator and randomly initialize each agent's style
    (1 = punk with probability p_init, else 0 = jock).
    '''
    self.resetRNG()
    draws = self.RNG.rand(self.pop_size)
    style = np.zeros(self.pop_size)
    style[draws < self.p_init] = 1
    self.sNow = style
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def postSolve(self):
    '''
    Unpack the behavioral and value functions from the period-0 solution
    as top-level attributes for more parsimonious access.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    first_period = self.solution[0]
    self.switchFuncPunk = first_period.switchFuncPunk
    self.switchFuncJock = first_period.switchFuncJock
    self.VfuncPunk = first_period.VfuncPunk
    self.VfuncJock = first_period.VfuncJock
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solvePerfForesight(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
    '''
    Solve a single period consumption-saving problem for a consumer with
    perfect foresight, delegating to ConsPerfForesightSolver.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    DiscFac : float
        Intertemporal discount factor for future utility.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.

    Returns
    -------
    solution : ConsumerSolution
        The solution to this period's problem.
    '''
    one_period_solver = ConsPerfForesightSolver(solution_next, DiscFac, LivPrb,
                                                CRRA, Rfree, PermGroFac)
    return one_period_solver.solve()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def constructAssetsGrid(parameters):
    '''
    Construct the base grid of post-decision states: end-of-period assets
    above the absolute minimum. All inputs are read as attributes of the
    single argument, which can be a ConsumerType instance or a custom
    Parameters class.

    Parameters
    ----------
    aXtraMin: float
        Minimum value for the a-grid.
    aXtraMax: float
        Maximum value for the a-grid.
    aXtraCount: int
        Size of the a-grid.
    aXtraExtra: [float]
        Extra values to splice into the a-grid.
    aXtraNestFac: int
        Level of nesting for the exponentially spaced grid.

    Returns
    -------
    aXtraGrid: np.ndarray
        Base array of values for the post-decision-state grid.
    '''
    a_min = parameters.aXtraMin
    a_max = parameters.aXtraMax
    a_count = parameters.aXtraCount
    a_extra = parameters.aXtraExtra
    nest_fac = parameters.aXtraNestFac
    # grid_type is currently hard-coded; the 'linear' branch is kept for reference
    grid_type = 'exp_mult'
    if grid_type == "linear":
        base_grid = np.linspace(a_min, a_max, a_count)
    elif grid_type == "exp_mult":
        base_grid = makeGridExpMult(ming=a_min, maxg=a_max, ng=a_count, timestonest=nest_fac)
    else:
        raise Exception("grid_type not recognized in __init__." + \
                        "Please ensure grid_type is 'linear' or 'exp_mult'")
    # Splice each requested extra point into the (sorted) grid, skipping
    # missing values and points already present.
    for extra_pt in a_extra:
        if extra_pt is not None and extra_pt not in base_grid:
            insert_at = base_grid.searchsorted(extra_pt)
            base_grid = np.insert(base_grid, insert_at, extra_pt)
    return base_grid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assignParameters(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
    '''
    Save necessary parameters as attributes of self for use by other methods.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    DiscFac : float
        Intertemporal discount factor for future utility.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.

    Returns
    -------
    none
    '''
    for attr_name, attr_value in (('solution_next', solution_next),
                                  ('DiscFac', DiscFac), ('LivPrb', LivPrb),
                                  ('CRRA', CRRA), ('Rfree', Rfree),
                                  ('PermGroFac', PermGroFac)):
        setattr(self, attr_name, attr_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def defValueFuncs(self):
    '''
    Define the value and marginal value functions for this (perfect
    foresight) period. The pseudo-inverse value function is linear in m,
    so two interpolation nodes suffice; it is then "re-curved" by ValueFunc.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    # Slope of the pseudo-inverse value function implied by the MPC
    MPCnvrs = self.MPC**(-self.CRRA/(1.0-self.CRRA))
    m_nodes = np.array([self.mNrmMin, self.mNrmMin + 1.0])
    vNvrs_nodes = np.array([0.0, MPCnvrs])
    self.vFunc = ValueFunc(LinearInterp(m_nodes, vNvrs_nodes), self.CRRA)
    self.vPfunc = MargValueFunc(self.cFunc, self.CRRA)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self):
    '''
    Solve the one period perfect foresight consumption-saving problem.

    Parameters
    ----------
    none

    Returns
    -------
    solution : ConsumerSolution
        The solution to this period's problem.
    '''
    self.defUtilityFuncs()
    # Effective discounting combines pure time preference with survival
    self.DiscFacEff = self.DiscFac*self.LivPrb
    self.makePFcFunc()
    self.defValueFuncs()
    # Under perfect foresight the MPC is constant, so MPCmin == MPCmax
    return ConsumerSolution(cFunc=self.cFunc, vFunc=self.vFunc,
                            vPfunc=self.vPfunc, mNrmMin=self.mNrmMin,
                            hNrm=self.hNrmNow, MPCmin=self.MPC, MPCmax=self.MPC)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assignParameters(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
                     PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
    '''
    Assign period parameters as attributes of self for use by other methods.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    IncomeDstn : [np.array]
        A list containing three arrays of floats, representing a discrete
        approximation to the income process between the period being solved
        and the one immediately following (in solution_next). Order: event
        probabilities, permanent shocks, transitory shocks.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt: float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with. If it is less than the natural borrowing constraint,
        then it is irrelevant; BoroCnstArt=None indicates no artificial
        borrowing constraint.
    aXtraGrid: np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level.
    vFuncBool: boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    CubicBool: boolean
        An indicator for whether the solver should use cubic or linear
        interpolation.

    Returns
    -------
    none
    '''
    # Delegate the perfect foresight parameters to the parent solver's method
    ConsPerfForesightSolver.assignParameters(self, solution_next, DiscFac,
                                             LivPrb, CRRA, Rfree, PermGroFac)
    # Store the parameters specific to the risky-income problem
    self.BoroCnstArt = BoroCnstArt
    self.IncomeDstn = IncomeDstn
    self.aXtraGrid = aXtraGrid
    self.vFuncBool = vFuncBool
    self.CubicBool = CubicBool
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepareToSolve(self):
    '''
    Perform preparatory work before calculating the unconstrained
    consumption function: update period values from next period's solution
    and the income distribution, then pin down the borrowing constraint.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    self.setAndUpdateValues(self.solution_next, self.IncomeDstn,
                            self.LivPrb, self.DiscFac)
    self.defBoroCnst(self.BoroCnstArt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepareToCalcEndOfPrdvP(self):
    '''
    Prepare to calculate end-of-period marginal value by building an array
    of next-period market resources for every combination of end-of-period
    asset gridpoint and income shock realization. Intermediate arrays are
    stored on self for use by calcEndOfPrdvP.

    Parameters
    ----------
    none

    Returns
    -------
    aNrmNow : np.array
        A 1D array of end-of-period assets; also stored as attribute of self.
    '''
    # The asset grid starts at the natural borrowing constraint even when
    # BoroCnstNat < BoroCnstArt, so the consumption function can later be
    # formed as the lower envelope of the unconstrained cFunc and the
    # artificially constrained one.
    assets_now = np.asarray(self.aXtraGrid) + self.BoroCnstNat
    n_assets = assets_now.shape[0]
    n_shocks = self.TranShkValsNext.size
    # Tile assets across shock rows, and shocks/probabilities across asset columns
    assets_tiled = np.tile(assets_now, (n_shocks, 1))
    perm_tiled = np.tile(self.PermShkValsNext, (n_assets, 1)).transpose()
    tran_tiled = np.tile(self.TranShkValsNext, (n_assets, 1)).transpose()
    prob_tiled = np.tile(self.ShkPrbsNext, (n_assets, 1)).transpose()
    # Normalized cash on hand next period for each (shock, asset) pair
    m_next = self.Rfree/(self.PermGroFac*perm_tiled)*assets_tiled + tran_tiled
    # Store and report the results
    self.PermShkVals_temp = perm_tiled
    self.ShkPrbs_temp = prob_tiled
    self.mNrmNext = m_next
    self.aNrmNow = assets_now
    return assets_now
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self):
    '''
    Solve a one period consumption-saving problem with risky income.

    Parameters
    ----------
    None

    Returns
    -------
    solution : ConsumerSolution
        The solution to the one period problem.
    '''
    end_of_prd_assets = self.prepareToCalcEndOfPrdvP()
    end_of_prd_vP = self.calcEndOfPrdvP()
    basic = self.makeBasicSolution(end_of_prd_vP, end_of_prd_assets,
                                   self.makeLinearcFunc)
    return self.addMPCandHumanWealth(basic)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def makeCubiccFunc(self,mNrm,cNrm):
    '''
    Construct a cubic spline interpolation of the unconstrained consumption
    function for this period over normalized market resources.

    Parameters
    ----------
    mNrm : np.array
        Corresponding market resource points for interpolation.
    cNrm : np.array
        Consumption points for interpolation.

    Returns
    -------
    cFuncUnc : CubicInterp
        The unconstrained consumption function for this period.
    '''
    # End-of-period marginal marginal value: expectation over income shocks
    vPP_end = (self.DiscFacEff*self.Rfree*self.Rfree
               * self.PermGroFac**(-self.CRRA-1.0)
               * np.sum(self.PermShkVals_temp**(-self.CRRA-1.0)
                        * self.vPPfuncNext(self.mNrmNext)*self.ShkPrbs_temp,
                        axis=0))
    # Slope dc/da at each gridpoint (skipping the first point), converted
    # to the MPC dc/dm = dcda/(dcda+1); MPCmaxNow is used at the bottom.
    dcda = vPP_end/self.uPP(np.array(cNrm[1:]))
    mpc_grid = np.insert(dcda/(dcda + 1.), 0, self.MPCmaxNow)
    return CubicInterp(mNrm, cNrm, mpc_grid,
                       self.MPCminNow*self.hNrmNow, self.MPCminNow)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def addvFunc(self,solution,EndOfPrdvP):
    '''
    Creates the value function for this period and adds it to the solution.
    Parameters
    ----------
    solution : ConsumerSolution
        The solution to this single period problem, likely including the
        consumption function, marginal value function, etc.
    EndOfPrdvP : np.array
        Array of end-of-period marginal value of assets corresponding to the
        asset values in self.aNrmNow.
    Returns
    -------
    solution : ConsumerSolution
        The single period solution passed as an input, but now with the
        value function (defined over market resources m) as an attribute.
    '''
    # first construct the end-of-period value function over assets, then
    # build this period's value function from it and attach it
    self.makeEndOfPrdvFunc(EndOfPrdvP)
    solution.vFunc = self.makevFunc(solution)
    return solution
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def makevFunc(self,solution):
    '''
    Creates the value function for this period, defined over market resources m.
    self must have the attribute EndOfPrdvFunc in order to execute.
    Parameters
    ----------
    solution : ConsumerSolution
        The solution to this single period problem, which must include the
        consumption function.
    Returns
    -------
    vFuncNow : ValueFunc
        A representation of the value function for this period, defined over
        normalized market resources m: v = vFuncNow(m).
    '''
    # Compute expected value and marginal value on a grid of market resources
    mNrm_temp = self.mNrmMinNow + self.aXtraGrid
    cNrmNow = solution.cFunc(mNrm_temp)
    aNrmNow = mNrm_temp - cNrmNow
    vNrmNow = self.u(cNrmNow) + self.EndOfPrdvFunc(aNrmNow)
    vPnow = self.uP(cNrmNow)
    # Construct the beginning-of-period value function
    vNvrs = self.uinv(vNrmNow) # value transformed through inverse utility
    vNvrsP = vPnow*self.uinvP(vNrmNow)
    # add the lower bound of market resources, where pseudo-inverse value is
    # zero and its slope comes from the effective maximum MPC
    mNrm_temp = np.insert(mNrm_temp,0,self.mNrmMinNow)
    vNvrs = np.insert(vNvrs,0,0.0)
    vNvrsP = np.insert(vNvrsP,0,self.MPCmaxEff**(-self.CRRA/(1.0-self.CRRA)))
    MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA))
    # interpolate pseudo-inverse value, then "re-curve" it with the CRRA utility
    vNvrsFuncNow = CubicInterp(mNrm_temp,vNvrs,vNvrsP,MPCminNvrs*self.hNrmNow,MPCminNvrs)
    vFuncNow = ValueFunc(vNvrsFuncNow,self.CRRA)
    return vFuncNow
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def prepareToCalcEndOfPrdvP(self):
    '''
    Prepare to calculate end-of-period marginal value by creating an array
    of market resources that the agent could have next period, considering
    the grid of end-of-period assets and the distribution of shocks he might
    experience next period. This differs from the baseline case because
    different savings choices yield different interest rates.
    Parameters
    ----------
    none
    Returns
    -------
    aNrmNow : np.array
        A 1D array of end-of-period assets; also stored as attribute of self.
    '''
    KinkBool = self.Rboro > self.Rsave # Boolean indicating that there is actually a kink.
    # When Rboro == Rsave, this method acts just like it did in IndShock.
    # When Rboro < Rsave, the solver would have terminated when it was called.
    # Make a grid of end-of-period assets, including *two* copies of a=0
    if KinkBool:
        aNrmNow = np.sort(np.hstack((np.asarray(self.aXtraGrid) + self.mNrmMinNow,
                                     np.array([0.0,0.0]))))
    else:
        aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow
    aXtraCount = aNrmNow.size
    # Make tiled versions of the assets grid and income shocks
    ShkCount = self.TranShkValsNext.size
    aNrm_temp = np.tile(aNrmNow,(ShkCount,1))
    PermShkVals_temp = (np.tile(self.PermShkValsNext,(aXtraCount,1))).transpose()
    TranShkVals_temp = (np.tile(self.TranShkValsNext,(aXtraCount,1))).transpose()
    ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aXtraCount,1))).transpose()
    # Make a 1D array of the interest factor at each asset gridpoint
    Rfree_vec = self.Rsave*np.ones(aXtraCount)
    if KinkBool:
        # gridpoints with a <= 0 (except the second duplicated zero) accrue
        # interest at the borrowing rate
        Rfree_vec[0:(np.sum(aNrmNow<=0)-1)] = self.Rboro
    self.Rfree = Rfree_vec
    Rfree_temp = np.tile(Rfree_vec,(ShkCount,1))
    # Make an array of market resources that we could have next period,
    # considering the grid of assets and the income shocks that could occur
    mNrmNext = Rfree_temp/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp
    # Recalculate the minimum MPC and human wealth using the interest factor on saving.
    # This overwrites values from setAndUpdateValues, which were based on Rboro instead.
    if KinkBool:
        PatFacTop = ((self.Rsave*self.DiscFacEff)**(1.0/self.CRRA))/self.Rsave
        self.MPCminNow = 1.0/(1.0 + PatFacTop/self.solution_next.MPCmin)
        self.hNrmNow = self.PermGroFac/self.Rsave*(np.dot(self.ShkPrbsNext,
                       self.TranShkValsNext*self.PermShkValsNext) + self.solution_next.hNrm)
    # Store some of the constructed arrays for later use and return the assets grid
    self.PermShkVals_temp = PermShkVals_temp
    self.ShkPrbs_temp = ShkPrbs_temp
    self.mNrmNext = mNrmNext
    self.aNrmNow = aNrmNow
    return aNrmNow
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simDeath(self):
    '''
    Determine which agents die this period and must be replaced, using the
    age-varying survival probabilities in LivPrb.

    Parameters
    ----------
    None

    Returns
    -------
    which_agents : np.array(bool)
        Boolean array of size AgentCount indicating which agents die.
    '''
    # death probability per agent; t_cycle has already advanced, so look back one
    mort_prb = (1.0 - np.asarray(self.LivPrb))[self.t_cycle - 1]
    # draw uniform mortality shocks and compare to each agent's death probability
    draws = drawUniform(N=self.AgentCount, seed=self.RNG.randint(0, 2**31 - 1))
    which_agents = draws < mort_prb
    # agents that have reached the maximum age also die
    if self.T_age is not None:
        which_agents = np.logical_or(which_agents, self.t_age >= self.T_age)
    return which_agents
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getStates(self):
    '''
    Calculate updated values of normalized market resources and permanent
    income level for each agent. Uses pLvlNow, aNrmNow, PermShkNow, TranShkNow.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    pLvlPrev = self.pLvlNow
    aNrmPrev = self.aNrmNow
    RfreeNow = self.getRfree()
    # update idiosyncratic and aggregate permanent income levels
    self.pLvlNow = pLvlPrev * self.PermShkNow
    self.PlvlAggNow = self.PlvlAggNow * self.PermShkAggNow
    # bank balances: assets grow by the interest factor, deflated by the
    # permanent shock ("effective" return on normalized assets)
    self.bNrmNow = (RfreeNow / self.PermShkNow) * aNrmPrev
    # market resources are bank balances plus transitory income
    self.mNrmNow = self.bNrmNow + self.TranShkNow
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateIncomeProcess(self):
    '''
    Update this agent's income process based on its own attributes.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    # construction requires time to flow forward; remember the direction
    was_forward = self.time_flow
    self.timeFwd()
    IncomeDstn, PermShkDstn, TranShkDstn = constructLognormalIncomeProcessUnemployment(self)
    self.IncomeDstn = IncomeDstn
    self.PermShkDstn = PermShkDstn
    self.TranShkDstn = TranShkDstn
    self.addToTimeVary('IncomeDstn','PermShkDstn','TranShkDstn')
    # restore the original direction of time
    if not was_forward:
        self.timeRev()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateAssetsGrid(self):
    '''
    Update this agent's end-of-period assets grid by constructing a
    multi-exponentially spaced grid of aXtra values.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    # build the grid, store it, and mark it as time-invariant
    self.aXtraGrid = constructAssetsGrid(self)
    self.addToTimeInv('aXtraGrid')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_file(fname):
    """Given the name of a pcap file, open it, decode the contents and yield each packet."""
    if _debug: decode_file._debug("decode_file %r", fname)
    if not pcap:
        raise RuntimeError("failed to import pcap")
    # read the capture file; packet numbers are 1-based as in Wireshark
    reader = pcap.pcap(fname)
    for pkt_number, (timestamp, data) in enumerate(reader, start=1):
        try:
            pkt = decode_packet(data)
            if not pkt:
                continue
        except Exception as err:
            if _debug: decode_file._debug("    - exception decoding packet %d: %r", pkt_number, err)
            continue
        # tag the packet with its number and capture timestamp
        pkt._number = pkt_number
        pkt._timestamp = timestamp
        yield pkt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(*args):
    """Call to stop running, may be called with a signum and frame parameter if called as a signal handler.""" |
    if _debug: stop._debug("stop")
    global running, taskManager
    # when invoked as a signal handler, note the termination on stderr
    if args:
        sys.stderr.write("===== TERM Signal, %s\n" % time.strftime("%d-%b-%Y %H:%M:%S"))
        sys.stderr.flush()
    # tell the main loop to exit
    running = False
    # trigger the task manager event
    if taskManager and taskManager.trigger:
        if _debug: stop._debug("    - trigger")
        taskManager.trigger.set()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_stack(sig, frame):
    """Signal handler to print a stack trace and some interesting values."""
    if _debug: print_stack._debug("print_stack %r %r", sig, frame)
    global running, deferredFns, sleeptime

    # dump the globals of interest
    sys.stderr.write("==== USR1 Signal, %s\n" % time.strftime("%d-%b-%Y %H:%M:%S"))
    sys.stderr.write("---------- globals\n")
    sys.stderr.write("    running: %r\n" % (running,))
    sys.stderr.write("    deferredFns: %r\n" % (deferredFns,))
    sys.stderr.write("    sleeptime: %r\n" % (sleeptime,))

    # dump the stack itself
    sys.stderr.write("---------- stack\n")
    traceback.print_stack(frame)

    # walk from the signaled frame to the root, collecting frames
    frames = []
    f = frame
    while f.f_back:
        frames.append(f)
        f = f.f_back

    # show locals of each frame, outermost first (same order as print_stack)
    for f in reversed(frames):
        sys.stderr.write("---------- frame: %s\n" % (f,))
        for k, v in f.f_locals.items():
            sys.stderr.write("    %s: %r\n" % (k, v))

    sys.stderr.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose_capability(base, *classes):
    """Create a new class starting with the base and adding capabilities."""
    if _debug: compose_capability._debug("compose_capability %r %r", base, classes)

    # the starting point must be a Collector
    if not issubclass(base, Collector):
        raise TypeError("base must be a subclass of Collector")

    # everything added must be a Capability
    for cls in classes:
        if not issubclass(cls, Capability):
            raise TypeError("%s is not a Capability subclass" % (cls,))

    # compose the new name from the base and each capability
    name = '+'.join([base.__name__] + [cls.__name__ for cls in classes])

    # derive from the base and all of the capabilities
    return type(name, (base,) + classes, {})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_capability(base, *classes):
    """Add capabilities to an existing base; all objects get the additional functionality, but don't get inited. Use with great care!"""
    if _debug: add_capability._debug("add_capability %r %r", base, classes)

    # only a Collector can be extended this way
    if not issubclass(base, Collector):
        raise TypeError("base must be a subclass of Collector")

    # only capabilities may be grafted on
    for cls in classes:
        if not issubclass(cls, Capability):
            raise TypeError("%s is not a Capability subclass" % (cls,))

    # mutate the existing class in place: extend its bases and its name
    base.__bases__ += classes
    base.__name__ += ''.join('+' + cls.__name__ for cls in classes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _search_capability(self, base):
    """Given a class, return a list of all of the derived classes that are themselves derived from Capability."""
    if _debug: Collector._debug("_search_capability %r", base)

    rslt = []
    for cls in base.__bases__:
        if issubclass(cls, Collector):
            # recurse into nested collectors; use extend() because
            # map(rslt.append, ...) is lazy on Python 3 and would silently
            # discard the recursive results
            rslt.extend(self._search_capability(cls))
        elif issubclass(cls, Capability):
            rslt.append(cls)
    if _debug: Collector._debug("    - rslt: %r", rslt)

    return rslt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def capability_functions(self, fn):
    """This generator yields functions that match the requested capability sorted by z-index."""
    if _debug: Collector._debug("capability_functions %r", fn)

    # collect the matching function from each capability, keyed by z-index
    matches = []
    for cls in self.capabilities:
        xfn = getattr(cls, fn, None)
        if _debug: Collector._debug("    - cls, xfn: %r, %r", cls, xfn)
        if xfn:
            # NOTE(review): a capability without _zindex contributes None,
            # which cannot be ordered against ints on Python 3 -- confirm
            # every capability defines _zindex
            matches.append((getattr(cls, '_zindex', None), xfn))

    # order the functions by z-index
    matches.sort(key=lambda pair: pair[0])
    if _debug: Collector._debug("    - fns: %r", matches)

    # yield them in order
    for zindex, xfn in matches:
        if _debug: Collector._debug("    - yield xfn: %r", xfn)
        yield xfn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_capability(self, cls):
    """Add a capability to this object."""
    if _debug: Collector._debug("add_capability %r", cls)

    # remember the capability for capability_functions()
    self.capabilities.append(cls)

    # morph this instance into a new type deriving from the current class
    # plus the capability
    bases = (self.__class__, cls)
    if _debug: Collector._debug("    - bases: %r", bases)
    self.__class__ = type(self.__class__.__name__ + '+' + cls.__name__, bases, {})

    # give the capability a chance to initialize the instance
    if hasattr(cls, '__init__'):
        if _debug: Collector._debug("    - calling %r.__init__", cls)
        cls.__init__(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _merge(*args):
    """Create a composite pattern and compile it."""
    # join the pieces with a date separator and allow an optional trailing
    # day-of-week, anchored to the whole string
    body = r'[/-]'.join(args)
    return re.compile(r'^' + body + r'(?:\s+' + _dow + ')?$')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode(self, pdu):
    """Encode a tag on the end of the PDU.""" |
    # check for special encoding
    # the low nibble of the initial octet carries the class/LVT marker:
    # 0x08 = context class, 0x0E/0x0F = opening/closing, 0x00 = application
    if (self.tagClass == Tag.contextTagClass):
        data = 0x08
    elif (self.tagClass == Tag.openingTagClass):
        data = 0x0E
    elif (self.tagClass == Tag.closingTagClass):
        data = 0x0F
    else:
        data = 0x00
    # encode the tag number part; 0xF0 in the upper nibble means an
    # extended tag number octet follows
    if (self.tagNumber < 15):
        data += (self.tagNumber << 4)
    else:
        data += 0xF0
    # encode the length/value/type part; 5 and above use extended length
    if (self.tagLVT < 5):
        data += self.tagLVT
    else:
        data += 0x05
    # save this and the extended tag value
    pdu.put( data )
    if (self.tagNumber >= 15):
        pdu.put(self.tagNumber)
    # really short lengths are already done
    # extended lengths: one octet up to 253, 254 + 2 octets up to 65535,
    # otherwise 255 + 4 octets
    if (self.tagLVT >= 5):
        if (self.tagLVT <= 253):
            pdu.put( self.tagLVT )
        elif (self.tagLVT <= 65535):
            pdu.put( 254 )
            pdu.put_short( self.tagLVT )
        else:
            pdu.put( 255 )
            pdu.put_long( self.tagLVT )
    # now put the data
    pdu.put_data(self.tagData)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def app_to_object(self):
    """Return the application object encoded by the tag."""
    # only application-class tags can be interpreted this way
    if self.tagClass != Tag.applicationTagClass:
        raise ValueError("application tag required")

    # look up the class for this tag number; some numbers have none
    klass = self._app_tag_class[self.tagNumber]

    # build an object from this tag, or None when there is no class
    return klass(self) if klass else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Pop(self):
    """Remove the tag from the front of the list and return it."""
    # an empty list yields None rather than raising
    if not self.tagList:
        return None
    return self.tagList.pop(0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_context(self, context):
    """Return a tag or a list of tags context encoded.""" |
    # forward pass
    i = 0
    while i < len(self.tagList):
        tag = self.tagList[i]
        # skip application stuff
        if tag.tagClass == Tag.applicationTagClass:
            pass
        # check for context encoded atomic value
        elif tag.tagClass == Tag.contextTagClass:
            if tag.tagNumber == context:
                return tag
        # check for context encoded group
        elif tag.tagClass == Tag.openingTagClass:
            # gather everything between this opening tag and its matching
            # closing tag, tracking nesting depth in lvl
            keeper = tag.tagNumber == context
            rslt = []
            i += 1
            lvl = 0
            while i < len(self.tagList):
                tag = self.tagList[i]
                if tag.tagClass == Tag.openingTagClass:
                    lvl += 1
                elif tag.tagClass == Tag.closingTagClass:
                    lvl -= 1
                    if lvl < 0: break
                rslt.append(tag)
                i += 1
            # make sure everything balances
            if lvl >= 0:
                raise InvalidTag("mismatched open/close tags")
            # get everything we need?
            if keeper:
                return TagList(rslt)
            else:
                # NOTE(review): this raises for every balanced group whose
                # number does not match 'context'; upstream versions skip
                # non-matching groups instead -- confirm intended behavior
                raise InvalidTag("unexpected tag")
        # try the next tag
        i += 1
    # nothing found
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(self, pdu):
    """Decode tags from the PDU until its data is exhausted."""
    # each Tag() consumes its own encoding from the front of the PDU
    append_tag = self.tagList.append
    while pdu.pduData:
        append_tag(Tag(pdu))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coerce(cls, arg):
    """Given an arg, return the appropriate value given the class."""
    # construct an instance to validate/convert, translating construction
    # failures into the parameter datatype error
    try:
        value = cls(arg).value
    except (ValueError, TypeError):
        raise InvalidParameterDatatype("%s coerce error" % (cls.__name__,))
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keylist(self):
    """Return a list of names in order by value."""
    # sort the (name, value) pairs by value; on Python 3 a dict items view
    # has no .sort() and list.sort() no longer accepts a comparison
    # function, so use sorted() with a key
    items = sorted(self.enumerations.items(), key=lambda pair: pair[1])

    # the last item has the highest value; slots with no name stay None
    rslt = [None] * (items[-1][1] + 1)

    # map the values
    for key, value in items:
        rslt[value] = key

    # return the result
    return rslt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CalcDayOfWeek(self):
    """Calculate the correct day of the week.""" |
    # rip apart the value
    year, month, day, day_of_week = self.value
    # assume the worst
    day_of_week = 255
    # check for special values
    # year 255 and the special month/day encodings are wildcards, so the
    # day of week cannot be computed for them
    if year == 255:
        pass
    elif month in _special_mon_inv:
        pass
    elif day in _special_day_inv:
        pass
    else:
        try:
            # year is stored as an offset from 1900; day_of_week is 1=Monday
            today = time.mktime( (year + 1900, month, day, 0, 0, 0, 0, 0, -1) )
            # NOTE(review): mktime interprets the tuple as local time but
            # gmtime converts back as UTC -- confirm this cannot shift the
            # weekday in timezones far from UTC
            day_of_week = time.gmtime(today)[6] + 1
        except OverflowError:
            pass
    # put it back together
    self.value = (year, month, day, day_of_week)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def now(self, when=None):
    """Set the current value to the correct tuple based on the seconds since the epoch. If 'when' is not provided, get the current time from the task manager."""
    # default to the task manager's notion of the current time
    if when is None:
        when = _TaskManager().get_time()

    # year is stored as an offset from 1900; day of week is 1=Monday
    tup = time.localtime(when)
    self.value = (tup.tm_year - 1900, tup.tm_mon, tup.tm_mday, tup.tm_wday + 1)

    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_device_info(self, key):
    """Return true iff cache has information about the device.""" |
    if _debug: DeviceInfoCache._debug("has_device_info %r", key)
    # records are cached under both device instance and address keys
    return key in self.cache
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iam_device_info(self, apdu):
    """Create a device information record based on the contents of an IAmRequest and put it in the cache.""" |
    if _debug: DeviceInfoCache._debug("iam_device_info %r", apdu)
    # make sure the apdu is an I-Am
    if not isinstance(apdu, IAmRequest):
        raise ValueError("not an IAmRequest: %r" % (apdu,))
    # get the device instance
    device_instance = apdu.iAmDeviceIdentifier[1]
    # get the existing cache record if it exists
    device_info = self.cache.get(device_instance, None)
    # maybe there is a record for this address
    if not device_info:
        device_info = self.cache.get(apdu.pduSource, None)
    # make a new one using the class provided
    if not device_info:
        device_info = self.device_info_class(device_instance, apdu.pduSource)
    # jam in the correct values
    # overwrite even on an existing record: the device may have moved or
    # changed its parameters since it was last cached
    device_info.deviceIdentifier = device_instance
    device_info.address = apdu.pduSource
    device_info.maxApduLengthAccepted = apdu.maxAPDULengthAccepted
    device_info.segmentationSupported = apdu.segmentationSupported
    device_info.vendorID = apdu.vendorID
    # tell the cache this is an updated record
    self.update_device_info(device_info)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_device_info(self, device_info):
    """The application has updated one or more fields in the device information record and the cache needs to be updated to reflect the changes. If this is a cached version of a persistent record then this is the opportunity to update the database.""" |
    if _debug: DeviceInfoCache._debug("update_device_info %r", device_info)
    # give this a reference count if it doesn't have one
    if not hasattr(device_info, '_ref_count'):
        device_info._ref_count = 0
    # get the current keys
    # _cache_keys remembers the (identifier, address) pair the record was
    # last filed under, so stale entries can be removed when either changes
    cache_id, cache_address = getattr(device_info, '_cache_keys', (None, None))
    if (cache_id is not None) and (device_info.deviceIdentifier != cache_id):
        if _debug: DeviceInfoCache._debug("    - device identifier updated")
        # remove the old reference, add the new one
        del self.cache[cache_id]
        self.cache[device_info.deviceIdentifier] = device_info
    if (cache_address is not None) and (device_info.address != cache_address):
        if _debug: DeviceInfoCache._debug("    - device address updated")
        # remove the old reference, add the new one
        del self.cache[cache_address]
        self.cache[device_info.address] = device_info
    # update the keys
    device_info._cache_keys = (device_info.deviceIdentifier, device_info.address)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def acquire(self, key):
    """Return the known information about the device and mark the record as being used by a segmentation state machine."""
    if _debug: DeviceInfoCache._debug("acquire %r", key)

    # the key is either a device instance number or a station address
    if not isinstance(key, int):
        if not isinstance(key, Address):
            raise TypeError("key must be integer or an address")
        if key.addrType not in (Address.localStationAddr, Address.remoteStationAddr):
            raise TypeError("address must be a local or remote station")

    device_info = self.cache.get(key, None)

    # bump the reference count while a state machine holds the record
    if device_info:
        if _debug: DeviceInfoCache._debug("    - reference bump")
        device_info._ref_count += 1

    if _debug: DeviceInfoCache._debug("    - device_info: %r", device_info)

    return device_info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def release(self, device_info):
    """This function is called by the segmentation state machine when it has finished with the device information."""
    if _debug: DeviceInfoCache._debug("release %r", device_info)

    # more than one SSM may hold this record; the count must be positive
    # for a release to be valid
    if device_info._ref_count == 0:
        raise RuntimeError("reference count")

    # drop this state machine's reference
    device_info._ref_count -= 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_services_supported(self):
    """Return a ServicesSupported bit string based in introspection, look for helper methods that match confirmed and unconfirmed services."""
    if _debug: Application._debug("get_services_supported")

    services_supported = ServicesSupported()

    # a service is supported iff a do_<RequestClass> helper method exists;
    # scan both the confirmed and unconfirmed service tables
    service_tables = (
        (confirmed_request_types, ConfirmedServiceChoice),
        (unconfirmed_request_types, UnconfirmedServiceChoice),
        )
    for request_types, choice_class in service_tables:
        for service_choice, request_class in request_types.items():
            if hasattr(self, "do_" + request_class.__name__):
                service_supported = choice_class._xlate_table[service_choice]
                services_supported[service_supported] = 1

    # return the bit list
    return services_supported
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_next_task(self):
    """get the next task if there's one that should be processed, and return how long it will be until the next one should be processed.""" |
    if _debug: TaskManager._debug("get_next_task")
    # get the time
    now = _time()
    task = None
    delta = None
    if self.tasks:
        # look at the first task
        # heap entries are (when, n, task) triples; n is presumably an
        # insertion counter used as a tie-breaker -- confirm at push site
        when, n, nxttask = self.tasks[0]
        if when <= now:
            # pull it off the list and mark that it's no longer scheduled
            heappop(self.tasks)
            task = nxttask
            task.isScheduled = False
            if self.tasks:
                when, n, nxttask = self.tasks[0]
                # peek at the next task, return how long to wait
                delta = max(when - now, 0.0)
        else:
            # nothing ready yet, report how long until the first task is due
            delta = when - now
    # return the task to run and how long to wait for the next one
    return (task, delta)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_date(date, date_pattern):
    """Match a specific date, a four-tuple with no special values, with a date pattern, four-tuple possibly having special values."""
    year, month, day, day_of_week = date
    year_p, month_p, day_p, day_of_week_p = date_pattern

    # year: 255 is a wildcard, anything else must match exactly
    if year_p != 255 and year != year_p:
        return False

    # month: 255 any, 13 odd months, 14 even months, otherwise exact
    if month_p == 13:
        if month % 2 == 0:
            return False
    elif month_p == 14:
        if month % 2 == 1:
            return False
    elif month_p != 255 and month != month_p:
        return False

    # day: 255 any, 32 last day of the month, 33 odd days, 34 even days,
    # otherwise exact
    if day_p == 32:
        if day != calendar.monthrange(year + 1900, month)[1]:
            return False
    elif day_p == 33:
        if day % 2 == 0:
            return False
    elif day_p == 34:
        if day % 2 == 1:
            return False
    elif day_p != 255 and day != day_p:
        return False

    # day of week: 255 any, otherwise exact
    if day_of_week_p != 255 and day_of_week != day_of_week_p:
        return False

    # all tests pass
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def datetime_to_time(date, time):
    """Take the date and time 4-tuples and return the time in seconds since the epoch as a floating point number."""
    # wildcard fields (255) cannot be converted to a point in time
    if (255 in date) or (255 in time):
        raise RuntimeError("specific date and time required")

    # year is stored as an offset from 1900; hundredths are dropped
    return _mktime((
        date[0] + 1900, date[1], date[2],
        time[0], time[1], time[2],
        0, 0, -1,
        ))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_reliability(self, old_value=None, new_value=None):
    """This function is called when the object is created and after one of its configuration properties has changed. The new and old value parameters are ignored, this is called after the property has been changed and this is only concerned with the current value.""" |
    if _debug: LocalScheduleObject._debug("_check_reliability %r %r", old_value, new_value)
    # any configuration problem is reported through the reliability
    # property rather than raised to the caller
    try:
        schedule_default = self.scheduleDefault
        if schedule_default is None:
            raise ValueError("scheduleDefault expected")
        if not isinstance(schedule_default, Atomic):
            raise TypeError("scheduleDefault must be an instance of an atomic type")
        # every scheduled value must share the default's datatype (or Null)
        schedule_datatype = schedule_default.__class__
        if _debug: LocalScheduleObject._debug("    - schedule_datatype: %r", schedule_datatype)
        if (self.weeklySchedule is None) and (self.exceptionSchedule is None):
            raise ValueError("schedule required")
        # check the weekly schedule values
        if self.weeklySchedule:
            for daily_schedule in self.weeklySchedule:
                for time_value in daily_schedule.daySchedule:
                    if _debug: LocalScheduleObject._debug("    - daily time_value: %r", time_value)
                    if time_value is None:
                        pass
                    elif not isinstance(time_value.value, (Null, schedule_datatype)):
                        if _debug: LocalScheduleObject._debug("    - wrong type: expected %r, got %r",
                            schedule_datatype,
                            time_value.__class__,
                            )
                        raise TypeError("wrong type")
                    elif 255 in time_value.time:
                        # 255 is the wildcard field value
                        if _debug: LocalScheduleObject._debug("    - wildcard in time")
                        raise ValueError("must be a specific time")
        # check the exception schedule values
        if self.exceptionSchedule:
            for special_event in self.exceptionSchedule:
                for time_value in special_event.listOfTimeValues:
                    if _debug: LocalScheduleObject._debug("    - special event time_value: %r", time_value)
                    if time_value is None:
                        pass
                    elif not isinstance(time_value.value, (Null, schedule_datatype)):
                        if _debug: LocalScheduleObject._debug("    - wrong type: expected %r, got %r",
                            schedule_datatype,
                            time_value.__class__,
                            )
                        raise TypeError("wrong type")
        # check list of object property references
        obj_prop_refs = self.listOfObjectPropertyReferences
        if obj_prop_refs:
            for obj_prop_ref in obj_prop_refs:
                if obj_prop_ref.deviceIdentifier:
                    raise RuntimeError("no external references")
                # get the datatype of the property to be written
                obj_type = obj_prop_ref.objectIdentifier[0]
                datatype = get_datatype(obj_type, obj_prop_ref.propertyIdentifier)
                if _debug: LocalScheduleObject._debug("    - datatype: %r", datatype)
                # array index 0 is the array length (Unsigned), any other
                # index addresses an element of the array's subtype
                if issubclass(datatype, Array) and (obj_prop_ref.propertyArrayIndex is not None):
                    if obj_prop_ref.propertyArrayIndex == 0:
                        datatype = Unsigned
                    else:
                        datatype = datatype.subtype
                    if _debug: LocalScheduleObject._debug("    - datatype: %r", datatype)
                if datatype is not schedule_datatype:
                    if _debug: LocalScheduleObject._debug("    - wrong type: expected %r, got %r",
                        datatype,
                        schedule_datatype,
                        )
                    raise TypeError("wrong type")
        # all good
        self.reliability = 'noFaultDetected'
        if _debug: LocalScheduleObject._debug("    - no fault detected")
    except Exception as err:
        if _debug: LocalScheduleObject._debug("    - exception: %r", err)
        self.reliability = 'configurationError'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def present_value_changed(self, old_value, new_value):
"""This function is called when the presentValue of the local schedule object has changed, both internally by this interpreter, or externally by some client using WriteProperty.""" |
if _debug: LocalScheduleInterpreter._debug("present_value_changed %s %s", old_value, new_value)
# if this hasn't been added to an application, there's nothing to do
if not self.sched_obj._app:
if _debug: LocalScheduleInterpreter._debug(" - no application")
return
# process the list of [device] object property [array index] references
obj_prop_refs = self.sched_obj.listOfObjectPropertyReferences
if not obj_prop_refs:
if _debug: LocalScheduleInterpreter._debug(" - no writes defined")
return
# primitive values just set the value part
new_value = new_value.value
# loop through the writes
for obj_prop_ref in obj_prop_refs:
if obj_prop_ref.deviceIdentifier:
if _debug: LocalScheduleInterpreter._debug(" - no externals")
continue
# get the object from the application
obj = self.sched_obj._app.get_object_id(obj_prop_ref.objectIdentifier)
if not obj:
if _debug: LocalScheduleInterpreter._debug(" - no object")
continue
# try to change the value
try:
obj.WriteProperty(
obj_prop_ref.propertyIdentifier,
new_value,
arrayIndex=obj_prop_ref.propertyArrayIndex,
priority=self.sched_obj.priorityForWriting,
)
if _debug: LocalScheduleInterpreter._debug(" - success")
except Exception as err:
if _debug: LocalScheduleInterpreter._debug(" - error: %r", err) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_gc(self, args):
"""gc - print out garbage collection information""" |
### humm...
instance_type = getattr(types, 'InstanceType', object)
# snapshot of counts
type2count = {}
type2all = {}
for o in gc.get_objects():
if type(o) == instance_type:
type2count[o.__class__] = type2count.get(o.__class__,0) + 1
type2all[o.__class__] = type2all.get(o.__class__,0) + sys.getrefcount(o)
# count the things that have changed
ct = [ ( t.__module__
, t.__name__
, type2count[t]
, type2count[t] - self.type2count.get(t,0)
, type2all[t] - self.type2all.get(t,0)
) for t in type2count.iterkeys()
]
# ready for the next time
self.type2count = type2count
self.type2all = type2all
fmt = "%-30s %-30s %6s %6s %6s\n"
self.stdout.write(fmt % ("Module", "Type", "Count", "dCount", "dRef"))
# sorted by count
ct.sort(lambda x, y: cmp(y[2], x[2]))
for i in range(min(10,len(ct))):
m, n, c, delta1, delta2 = ct[i]
self.stdout.write(fmt % (m, n, c, delta1, delta2))
self.stdout.write("\n")
self.stdout.write(fmt % ("Module", "Type", "Count", "dCount", "dRef"))
# sorted by module and class
ct.sort()
for m, n, c, delta1, delta2 in ct:
if delta1 or delta2:
self.stdout.write(fmt % (m, n, c, delta1, delta2))
self.stdout.write("\n") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_buggers(self, args):
"""buggers - list the console logging handlers""" |
args = args.split()
if _debug: ConsoleCmd._debug("do_buggers %r", args)
if not self.handlers:
self.stdout.write("no handlers\n")
else:
self.stdout.write("handlers: ")
self.stdout.write(', '.join(loggerName or '__root__' for loggerName in self.handlers))
self.stdout.write("\n")
loggers = logging.Logger.manager.loggerDict.keys()
for loggerName in sorted(loggers):
if args and (not args[0] in loggerName):
continue
if loggerName in self.handlers:
self.stdout.write("* %s\n" % loggerName)
else:
self.stdout.write(" %s\n" % loggerName)
self.stdout.write("\n") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_EOF(self, args):
"""Exit on system end of file character""" |
if _debug: ConsoleCmd._debug("do_EOF %r", args)
return self.do_exit(args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_shell(self, args):
"""Pass command to a system shell when line begins with '!'""" |
if _debug: ConsoleCmd._debug("do_shell %r", args)
os.system(args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def confirmation(self, pdu):
"""Decode upstream PDUs and pass them up to the service access point.""" |
if _debug: NetworkAdapter._debug("confirmation %r (net=%r)", pdu, self.adapterNet)
npdu = NPDU(user_data=pdu.pduUserData)
npdu.decode(pdu)
self.adapterSAP.process_npdu(self, npdu) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_npdu(self, npdu):
"""Encode NPDUs from the service access point and send them downstream.""" |
if _debug: NetworkAdapter._debug("process_npdu %r (net=%r)", npdu, self.adapterNet)
pdu = PDU(user_data=npdu.pduUserData)
npdu.encode(pdu)
self.request(pdu) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind(self, server, net=None, address=None):
"""Create a network adapter object and bind.""" |
if _debug: NetworkServiceAccessPoint._debug("bind %r net=%r address=%r", server, net, address)
# make sure this hasn't already been called with this network
if net in self.adapters:
raise RuntimeError("already bound")
# create an adapter object, add it to our map
adapter = NetworkAdapter(self, net)
self.adapters[net] = adapter
if _debug: NetworkServiceAccessPoint._debug(" - adapters[%r]: %r", net, adapter)
# if the address was given, make it the "local" one
if address and not self.local_address:
self.local_adapter = adapter
self.local_address = address
# bind to the server
bind(adapter, server) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack_ip_addr(addr):
    """Given a six-octet BACnet address, return an IP address tuple.

    The first four octets are the IPv4 address and the last two are the
    port in network byte order.
    """
    # inet_ntoa() wants bytes, not a bytearray
    if isinstance(addr, bytearray):
        addr = bytes(addr)
    ip = socket.inet_ntoa(addr[:4])
    (port,) = struct.unpack('!H', addr[4:6])
    return (ip, port)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ModuleLogger(globs):
    """Create a module level logger.

    To debug a module, create a _debug variable in the module, then use the
    ModuleLogger function to create a "module level" logger.  When a handler
    is added to this logger or a child of this logger, the _debug variable
    will be incremented.

    All of the calls within functions or class methods within the module
    should first check to see if _debug is set to prevent calls to formatter
    objects that aren't necessary.

    :param globs: the globals() dictionary of the calling module
    :returns: a logging.Logger named after the module, with the module
        globals attached as .globs
    :raises RuntimeError: if the module has not defined _debug
    """
    # make sure that _debug is defined; dict.has_key() was removed in
    # Python 3, the 'in' operator works on both versions
    if '_debug' not in globs:
        raise RuntimeError("define _debug before creating a module logger")

    # logger name is the module name
    logger_name = globs['__name__']

    # create a logger to be assigned to _log
    logger = logging.getLogger(logger_name)

    # put in a reference to the module globals
    logger.globs = globs

    # if this is a "root" logger add a default handler for warnings and up
    if '.' not in logger_name:
        hdlr = logging.StreamHandler()
        hdlr.setLevel(logging.WARNING)
        hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
        logger.addHandler(hdlr)

    return logger
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bacpypes_debugging(obj):
    """Function for attaching a debugging logger to a class or function."""
    # the logger name combines the module name and the object name
    logger = logging.getLogger("%s.%s" % (obj.__module__, obj.__name__))

    # attach the logger and the usual level shortcuts to the object
    obj._logger = logger
    for level in ('debug', 'info', 'warning', 'error', 'exception', 'fatal'):
        setattr(obj, '_' + level, getattr(logger, level))

    # return the object so this can be used as a decorator
    return obj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_node(self, node):
""" Add a node to this network, let the node know which network it's on. """ |
if _debug: Network._debug("add_node %r", node)
self.nodes.append(node)
node.lan = self
# update the node name
if not node.name:
node.name = '%s:%s' % (self.name, node.address) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_node(self, node):
""" Remove a node from this network. """ |
if _debug: Network._debug("remove_node %r", node)
self.nodes.remove(node)
node.lan = None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_pdu(self, pdu):
""" Process a PDU by sending a copy to each node as dictated by the addressing and if a node is promiscuous. """ |
if _debug: Network._debug("process_pdu(%s) %r", self.name, pdu)
# if there is a traffic log, call it with the network name and pdu
if self.traffic_log:
self.traffic_log(self.name, pdu)
# randomly drop a packet
if self.drop_percent != 0.0:
if (random.random() * 100.0) < self.drop_percent:
if _debug: Network._debug(" - packet dropped")
return
if pdu.pduDestination == self.broadcast_address:
if _debug: Network._debug(" - broadcast")
for node in self.nodes:
if (pdu.pduSource != node.address):
if _debug: Network._debug(" - match: %r", node)
node.response(deepcopy(pdu))
else:
if _debug: Network._debug(" - unicast")
for node in self.nodes:
if node.promiscuous or (pdu.pduDestination == node.address):
if _debug: Network._debug(" - match: %r", node)
node.response(deepcopy(pdu)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind(self, lan):
"""bind to a LAN.""" |
if _debug: Node._debug("bind %r", lan)
lan.add_node(self) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Match(addr1, addr2):
    """Return true iff addr1 matches addr2.

    :param addr1: the candidate address
    :param addr2: the pattern address; broadcast types match a range of
        candidate addresses
    :raises RuntimeError: if addr2 has an unrecognized address type
    """
    if _debug: Match._debug("Match %r %r", addr1, addr2)

    if (addr2.addrType == Address.localBroadcastAddr):
        # match any local station
        return (addr1.addrType == Address.localStationAddr) or (addr1.addrType == Address.localBroadcastAddr)
    elif (addr2.addrType == Address.localStationAddr):
        # match a specific local station
        return (addr1.addrType == Address.localStationAddr) and (addr1.addrAddr == addr2.addrAddr)
    elif (addr2.addrType == Address.remoteBroadcastAddr):
        # match any remote station or remote broadcast on a matching network
        return ((addr1.addrType == Address.remoteStationAddr) or (addr1.addrType == Address.remoteBroadcastAddr)) \
            and (addr1.addrNet == addr2.addrNet)
    elif (addr2.addrType == Address.remoteStationAddr):
        # match a specific remote station
        return (addr1.addrType == Address.remoteStationAddr) and \
            (addr1.addrNet == addr2.addrNet) and (addr1.addrAddr == addr2.addrAddr)
    elif (addr2.addrType == Address.globalBroadcastAddr):
        # match a global broadcast address
        return (addr1.addrType == Address.globalBroadcastAddr)
    else:
        # the original 'raise RuntimeError, "..."' form is Python 2-only
        # syntax and a SyntaxError under Python 3; use the call form
        raise RuntimeError("invalid match combination")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_WhoIsRequest(self, apdu):
"""Respond to a Who-Is request.""" |
if _debug: WhoIsIAmServices._debug("do_WhoIsRequest %r", apdu)
# ignore this if there's no local device
if not self.localDevice:
if _debug: WhoIsIAmServices._debug(" - no local device")
return
# extract the parameters
low_limit = apdu.deviceInstanceRangeLowLimit
high_limit = apdu.deviceInstanceRangeHighLimit
# check for consistent parameters
if (low_limit is not None):
if (high_limit is None):
raise MissingRequiredParameter("deviceInstanceRangeHighLimit required")
if (low_limit < 0) or (low_limit > 4194303):
raise ParameterOutOfRange("deviceInstanceRangeLowLimit out of range")
if (high_limit is not None):
if (low_limit is None):
raise MissingRequiredParameter("deviceInstanceRangeLowLimit required")
if (high_limit < 0) or (high_limit > 4194303):
raise ParameterOutOfRange("deviceInstanceRangeHighLimit out of range")
# see we should respond
if (low_limit is not None):
if (self.localDevice.objectIdentifier[1] < low_limit):
return
if (high_limit is not None):
if (self.localDevice.objectIdentifier[1] > high_limit):
return
# generate an I-Am
self.i_am(address=apdu.pduSource) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_IAmRequest(self, apdu):
"""Respond to an I-Am request.""" |
if _debug: WhoIsIAmServices._debug("do_IAmRequest %r", apdu)
# check for required parameters
if apdu.iAmDeviceIdentifier is None:
raise MissingRequiredParameter("iAmDeviceIdentifier required")
if apdu.maxAPDULengthAccepted is None:
raise MissingRequiredParameter("maxAPDULengthAccepted required")
if apdu.segmentationSupported is None:
raise MissingRequiredParameter("segmentationSupported required")
if apdu.vendorID is None:
raise MissingRequiredParameter("vendorID required")
# extract the device instance number
device_instance = apdu.iAmDeviceIdentifier[1]
if _debug: WhoIsIAmServices._debug(" - device_instance: %r", device_instance)
# extract the source address
device_address = apdu.pduSource
if _debug: WhoIsIAmServices._debug(" - device_address: %r", device_address) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_WhoHasRequest(self, apdu):
"""Respond to a Who-Has request.""" |
if _debug: WhoHasIHaveServices._debug("do_WhoHasRequest, %r", apdu)
# ignore this if there's no local device
if not self.localDevice:
if _debug: WhoIsIAmServices._debug(" - no local device")
return
# if this has limits, check them like Who-Is
if apdu.limits is not None:
# extract the parameters
low_limit = apdu.limits.deviceInstanceRangeLowLimit
high_limit = apdu.limits.deviceInstanceRangeHighLimit
# check for consistent parameters
if (low_limit is None):
raise MissingRequiredParameter("deviceInstanceRangeLowLimit required")
if (low_limit < 0) or (low_limit > 4194303):
raise ParameterOutOfRange("deviceInstanceRangeLowLimit out of range")
if (high_limit is None):
raise MissingRequiredParameter("deviceInstanceRangeHighLimit required")
if (high_limit < 0) or (high_limit > 4194303):
raise ParameterOutOfRange("deviceInstanceRangeHighLimit out of range")
# see we should respond
if (self.localDevice.objectIdentifier[1] < low_limit):
return
if (self.localDevice.objectIdentifier[1] > high_limit):
return
# find the object
if apdu.object.objectIdentifier is not None:
obj = self.objectIdentifier.get(apdu.object.objectIdentifier, None)
elif apdu.object.objectName is not None:
obj = self.objectName.get(apdu.object.objectName, None)
else:
raise InconsistentParameters("object identifier or object name required")
# maybe we don't have it
if not obj:
return
# send out the response
self.i_have(obj, address=apdu.pduSource) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_IHaveRequest(self, apdu):
"""Respond to a I-Have request.""" |
if _debug: WhoHasIHaveServices._debug("do_IHaveRequest %r", apdu)
# check for required parameters
if apdu.deviceIdentifier is None:
raise MissingRequiredParameter("deviceIdentifier required")
if apdu.objectIdentifier is None:
raise MissingRequiredParameter("objectIdentifier required")
if apdu.objectName is None:
raise MissingRequiredParameter("objectName required") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_state(self, newState, timer=0):
"""This function is called when the derived class wants to change state.""" |
if _debug: SSM._debug("set_state %r (%s) timer=%r", newState, SSM.transactionLabels[newState], timer)
# make sure we have a correct transition
if (self.state == COMPLETED) or (self.state == ABORTED):
e = RuntimeError("invalid state transition from %s to %s" % (SSM.transactionLabels[self.state], SSM.transactionLabels[newState]))
SSM._exception(e)
raise e
# stop any current timer
self.stop_timer()
# make the change
self.state = newState
# if another timer should be started, start it
if timer:
self.start_timer(timer) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_segmentation_context(self, apdu):
"""This function is called to set the segmentation context.""" |
if _debug: SSM._debug("set_segmentation_context %s", repr(apdu))
# set the context
self.segmentAPDU = apdu |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def get_segment(self, indx):
        """Return an APDU corresponding to a particular segment of a
        confirmed request or complex ack.  The segmentAPDU is the context.

        :param indx: zero-based segment number, must be < segmentCount
        :returns: a ConfirmedRequestPDU or ComplexAckPDU carrying the slice
            of pduData for that segment, with the segmentation header
            fields (apduSeg/apduMor/apduSeq/apduWin) filled in
        :raises RuntimeError: if no segmentation context has been set, the
            index is out of range, or the context APDU type is not one that
            can be segmented
        """
        if _debug: SSM._debug("get_segment %r", indx)
        # check for no context
        if not self.segmentAPDU:
            raise RuntimeError("no segmentation context established")
        # check for invalid segment number
        if indx >= self.segmentCount:
            raise RuntimeError("invalid segment number %r, APDU has %r segments" % (indx, self.segmentCount))
        if self.segmentAPDU.apduType == ConfirmedRequestPDU.pduType:
            if _debug: SSM._debug(" - confirmed request context")
            segAPDU = ConfirmedRequestPDU(self.segmentAPDU.apduService)
            segAPDU.apduMaxSegs = encode_max_segments_accepted(self.maxSegmentsAccepted)
            segAPDU.apduMaxResp = encode_max_apdu_length_accepted(self.maxApduLengthAccepted)
            segAPDU.apduInvokeID = self.invokeID
            # segmented response accepted?
            segAPDU.apduSA = self.segmentationSupported in ('segmentedReceive', 'segmentedBoth')
            if _debug: SSM._debug(" - segmented response accepted: %r", segAPDU.apduSA)
        elif self.segmentAPDU.apduType == ComplexAckPDU.pduType:
            if _debug: SSM._debug(" - complex ack context")
            segAPDU = ComplexAckPDU(self.segmentAPDU.apduService, self.segmentAPDU.apduInvokeID)
        else:
            raise RuntimeError("invalid APDU type for segmentation context")
        # maintain the user data reference
        segAPDU.pduUserData = self.segmentAPDU.pduUserData
        # make sure the destination is set
        segAPDU.pduDestination = self.pdu_address
        # segmented message?
        if (self.segmentCount != 1):
            segAPDU.apduSeg = True
            segAPDU.apduMor = (indx < (self.segmentCount - 1)) # more follows
            segAPDU.apduSeq = indx % 256 # sequence number wraps at 256
            # first segment sends proposed window size, rest get actual
            if indx == 0:
                if _debug: SSM._debug(" - proposedWindowSize: %r", self.ssmSAP.proposedWindowSize)
                segAPDU.apduWin = self.ssmSAP.proposedWindowSize
            else:
                if _debug: SSM._debug(" - actualWindowSize: %r", self.actualWindowSize)
                segAPDU.apduWin = self.actualWindowSize
        else:
            segAPDU.apduSeg = False
            segAPDU.apduMor = False
        # add the content: the slice of data this segment carries
        offset = indx * self.segmentSize
        segAPDU.put_data( self.segmentAPDU.pduData[offset:offset+self.segmentSize] )
        # success
        return segAPDU
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append_segment(self, apdu):
"""This function appends the apdu content to the end of the current APDU being built. The segmentAPDU is the context.""" |
if _debug: SSM._debug("append_segment %r", apdu)
# check for no context
if not self.segmentAPDU:
raise RuntimeError("no segmentation context established")
# append the data
self.segmentAPDU.put_data(apdu.pduData) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill_window(self, seqNum):
"""This function sends all of the packets necessary to fill out the segmentation window.""" |
if _debug: SSM._debug("fill_window %r", seqNum)
if _debug: SSM._debug(" - actualWindowSize: %r", self.actualWindowSize)
for ix in range(self.actualWindowSize):
apdu = self.get_segment(seqNum + ix)
# send the message
self.ssmSAP.request(apdu)
# check for no more follows
if not apdu.apduMor:
self.sentAllSegments = True
break |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, apdu):
"""This function is called by client transaction functions when it wants to send a message to the device.""" |
if _debug: ClientSSM._debug("request %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = None
apdu.pduDestination = self.pdu_address
# send it via the device
self.ssmSAP.request(apdu) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def indication(self, apdu):
        """This function is called after the device has bound a new
        transaction and wants to start the process rolling.

        Negotiates the segment size from what is known about the server,
        decides whether the request must be segmented, aborts when the
        request cannot be delivered within the peer's stated limits, and
        finally sends the first (or only) segment.

        :param apdu: the downstream ConfirmedRequestPDU to send
        :raises RuntimeError: if the APDU is not a confirmed request
        """
        if _debug: ClientSSM._debug("indication %r", apdu)
        # make sure we're getting confirmed requests
        if (apdu.apduType != ConfirmedRequestPDU.pduType):
            raise RuntimeError("invalid APDU (1)")
        # save the request and set the segmentation context
        self.set_segmentation_context(apdu)
        # if the max apdu length of the server isn't known, assume that it
        # is the same size as our own and will be the segment size
        if (not self.device_info) or (self.device_info.maxApduLengthAccepted is None):
            self.segmentSize = self.maxApduLengthAccepted
        # if the max npdu length of the server isn't known, assume that it
        # is the same as the max apdu length accepted
        elif self.device_info.maxNpduLength is None:
            self.segmentSize = self.device_info.maxApduLengthAccepted
        # the segment size is the minimum of the size of the largest packet
        # that can be delivered to the server and the largest it can accept
        else:
            self.segmentSize = min(self.device_info.maxNpduLength, self.device_info.maxApduLengthAccepted)
        if _debug: ClientSSM._debug(" - segment size: %r", self.segmentSize)
        # save the invoke ID
        self.invokeID = apdu.apduInvokeID
        if _debug: ClientSSM._debug(" - invoke ID: %r", self.invokeID)
        # compute the segment count
        if not apdu.pduData:
            # always at least one segment
            self.segmentCount = 1
        else:
            # split into chunks, maybe need one more
            self.segmentCount, more = divmod(len(apdu.pduData), self.segmentSize)
            if more:
                self.segmentCount += 1
        if _debug: ClientSSM._debug(" - segment count: %r", self.segmentCount)
        # make sure we support segmented transmit if we need to
        if self.segmentCount > 1:
            if self.segmentationSupported not in ('segmentedTransmit', 'segmentedBoth'):
                if _debug: ClientSSM._debug(" - local device can't send segmented requests")
                abort = self.abort(AbortReason.segmentationNotSupported)
                self.response(abort)
                return
            if not self.device_info:
                if _debug: ClientSSM._debug(" - no server info for segmentation support")
            elif self.device_info.segmentationSupported not in ('segmentedReceive', 'segmentedBoth'):
                if _debug: ClientSSM._debug(" - server can't receive segmented requests")
                abort = self.abort(AbortReason.segmentationNotSupported)
                self.response(abort)
                return
            # make sure we dont exceed the number of segments in our request
            # that the server said it was willing to accept
            if not self.device_info:
                if _debug: ClientSSM._debug(" - no server info for maximum number of segments")
            elif not self.device_info.maxSegmentsAccepted:
                if _debug: ClientSSM._debug(" - server doesn't say maximum number of segments")
            elif self.segmentCount > self.device_info.maxSegmentsAccepted:
                if _debug: ClientSSM._debug(" - server can't receive enough segments")
                abort = self.abort(AbortReason.apduTooLong)
                self.response(abort)
                return
        # send out the first segment (or the whole thing)
        if self.segmentCount == 1:
            # unsegmented
            self.sentAllSegments = True
            self.retryCount = 0
            self.set_state(AWAIT_CONFIRMATION, self.apduTimeout)
        else:
            # segmented
            self.sentAllSegments = False
            self.retryCount = 0
            self.segmentRetryCount = 0
            self.initialSequenceNumber = 0
            self.actualWindowSize = None    # segment ack will set value
            self.set_state(SEGMENTED_REQUEST, self.segmentTimeout)
        # deliver to the device
        self.request(self.get_segment(0))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def response(self, apdu):
"""This function is called by client transaction functions when they want to send a message to the application.""" |
if _debug: ClientSSM._debug("response %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = self.pdu_address
apdu.pduDestination = None
# send it to the application
self.ssmSAP.sap_response(apdu) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def confirmation(self, apdu):
"""This function is called by the device for all upstream messages related to the transaction.""" |
if _debug: ClientSSM._debug("confirmation %r", apdu)
if self.state == SEGMENTED_REQUEST:
self.segmented_request(apdu)
elif self.state == AWAIT_CONFIRMATION:
self.await_confirmation(apdu)
elif self.state == SEGMENTED_CONFIRMATION:
self.segmented_confirmation(apdu)
else:
raise RuntimeError("invalid state") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_task(self):
"""This function is called when something has taken too long.""" |
if _debug: ClientSSM._debug("process_task")
if self.state == SEGMENTED_REQUEST:
self.segmented_request_timeout()
elif self.state == AWAIT_CONFIRMATION:
self.await_confirmation_timeout()
elif self.state == SEGMENTED_CONFIRMATION:
self.segmented_confirmation_timeout()
elif self.state == COMPLETED:
pass
elif self.state == ABORTED:
pass
else:
e = RuntimeError("invalid state")
ClientSSM._exception("exception: %r", e)
raise e |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def abort(self, reason):
"""This function is called when the transaction should be aborted.""" |
if _debug: ClientSSM._debug("abort %r", reason)
# change the state to aborted
self.set_state(ABORTED)
# build an abort PDU to return
abort_pdu = AbortPDU(False, self.invokeID, reason)
# return it
return abort_pdu |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def segmented_request(self, apdu):
"""This function is called when the client is sending a segmented request and receives an apdu.""" |
if _debug: ClientSSM._debug("segmented_request %r", apdu)
# server is ready for the next segment
if apdu.apduType == SegmentAckPDU.pduType:
if _debug: ClientSSM._debug(" - segment ack")
# actual window size is provided by server
self.actualWindowSize = apdu.apduWin
# duplicate ack received?
if not self.in_window(apdu.apduSeq, self.initialSequenceNumber):
if _debug: ClientSSM._debug(" - not in window")
self.restart_timer(self.segmentTimeout)
# final ack received?
elif self.sentAllSegments:
if _debug: ClientSSM._debug(" - all done sending request")
self.set_state(AWAIT_CONFIRMATION, self.apduTimeout)
# more segments to send
else:
if _debug: ClientSSM._debug(" - more segments to send")
self.initialSequenceNumber = (apdu.apduSeq + 1) % 256
self.segmentRetryCount = 0
self.fill_window(self.initialSequenceNumber)
self.restart_timer(self.segmentTimeout)
# simple ack
elif (apdu.apduType == SimpleAckPDU.pduType):
if _debug: ClientSSM._debug(" - simple ack")
if not self.sentAllSegments:
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
else:
self.set_state(COMPLETED)
self.response(apdu)
elif (apdu.apduType == ComplexAckPDU.pduType):
if _debug: ClientSSM._debug(" - complex ack")
if not self.sentAllSegments:
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
elif not apdu.apduSeg:
# ack is not segmented
self.set_state(COMPLETED)
self.response(apdu)
else:
# set the segmented response context
self.set_segmentation_context(apdu)
# minimum of what the server is proposing and this client proposes
self.actualWindowSize = min(apdu.apduWin, self.ssmSAP.proposedWindowSize)
self.lastSequenceNumber = 0
self.initialSequenceNumber = 0
self.set_state(SEGMENTED_CONFIRMATION, self.segmentTimeout)
# some kind of problem
elif (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType) or (apdu.apduType == AbortPDU.pduType):
if _debug: ClientSSM._debug(" - error/reject/abort")
self.set_state(COMPLETED)
self.response = apdu
self.response(apdu)
else:
raise RuntimeError("invalid APDU (2)") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_state(self, newState, timer=0):
"""This function is called when the client wants to change state.""" |
if _debug: ServerSSM._debug("set_state %r (%s) timer=%r", newState, SSM.transactionLabels[newState], timer)
# do the regular state change
SSM.set_state(self, newState, timer)
# when completed or aborted, remove tracking
if (newState == COMPLETED) or (newState == ABORTED):
if _debug: ServerSSM._debug(" - remove from active transactions")
self.ssmSAP.serverTransactions.remove(self)
# release the device info
if self.device_info:
if _debug: ClientSSM._debug(" - release device information")
self.ssmSAP.deviceInfoCache.release(self.device_info) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, apdu):
"""This function is called by transaction functions to send to the application.""" |
if _debug: ServerSSM._debug("request %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = self.pdu_address
apdu.pduDestination = None
# send it via the device
self.ssmSAP.sap_request(apdu) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def indication(self, apdu):
"""This function is called for each downstream packet related to the transaction.""" |
if _debug: ServerSSM._debug("indication %r", apdu)
if self.state == IDLE:
self.idle(apdu)
elif self.state == SEGMENTED_REQUEST:
self.segmented_request(apdu)
elif self.state == AWAIT_RESPONSE:
self.await_response(apdu)
elif self.state == SEGMENTED_RESPONSE:
self.segmented_response(apdu)
else:
if _debug: ServerSSM._debug(" - invalid state") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def confirmation(self, apdu):
    """This function is called when the application has provided a response and needs it to be sent to the client.

    Aborts and simple responses (SimpleAck/Error/Reject) pass straight
    through and end the transaction.  A complex ack is saved as the
    segmentation context, then either sent whole (one segment) or sent
    as the first of several segments after verifying that both server
    and client can handle a segmented transfer of this size.
    """
    if _debug: ServerSSM._debug("confirmation %r", apdu)

    # check to see we are in the correct state
    if self.state != AWAIT_RESPONSE:
        # warn but continue — the response is still forwarded below
        if _debug: ServerSSM._debug(" - warning: not expecting a response")

    # abort response
    if (apdu.apduType == AbortPDU.pduType):
        if _debug: ServerSSM._debug(" - abort")

        # transaction is over
        self.set_state(ABORTED)

        # send the response to the device
        self.response(apdu)
        return

    # simple response
    if (apdu.apduType == SimpleAckPDU.pduType) or (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType):
        if _debug: ServerSSM._debug(" - simple ack, error, or reject")

        # transaction completed
        self.set_state(COMPLETED)

        # send the response to the device
        self.response(apdu)
        return

    # complex ack
    if (apdu.apduType == ComplexAckPDU.pduType):
        if _debug: ServerSSM._debug(" - complex ack")

        # save the response and set the segmentation context
        self.set_segmentation_context(apdu)

        # the segment size is the minimum of the size of the largest packet
        # that can be delivered to the client and the largest it can accept;
        # fall back to our own maximum when the peer's limit is unknown
        if (not self.device_info) or (self.device_info.maxNpduLength is None):
            self.segmentSize = self.maxApduLengthAccepted
        else:
            self.segmentSize = min(self.device_info.maxNpduLength, self.maxApduLengthAccepted)
        if _debug: ServerSSM._debug(" - segment size: %r", self.segmentSize)

        # compute the segment count
        if not apdu.pduData:
            # always at least one segment
            self.segmentCount = 1
        else:
            # split into chunks, maybe need one more
            self.segmentCount, more = divmod(len(apdu.pduData), self.segmentSize)
            if more:
                self.segmentCount += 1
        if _debug: ServerSSM._debug(" - segment count: %r", self.segmentCount)

        # make sure we support segmented transmit if we need to
        if self.segmentCount > 1:
            if _debug: ServerSSM._debug(" - segmentation required, %d segments", self.segmentCount)

            # make sure we support segmented transmit
            if self.segmentationSupported not in ('segmentedTransmit', 'segmentedBoth'):
                if _debug: ServerSSM._debug(" - server can't send segmented responses")
                abort = self.abort(AbortReason.segmentationNotSupported)
                self.response(abort)
                return

            # make sure client supports segmented receive
            if not self.segmented_response_accepted:
                if _debug: ServerSSM._debug(" - client can't receive segmented responses")
                abort = self.abort(AbortReason.segmentationNotSupported)
                self.response(abort)
                return

            # make sure we dont exceed the number of segments in our response
            # that the client said it was willing to accept in the request
            if (self.maxSegmentsAccepted is not None) and (self.segmentCount > self.maxSegmentsAccepted):
                if _debug: ServerSSM._debug(" - client can't receive enough segments")
                abort = self.abort(AbortReason.apduTooLong)
                self.response(abort)
                return

        # initialize the state
        self.segmentRetryCount = 0
        self.initialSequenceNumber = 0
        self.actualWindowSize = None

        # send out the first segment (or the whole thing)
        if self.segmentCount == 1:
            self.response(apdu)
            self.set_state(COMPLETED)
        else:
            self.response(self.get_segment(0))
            self.set_state(SEGMENTED_RESPONSE, self.segmentTimeout)

    else:
        raise RuntimeError("invalid APDU (4)")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_task(self):
    """This function is called when the client has failed to send all of the segments of a segmented request, the application has taken too long to complete the request, or the client failed to ack the segments of a segmented response."""
    if _debug: ServerSSM._debug("process_task")

    # terminal states: nothing left to do
    if self.state in (COMPLETED, ABORTED):
        return

    # pick the timeout handler for the active state
    timeout_handlers = {
        SEGMENTED_REQUEST: self.segmented_request_timeout,
        AWAIT_RESPONSE: self.await_response_timeout,
        SEGMENTED_RESPONSE: self.segmented_response_timeout,
        }

    handler = timeout_handlers.get(self.state)
    if handler is None:
        if _debug: ServerSSM._debug("invalid state")
        raise RuntimeError("invalid state")
    handler()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def abort(self, reason):
    """This function is called when the application would like to abort the transaction. There is no notification back to the application."""
    if _debug: ServerSSM._debug("abort %r", reason)

    # transition straight to the terminal state
    self.set_state(ABORTED)

    # hand back an abort APDU for the caller to send; the first
    # argument marks it as originating from the server side
    return AbortPDU(True, self.invokeID, reason)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def await_response_timeout(self):
    """This function is called when the application has taken too long to respond to a clients request. The client has probably long since given up."""
    if _debug: ServerSSM._debug("await_response_timeout")

    # abort the transaction and forward the abort upstream
    self.request(self.abort(AbortReason.serverTimeout))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_next_invoke_id(self, addr):
    """Called by clients to get an unused invoke ID."""
    if _debug: StateMachineAccessPoint._debug("get_next_invoke_id")

    # remember where the scan started so exhaustion can be detected
    startingID = self.nextInvokeID

    while True:
        candidateID = self.nextInvokeID
        self.nextInvokeID = (self.nextInvokeID + 1) % 256

        # wrapped all the way around: every ID has been considered
        if startingID == self.nextInvokeID:
            raise RuntimeError("no available invoke ID")

        # accept the candidate unless a client transaction to the same
        # address is already using it
        in_use = any(
            (candidateID == tr.invokeID) and (addr == tr.pdu_address)
            for tr in self.clientTransactions
            )
        if not in_use:
            return candidateID
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sap_indication(self, apdu):
    """This function is called when the application is requesting a new transaction as a client.

    Applies device-communication-control gating first, then forwards
    unconfirmed requests directly and wraps confirmed requests in a new
    ClientSSM (assigning or verifying the invoke ID) to track the
    transaction.
    """
    if _debug: StateMachineAccessPoint._debug("sap_indication %r", apdu)

    # check device communication control
    if self.dccEnableDisable == 'enable':
        if _debug: StateMachineAccessPoint._debug(" - communications enabled")

    elif self.dccEnableDisable == 'disable':
        # all outgoing communication is shut off: drop the request
        if _debug: StateMachineAccessPoint._debug(" - communications disabled")
        return

    elif self.dccEnableDisable == 'disableInitiation':
        if _debug: StateMachineAccessPoint._debug(" - initiation disabled")

        # NOTE(review): the 1/0 literals appear to be the unconfirmed PDU
        # type and the I-Am service choice (per the debug messages below) —
        # only I-Am may still be initiated; confirm against the PDU classes
        if (apdu.apduType == 1) and (apdu.apduService == 0):
            if _debug: StateMachineAccessPoint._debug(" - continue with I-Am")
        else:
            if _debug: StateMachineAccessPoint._debug(" - not an I-Am")
            return

    if isinstance(apdu, UnconfirmedRequestPDU):
        # deliver to the device
        self.request(apdu)

    elif isinstance(apdu, ConfirmedRequestPDU):
        # make sure it has an invoke ID
        if apdu.apduInvokeID is None:
            apdu.apduInvokeID = self.get_next_invoke_id(apdu.pduDestination)
        else:
            # verify the invoke ID isn't already being used
            for tr in self.clientTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduDestination == tr.pdu_address):
                    raise RuntimeError("invoke ID in use")

        # warning for bogus requests
        if (apdu.pduDestination.addrType != Address.localStationAddr) and (apdu.pduDestination.addrType != Address.remoteStationAddr):
            StateMachineAccessPoint._warning("%s is not a local or remote station", apdu.pduDestination)

        # create a client transaction state machine
        tr = ClientSSM(self, apdu.pduDestination)
        if _debug: StateMachineAccessPoint._debug(" - client segmentation state machine: %r", tr)

        # add it to our transactions to track it
        self.clientTransactions.append(tr)

        # let it run
        tr.indication(apdu)

    else:
        raise RuntimeError("invalid APDU (9)")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.