code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def parse_args(arguments=None, root=None, apply_config=False):
"""Parse the arguments from the CLI.
If apply_config then we first look up and apply configs using
apply_config_defaults.
"""
if arguments is None:
arguments = []
parser = create_parser()
args = parser.parse_args(arguments)
if apply_config:
parser = apply_config_defaults(parser, args, root=root)
args = parser.parse_args(arguments)
# sanity check args (from autopep8)
if args.max_line_length <= 0: # pragma: no cover
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = _split_comma_separated(args.select)
if args.ignore:
args.ignore = _split_comma_separated(args.ignore)
elif not args.select and args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W']
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude)
else:
args.exclude = []
return args | def function[parse_args, parameter[arguments, root, apply_config]]:
constant[Parse the arguments from the CLI.
If apply_config then we first look up and apply configs using
apply_config_defaults.
]
if compare[name[arguments] is constant[None]] begin[:]
variable[arguments] assign[=] list[[]]
variable[parser] assign[=] call[name[create_parser], parameter[]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[arguments]]]
if name[apply_config] begin[:]
variable[parser] assign[=] call[name[apply_config_defaults], parameter[name[parser], name[args]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[arguments]]]
if compare[name[args].max_line_length less_or_equal[<=] constant[0]] begin[:]
call[name[parser].error, parameter[constant[--max-line-length must be greater than 0]]]
if name[args].select begin[:]
name[args].select assign[=] call[name[_split_comma_separated], parameter[name[args].select]]
if name[args].ignore begin[:]
name[args].ignore assign[=] call[name[_split_comma_separated], parameter[name[args].ignore]]
if name[args].exclude begin[:]
name[args].exclude assign[=] call[name[_split_comma_separated], parameter[name[args].exclude]]
return[name[args]] | keyword[def] identifier[parse_args] ( identifier[arguments] = keyword[None] , identifier[root] = keyword[None] , identifier[apply_config] = keyword[False] ):
literal[string]
keyword[if] identifier[arguments] keyword[is] keyword[None] :
identifier[arguments] =[]
identifier[parser] = identifier[create_parser] ()
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[arguments] )
keyword[if] identifier[apply_config] :
identifier[parser] = identifier[apply_config_defaults] ( identifier[parser] , identifier[args] , identifier[root] = identifier[root] )
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[arguments] )
keyword[if] identifier[args] . identifier[max_line_length] <= literal[int] :
identifier[parser] . identifier[error] ( literal[string] )
keyword[if] identifier[args] . identifier[select] :
identifier[args] . identifier[select] = identifier[_split_comma_separated] ( identifier[args] . identifier[select] )
keyword[if] identifier[args] . identifier[ignore] :
identifier[args] . identifier[ignore] = identifier[_split_comma_separated] ( identifier[args] . identifier[ignore] )
keyword[elif] keyword[not] identifier[args] . identifier[select] keyword[and] identifier[args] . identifier[aggressive] :
identifier[args] . identifier[select] =[ literal[string] , literal[string] ]
keyword[else] :
identifier[args] . identifier[ignore] = identifier[_split_comma_separated] ( identifier[DEFAULT_IGNORE] )
keyword[if] identifier[args] . identifier[exclude] :
identifier[args] . identifier[exclude] = identifier[_split_comma_separated] ( identifier[args] . identifier[exclude] )
keyword[else] :
identifier[args] . identifier[exclude] =[]
keyword[return] identifier[args] | def parse_args(arguments=None, root=None, apply_config=False):
"""Parse the arguments from the CLI.
If apply_config then we first look up and apply configs using
apply_config_defaults.
"""
if arguments is None:
arguments = [] # depends on [control=['if'], data=['arguments']]
parser = create_parser()
args = parser.parse_args(arguments)
if apply_config:
parser = apply_config_defaults(parser, args, root=root)
args = parser.parse_args(arguments) # depends on [control=['if'], data=[]]
# sanity check args (from autopep8)
if args.max_line_length <= 0: # pragma: no cover
parser.error('--max-line-length must be greater than 0') # depends on [control=['if'], data=[]]
if args.select:
args.select = _split_comma_separated(args.select) # depends on [control=['if'], data=[]]
if args.ignore:
args.ignore = _split_comma_separated(args.ignore) # depends on [control=['if'], data=[]]
elif not args.select and args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W'] # depends on [control=['if'], data=[]]
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude) # depends on [control=['if'], data=[]]
else:
args.exclude = []
return args |
def _get_number_productions(sentence: str) -> List[str]:
"""
Gathers all the numbers in the sentence, and returns productions that lead to them.
"""
# The mapping here is very simple and limited, which also shouldn't be a problem
# because numbers seem to be represented fairly regularly.
number_strings = {"one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six":
"6", "seven": "7", "eight": "8", "nine": "9", "ten": "10"}
number_productions = []
tokens = sentence.split()
numbers = number_strings.values()
for token in tokens:
if token in numbers:
number_productions.append(f"int -> {token}")
elif token in number_strings:
number_productions.append(f"int -> {number_strings[token]}")
return number_productions | def function[_get_number_productions, parameter[sentence]]:
constant[
Gathers all the numbers in the sentence, and returns productions that lead to them.
]
variable[number_strings] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93bb0>, <ast.Constant object at 0x7da18fe91c00>, <ast.Constant object at 0x7da18fe930d0>, <ast.Constant object at 0x7da18fe90fa0>, <ast.Constant object at 0x7da18fe939d0>, <ast.Constant object at 0x7da18fe936a0>, <ast.Constant object at 0x7da18fe92350>, <ast.Constant object at 0x7da18fe92d70>, <ast.Constant object at 0x7da18fe90e20>, <ast.Constant object at 0x7da18fe901f0>], [<ast.Constant object at 0x7da18fe90910>, <ast.Constant object at 0x7da18fe91510>, <ast.Constant object at 0x7da18fe91810>, <ast.Constant object at 0x7da18fe92920>, <ast.Constant object at 0x7da18fe93df0>, <ast.Constant object at 0x7da18fe90f70>, <ast.Constant object at 0x7da18fe91c90>, <ast.Constant object at 0x7da18fe91690>, <ast.Constant object at 0x7da18fe91e10>, <ast.Constant object at 0x7da18fe93790>]]
variable[number_productions] assign[=] list[[]]
variable[tokens] assign[=] call[name[sentence].split, parameter[]]
variable[numbers] assign[=] call[name[number_strings].values, parameter[]]
for taget[name[token]] in starred[name[tokens]] begin[:]
if compare[name[token] in name[numbers]] begin[:]
call[name[number_productions].append, parameter[<ast.JoinedStr object at 0x7da18fe91000>]]
return[name[number_productions]] | keyword[def] identifier[_get_number_productions] ( identifier[sentence] : identifier[str] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[number_strings] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :
literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[number_productions] =[]
identifier[tokens] = identifier[sentence] . identifier[split] ()
identifier[numbers] = identifier[number_strings] . identifier[values] ()
keyword[for] identifier[token] keyword[in] identifier[tokens] :
keyword[if] identifier[token] keyword[in] identifier[numbers] :
identifier[number_productions] . identifier[append] ( literal[string] )
keyword[elif] identifier[token] keyword[in] identifier[number_strings] :
identifier[number_productions] . identifier[append] ( literal[string] )
keyword[return] identifier[number_productions] | def _get_number_productions(sentence: str) -> List[str]:
"""
Gathers all the numbers in the sentence, and returns productions that lead to them.
"""
# The mapping here is very simple and limited, which also shouldn't be a problem
# because numbers seem to be represented fairly regularly.
number_strings = {'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10'}
number_productions = []
tokens = sentence.split()
numbers = number_strings.values()
for token in tokens:
if token in numbers:
number_productions.append(f'int -> {token}') # depends on [control=['if'], data=['token']]
elif token in number_strings:
number_productions.append(f'int -> {number_strings[token]}') # depends on [control=['if'], data=['token', 'number_strings']] # depends on [control=['for'], data=['token']]
return number_productions |
def _getLocation(self, coordinate, reference_id, strand, position_types):
"""
Make an object for the location, which has:
{coordinate : integer, reference : reference_id, types = []}
where the strand is indicated in the type array
:param coordinate:
:param reference_id:
:param strand:
:param position_types:
:return:
"""
loc = {}
loc['coordinate'] = coordinate
loc['reference'] = reference_id
loc['type'] = []
strand_id = self._getStrandType(strand)
if strand_id is not None:
loc['type'].append(strand_id)
if position_types is not None:
loc['type'] += position_types
if position_types == []:
loc['type'].append(self.globaltt['Position'])
return loc | def function[_getLocation, parameter[self, coordinate, reference_id, strand, position_types]]:
constant[
Make an object for the location, which has:
{coordinate : integer, reference : reference_id, types = []}
where the strand is indicated in the type array
:param coordinate:
:param reference_id:
:param strand:
:param position_types:
:return:
]
variable[loc] assign[=] dictionary[[], []]
call[name[loc]][constant[coordinate]] assign[=] name[coordinate]
call[name[loc]][constant[reference]] assign[=] name[reference_id]
call[name[loc]][constant[type]] assign[=] list[[]]
variable[strand_id] assign[=] call[name[self]._getStrandType, parameter[name[strand]]]
if compare[name[strand_id] is_not constant[None]] begin[:]
call[call[name[loc]][constant[type]].append, parameter[name[strand_id]]]
if compare[name[position_types] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18f58e6e0>
if compare[name[position_types] equal[==] list[[]]] begin[:]
call[call[name[loc]][constant[type]].append, parameter[call[name[self].globaltt][constant[Position]]]]
return[name[loc]] | keyword[def] identifier[_getLocation] ( identifier[self] , identifier[coordinate] , identifier[reference_id] , identifier[strand] , identifier[position_types] ):
literal[string]
identifier[loc] ={}
identifier[loc] [ literal[string] ]= identifier[coordinate]
identifier[loc] [ literal[string] ]= identifier[reference_id]
identifier[loc] [ literal[string] ]=[]
identifier[strand_id] = identifier[self] . identifier[_getStrandType] ( identifier[strand] )
keyword[if] identifier[strand_id] keyword[is] keyword[not] keyword[None] :
identifier[loc] [ literal[string] ]. identifier[append] ( identifier[strand_id] )
keyword[if] identifier[position_types] keyword[is] keyword[not] keyword[None] :
identifier[loc] [ literal[string] ]+= identifier[position_types]
keyword[if] identifier[position_types] ==[]:
identifier[loc] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[globaltt] [ literal[string] ])
keyword[return] identifier[loc] | def _getLocation(self, coordinate, reference_id, strand, position_types):
"""
Make an object for the location, which has:
{coordinate : integer, reference : reference_id, types = []}
where the strand is indicated in the type array
:param coordinate:
:param reference_id:
:param strand:
:param position_types:
:return:
"""
loc = {}
loc['coordinate'] = coordinate
loc['reference'] = reference_id
loc['type'] = []
strand_id = self._getStrandType(strand)
if strand_id is not None:
loc['type'].append(strand_id) # depends on [control=['if'], data=['strand_id']]
if position_types is not None:
loc['type'] += position_types # depends on [control=['if'], data=['position_types']]
if position_types == []:
loc['type'].append(self.globaltt['Position']) # depends on [control=['if'], data=[]]
return loc |
def cmd(send, msg, args):
"""Translate something.
Syntax: {command} [--from <language code>] [--to <language code>] <text>
See https://cloud.google.com/translate/v2/translate-reference#supported_languages for a list of valid language codes
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('--lang', '--from', default=None)
parser.add_argument('--to', default='en')
parser.add_argument('msg', nargs='+')
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
send(gen_translate(' '.join(cmdargs.msg), cmdargs.lang, cmdargs.to)) | def function[cmd, parameter[send, msg, args]]:
constant[Translate something.
Syntax: {command} [--from <language code>] [--to <language code>] <text>
See https://cloud.google.com/translate/v2/translate-reference#supported_languages for a list of valid language codes
]
variable[parser] assign[=] call[name[arguments].ArgParser, parameter[call[name[args]][constant[config]]]]
call[name[parser].add_argument, parameter[constant[--lang], constant[--from]]]
call[name[parser].add_argument, parameter[constant[--to]]]
call[name[parser].add_argument, parameter[constant[msg]]]
<ast.Try object at 0x7da18f09ed10>
call[name[send], parameter[call[name[gen_translate], parameter[call[constant[ ].join, parameter[name[cmdargs].msg]], name[cmdargs].lang, name[cmdargs].to]]]] | keyword[def] identifier[cmd] ( identifier[send] , identifier[msg] , identifier[args] ):
literal[string]
identifier[parser] = identifier[arguments] . identifier[ArgParser] ( identifier[args] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] )
keyword[try] :
identifier[cmdargs] = identifier[parser] . identifier[parse_args] ( identifier[msg] )
keyword[except] identifier[arguments] . identifier[ArgumentException] keyword[as] identifier[e] :
identifier[send] ( identifier[str] ( identifier[e] ))
keyword[return]
identifier[send] ( identifier[gen_translate] ( literal[string] . identifier[join] ( identifier[cmdargs] . identifier[msg] ), identifier[cmdargs] . identifier[lang] , identifier[cmdargs] . identifier[to] )) | def cmd(send, msg, args):
"""Translate something.
Syntax: {command} [--from <language code>] [--to <language code>] <text>
See https://cloud.google.com/translate/v2/translate-reference#supported_languages for a list of valid language codes
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('--lang', '--from', default=None)
parser.add_argument('--to', default='en')
parser.add_argument('msg', nargs='+')
try:
cmdargs = parser.parse_args(msg) # depends on [control=['try'], data=[]]
except arguments.ArgumentException as e:
send(str(e))
return # depends on [control=['except'], data=['e']]
send(gen_translate(' '.join(cmdargs.msg), cmdargs.lang, cmdargs.to)) |
def PartialDynamicSystem(self, ieq, variable):
"""
returns dynamical system blocks associated to output variable
"""
if ieq == 0:
if variable == self.physical_nodes[0].variable:
print('1')
# U1 is output
# U1=i1/pC+U2
Uc = Variable(hidden=True)
block1 = ODE(self.variables[0], Uc, [1], [0, self.C])
sub1 = Sum([self.physical_nodes[1].variable, Uc], variable)
return [block1, sub1]
elif variable == self.physical_nodes[1].variable:
print('2')
# U2 is output
# U2=U1-i1/pC
Uc = Variable(hidden=True)
block1 = ODE(self.variables[0], Uc, [-1], [0, self.C])
sum1 = Sum([self.physical_nodes[0].variable, Uc], variable)
return [block1, sum1]
# elif variable==self.variables[0]:
# print('3')
# # i1 is output
# # i1=pC(U1-U2)
# ic=Variable(hidden=True)
# subs1=Subtraction(self.physical_nodes[0].variable,self.physical_nodes[1].variable,ic)
# block1=ODE(ic,variable,[0,self.C],[1])
# return [block1,subs1]
elif ieq == 1:
# i1=-i2
if variable == self.variables[0]:
# i1 as output
# print('Bat1#0')
return [Gain(self.variables[1], self.variables[0], -1)]
elif variable == self.variables[1]:
# i2 as output
# print('Bat1#1')
return [Gain(self.variables[0], self.variables[1], -1)] | def function[PartialDynamicSystem, parameter[self, ieq, variable]]:
constant[
returns dynamical system blocks associated to output variable
]
if compare[name[ieq] equal[==] constant[0]] begin[:]
if compare[name[variable] equal[==] call[name[self].physical_nodes][constant[0]].variable] begin[:]
call[name[print], parameter[constant[1]]]
variable[Uc] assign[=] call[name[Variable], parameter[]]
variable[block1] assign[=] call[name[ODE], parameter[call[name[self].variables][constant[0]], name[Uc], list[[<ast.Constant object at 0x7da1b0b9cbb0>]], list[[<ast.Constant object at 0x7da1b0b9d330>, <ast.Attribute object at 0x7da1b0b9ead0>]]]]
variable[sub1] assign[=] call[name[Sum], parameter[list[[<ast.Attribute object at 0x7da1b0b9e4a0>, <ast.Name object at 0x7da1b0b9ece0>]], name[variable]]]
return[list[[<ast.Name object at 0x7da1b0b9fa30>, <ast.Name object at 0x7da1b0b9f820>]]] | keyword[def] identifier[PartialDynamicSystem] ( identifier[self] , identifier[ieq] , identifier[variable] ):
literal[string]
keyword[if] identifier[ieq] == literal[int] :
keyword[if] identifier[variable] == identifier[self] . identifier[physical_nodes] [ literal[int] ]. identifier[variable] :
identifier[print] ( literal[string] )
identifier[Uc] = identifier[Variable] ( identifier[hidden] = keyword[True] )
identifier[block1] = identifier[ODE] ( identifier[self] . identifier[variables] [ literal[int] ], identifier[Uc] ,[ literal[int] ],[ literal[int] , identifier[self] . identifier[C] ])
identifier[sub1] = identifier[Sum] ([ identifier[self] . identifier[physical_nodes] [ literal[int] ]. identifier[variable] , identifier[Uc] ], identifier[variable] )
keyword[return] [ identifier[block1] , identifier[sub1] ]
keyword[elif] identifier[variable] == identifier[self] . identifier[physical_nodes] [ literal[int] ]. identifier[variable] :
identifier[print] ( literal[string] )
identifier[Uc] = identifier[Variable] ( identifier[hidden] = keyword[True] )
identifier[block1] = identifier[ODE] ( identifier[self] . identifier[variables] [ literal[int] ], identifier[Uc] ,[- literal[int] ],[ literal[int] , identifier[self] . identifier[C] ])
identifier[sum1] = identifier[Sum] ([ identifier[self] . identifier[physical_nodes] [ literal[int] ]. identifier[variable] , identifier[Uc] ], identifier[variable] )
keyword[return] [ identifier[block1] , identifier[sum1] ]
keyword[elif] identifier[ieq] == literal[int] :
keyword[if] identifier[variable] == identifier[self] . identifier[variables] [ literal[int] ]:
keyword[return] [ identifier[Gain] ( identifier[self] . identifier[variables] [ literal[int] ], identifier[self] . identifier[variables] [ literal[int] ],- literal[int] )]
keyword[elif] identifier[variable] == identifier[self] . identifier[variables] [ literal[int] ]:
keyword[return] [ identifier[Gain] ( identifier[self] . identifier[variables] [ literal[int] ], identifier[self] . identifier[variables] [ literal[int] ],- literal[int] )] | def PartialDynamicSystem(self, ieq, variable):
"""
returns dynamical system blocks associated to output variable
"""
if ieq == 0:
if variable == self.physical_nodes[0].variable:
print('1')
# U1 is output
# U1=i1/pC+U2
Uc = Variable(hidden=True)
block1 = ODE(self.variables[0], Uc, [1], [0, self.C])
sub1 = Sum([self.physical_nodes[1].variable, Uc], variable)
return [block1, sub1] # depends on [control=['if'], data=['variable']]
elif variable == self.physical_nodes[1].variable:
print('2')
# U2 is output
# U2=U1-i1/pC
Uc = Variable(hidden=True)
block1 = ODE(self.variables[0], Uc, [-1], [0, self.C])
sum1 = Sum([self.physical_nodes[0].variable, Uc], variable)
return [block1, sum1] # depends on [control=['if'], data=['variable']] # depends on [control=['if'], data=[]]
# elif variable==self.variables[0]:
# print('3')
# # i1 is output
# # i1=pC(U1-U2)
# ic=Variable(hidden=True)
# subs1=Subtraction(self.physical_nodes[0].variable,self.physical_nodes[1].variable,ic)
# block1=ODE(ic,variable,[0,self.C],[1])
# return [block1,subs1]
elif ieq == 1:
# i1=-i2
if variable == self.variables[0]:
# i1 as output
# print('Bat1#0')
return [Gain(self.variables[1], self.variables[0], -1)] # depends on [control=['if'], data=[]]
elif variable == self.variables[1]:
# i2 as output
# print('Bat1#1')
return [Gain(self.variables[0], self.variables[1], -1)] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def get_interfaces_ip(self):
"""
Get interface IP details. Returns a dictionary of dictionaries.
Sample output:
{
"Ethernet2/3": {
"ipv4": {
"4.4.4.4": {
"prefix_length": 16
}
},
"ipv6": {
"2001:db8::1": {
"prefix_length": 10
},
"fe80::2ec2:60ff:fe4f:feb2": {
"prefix_length": "128"
}
}
},
"Ethernet2/2": {
"ipv4": {
"2.2.2.2": {
"prefix_length": 27
}
}
}
}
"""
interfaces_ip = {}
ipv4_command = "show ip interface vrf all"
ipv6_command = "show ipv6 interface vrf all"
output_v4 = self._send_command(ipv4_command)
output_v6 = self._send_command(ipv6_command)
v4_interfaces = {}
for line in output_v4.splitlines():
# Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38,
# IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0
# IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0
if "Interface status" in line:
interface = line.split(",")[0]
continue
if "IP address" in line:
ip_address = line.split(",")[0].split()[2]
try:
prefix_len = int(line.split()[5].split("/")[1])
except ValueError:
prefix_len = "N/A"
val = {"prefix_length": prefix_len}
v4_interfaces.setdefault(interface, {})[ip_address] = val
v6_interfaces = {}
for line in output_v6.splitlines():
# Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40
# IPv6 address:
# 2001:11:2233::a1/24 [VALID]
# 2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID]
# IPv6 subnet: 2001::/24
# IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
# IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
if "Interface status" in line:
interface = line.split(",")[0]
continue
if "VALID" in line:
line = line.strip()
if "link-local address" in line:
# match the following format:
# IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
ip_address = line.split()[3]
prefix_len = "64"
elif "IPv6 address" in line:
# match the following format:
# IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
ip_address = line.split()[2]
prefix_len = "64"
else:
ip_address, prefix_len = line.split()[0].split("/")
prefix_len = int(prefix_len)
val = {"prefix_length": prefix_len}
v6_interfaces.setdefault(interface, {})[ip_address] = val
# Join data from intermediate dictionaries.
for interface, data in v4_interfaces.items():
interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data
for interface, data in v6_interfaces.items():
interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data
return interfaces_ip | def function[get_interfaces_ip, parameter[self]]:
constant[
Get interface IP details. Returns a dictionary of dictionaries.
Sample output:
{
"Ethernet2/3": {
"ipv4": {
"4.4.4.4": {
"prefix_length": 16
}
},
"ipv6": {
"2001:db8::1": {
"prefix_length": 10
},
"fe80::2ec2:60ff:fe4f:feb2": {
"prefix_length": "128"
}
}
},
"Ethernet2/2": {
"ipv4": {
"2.2.2.2": {
"prefix_length": 27
}
}
}
}
]
variable[interfaces_ip] assign[=] dictionary[[], []]
variable[ipv4_command] assign[=] constant[show ip interface vrf all]
variable[ipv6_command] assign[=] constant[show ipv6 interface vrf all]
variable[output_v4] assign[=] call[name[self]._send_command, parameter[name[ipv4_command]]]
variable[output_v6] assign[=] call[name[self]._send_command, parameter[name[ipv6_command]]]
variable[v4_interfaces] assign[=] dictionary[[], []]
for taget[name[line]] in starred[call[name[output_v4].splitlines, parameter[]]] begin[:]
if compare[constant[Interface status] in name[line]] begin[:]
variable[interface] assign[=] call[call[name[line].split, parameter[constant[,]]]][constant[0]]
continue
if compare[constant[IP address] in name[line]] begin[:]
variable[ip_address] assign[=] call[call[call[call[name[line].split, parameter[constant[,]]]][constant[0]].split, parameter[]]][constant[2]]
<ast.Try object at 0x7da1b1c27370>
variable[val] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c26f20>], [<ast.Name object at 0x7da1b1c26ef0>]]
call[call[name[v4_interfaces].setdefault, parameter[name[interface], dictionary[[], []]]]][name[ip_address]] assign[=] name[val]
variable[v6_interfaces] assign[=] dictionary[[], []]
for taget[name[line]] in starred[call[name[output_v6].splitlines, parameter[]]] begin[:]
if compare[constant[Interface status] in name[line]] begin[:]
variable[interface] assign[=] call[call[name[line].split, parameter[constant[,]]]][constant[0]]
continue
if compare[constant[VALID] in name[line]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if compare[constant[link-local address] in name[line]] begin[:]
variable[ip_address] assign[=] call[call[name[line].split, parameter[]]][constant[3]]
variable[prefix_len] assign[=] constant[64]
variable[prefix_len] assign[=] call[name[int], parameter[name[prefix_len]]]
variable[val] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c25e10>], [<ast.Name object at 0x7da1b1c25de0>]]
call[call[name[v6_interfaces].setdefault, parameter[name[interface], dictionary[[], []]]]][name[ip_address]] assign[=] name[val]
for taget[tuple[[<ast.Name object at 0x7da1b1c25ba0>, <ast.Name object at 0x7da1b1c25b70>]]] in starred[call[name[v4_interfaces].items, parameter[]]] begin[:]
call[call[name[interfaces_ip].setdefault, parameter[name[interface], dictionary[[<ast.Constant object at 0x7da1b1c25960>], [<ast.Dict object at 0x7da1b1c25930>]]]]][constant[ipv4]] assign[=] name[data]
for taget[tuple[[<ast.Name object at 0x7da1b1c62770>, <ast.Name object at 0x7da1b1c633d0>]]] in starred[call[name[v6_interfaces].items, parameter[]]] begin[:]
call[call[name[interfaces_ip].setdefault, parameter[name[interface], dictionary[[<ast.Constant object at 0x7da1b1c63790>], [<ast.Dict object at 0x7da1b1c63640>]]]]][constant[ipv6]] assign[=] name[data]
return[name[interfaces_ip]] | keyword[def] identifier[get_interfaces_ip] ( identifier[self] ):
literal[string]
identifier[interfaces_ip] ={}
identifier[ipv4_command] = literal[string]
identifier[ipv6_command] = literal[string]
identifier[output_v4] = identifier[self] . identifier[_send_command] ( identifier[ipv4_command] )
identifier[output_v6] = identifier[self] . identifier[_send_command] ( identifier[ipv6_command] )
identifier[v4_interfaces] ={}
keyword[for] identifier[line] keyword[in] identifier[output_v4] . identifier[splitlines] ():
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[interface] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[ip_address] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ()[ literal[int] ]
keyword[try] :
identifier[prefix_len] = identifier[int] ( identifier[line] . identifier[split] ()[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ])
keyword[except] identifier[ValueError] :
identifier[prefix_len] = literal[string]
identifier[val] ={ literal[string] : identifier[prefix_len] }
identifier[v4_interfaces] . identifier[setdefault] ( identifier[interface] ,{})[ identifier[ip_address] ]= identifier[val]
identifier[v6_interfaces] ={}
keyword[for] identifier[line] keyword[in] identifier[output_v6] . identifier[splitlines] ():
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[interface] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[ip_address] = identifier[line] . identifier[split] ()[ literal[int] ]
identifier[prefix_len] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[line] :
identifier[ip_address] = identifier[line] . identifier[split] ()[ literal[int] ]
identifier[prefix_len] = literal[string]
keyword[else] :
identifier[ip_address] , identifier[prefix_len] = identifier[line] . identifier[split] ()[ literal[int] ]. identifier[split] ( literal[string] )
identifier[prefix_len] = identifier[int] ( identifier[prefix_len] )
identifier[val] ={ literal[string] : identifier[prefix_len] }
identifier[v6_interfaces] . identifier[setdefault] ( identifier[interface] ,{})[ identifier[ip_address] ]= identifier[val]
keyword[for] identifier[interface] , identifier[data] keyword[in] identifier[v4_interfaces] . identifier[items] ():
identifier[interfaces_ip] . identifier[setdefault] ( identifier[interface] ,{ literal[string] :{}})[ literal[string] ]= identifier[data]
keyword[for] identifier[interface] , identifier[data] keyword[in] identifier[v6_interfaces] . identifier[items] ():
identifier[interfaces_ip] . identifier[setdefault] ( identifier[interface] ,{ literal[string] :{}})[ literal[string] ]= identifier[data]
keyword[return] identifier[interfaces_ip] | def get_interfaces_ip(self):
"""
Get interface IP details. Returns a dictionary of dictionaries.
Sample output:
{
"Ethernet2/3": {
"ipv4": {
"4.4.4.4": {
"prefix_length": 16
}
},
"ipv6": {
"2001:db8::1": {
"prefix_length": 10
},
"fe80::2ec2:60ff:fe4f:feb2": {
"prefix_length": "128"
}
}
},
"Ethernet2/2": {
"ipv4": {
"2.2.2.2": {
"prefix_length": 27
}
}
}
}
"""
interfaces_ip = {}
ipv4_command = 'show ip interface vrf all'
ipv6_command = 'show ipv6 interface vrf all'
output_v4 = self._send_command(ipv4_command)
output_v6 = self._send_command(ipv6_command)
v4_interfaces = {}
for line in output_v4.splitlines():
# Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38,
# IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0
# IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0
if 'Interface status' in line:
interface = line.split(',')[0]
continue # depends on [control=['if'], data=['line']]
if 'IP address' in line:
ip_address = line.split(',')[0].split()[2]
try:
prefix_len = int(line.split()[5].split('/')[1]) # depends on [control=['try'], data=[]]
except ValueError:
prefix_len = 'N/A' # depends on [control=['except'], data=[]]
val = {'prefix_length': prefix_len}
v4_interfaces.setdefault(interface, {})[ip_address] = val # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']]
v6_interfaces = {}
for line in output_v6.splitlines():
# Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40
# IPv6 address:
# 2001:11:2233::a1/24 [VALID]
# 2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID]
# IPv6 subnet: 2001::/24
# IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
# IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
if 'Interface status' in line:
interface = line.split(',')[0]
continue # depends on [control=['if'], data=['line']]
if 'VALID' in line:
line = line.strip()
if 'link-local address' in line:
# match the following format:
# IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
ip_address = line.split()[3]
prefix_len = '64' # depends on [control=['if'], data=['line']]
elif 'IPv6 address' in line:
# match the following format:
# IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
ip_address = line.split()[2]
prefix_len = '64' # depends on [control=['if'], data=['line']]
else:
(ip_address, prefix_len) = line.split()[0].split('/')
prefix_len = int(prefix_len)
val = {'prefix_length': prefix_len}
v6_interfaces.setdefault(interface, {})[ip_address] = val # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']]
# Join data from intermediate dictionaries.
for (interface, data) in v4_interfaces.items():
interfaces_ip.setdefault(interface, {'ipv4': {}})['ipv4'] = data # depends on [control=['for'], data=[]]
for (interface, data) in v6_interfaces.items():
interfaces_ip.setdefault(interface, {'ipv6': {}})['ipv6'] = data # depends on [control=['for'], data=[]]
return interfaces_ip |
def createStatus(self,
                 repo_user, repo_name, sha, state, target_url=None,
                 context=None, issue=None, description=None):
    """
    Create a commit status on GitHub for the given sha.

    :param repo_user: GitHub user or organization
    :param repo_name: Name of the repository
    :param sha: Full sha to create the status for.
    :param state: one of the following 'pending', 'success', 'error'
        or 'failure'.
    :param target_url: Target url to associate with this status.
    :param description: Short description of the status.
    :param context: Build context
    :return: A deferred with the result from GitHub.
    This code comes from txgithub by @tomprince.
    txgithub is based on twisted's webclient agent, which is much less reliable and featureful
    as txrequest (support for proxy, connection pool, keep alive, retry, etc)
    """
    payload = {'state': state}
    # Only send the optional fields the caller actually supplied.
    for key, value in (('description', description),
                       ('target_url', target_url),
                       ('context', context)):
        if value is not None:
            payload[key] = value
    url = '/'.join(['/repos', repo_user, repo_name, 'statuses', sha])
    return self._http.post(url, json=payload)
constant[
:param repo_user: GitHub user or organization
:param repo_name: Name of the repository
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'error'
or 'failure'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Build context
:return: A deferred with the result from GitHub.
This code comes from txgithub by @tomprince.
txgithub is based on twisted's webclient agent, which is much less reliable and featureful
as txrequest (support for proxy, connection pool, keep alive, retry, etc)
]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c3ed70>], [<ast.Name object at 0x7da1b1c3f070>]]
if compare[name[description] is_not constant[None]] begin[:]
call[name[payload]][constant[description]] assign[=] name[description]
if compare[name[target_url] is_not constant[None]] begin[:]
call[name[payload]][constant[target_url]] assign[=] name[target_url]
if compare[name[context] is_not constant[None]] begin[:]
call[name[payload]][constant[context]] assign[=] name[context]
return[call[name[self]._http.post, parameter[call[constant[/].join, parameter[list[[<ast.Constant object at 0x7da18c4ce290>, <ast.Name object at 0x7da18c4cd390>, <ast.Name object at 0x7da18c4cc1f0>, <ast.Constant object at 0x7da18c4cf760>, <ast.Name object at 0x7da18c4cded0>]]]]]]] | keyword[def] identifier[createStatus] ( identifier[self] ,
identifier[repo_user] , identifier[repo_name] , identifier[sha] , identifier[state] , identifier[target_url] = keyword[None] ,
identifier[context] = keyword[None] , identifier[issue] = keyword[None] , identifier[description] = keyword[None] ):
literal[string]
identifier[payload] ={ literal[string] : identifier[state] }
keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[description]
keyword[if] identifier[target_url] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[target_url]
keyword[if] identifier[context] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[context]
keyword[return] identifier[self] . identifier[_http] . identifier[post] (
literal[string] . identifier[join] ([ literal[string] , identifier[repo_user] , identifier[repo_name] , literal[string] , identifier[sha] ]),
identifier[json] = identifier[payload] ) | def createStatus(self, repo_user, repo_name, sha, state, target_url=None, context=None, issue=None, description=None):
"""
:param repo_user: GitHub user or organization
:param repo_name: Name of the repository
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'error'
or 'failure'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Build context
:return: A deferred with the result from GitHub.
This code comes from txgithub by @tomprince.
txgithub is based on twisted's webclient agent, which is much less reliable and featureful
as txrequest (support for proxy, connection pool, keep alive, retry, etc)
"""
payload = {'state': state}
if description is not None:
payload['description'] = description # depends on [control=['if'], data=['description']]
if target_url is not None:
payload['target_url'] = target_url # depends on [control=['if'], data=['target_url']]
if context is not None:
payload['context'] = context # depends on [control=['if'], data=['context']]
return self._http.post('/'.join(['/repos', repo_user, repo_name, 'statuses', sha]), json=payload) |
def populate_values(self):
    '''
    Add values from the underlying dash layout configuration
    '''
    # Serialize the current base state straight onto the instance.
    self.base_state = json.dumps(self._get_base_state())
constant[
Add values from the underlying dash layout configuration
]
variable[obj] assign[=] call[name[self]._get_base_state, parameter[]]
name[self].base_state assign[=] call[name[json].dumps, parameter[name[obj]]] | keyword[def] identifier[populate_values] ( identifier[self] ):
literal[string]
identifier[obj] = identifier[self] . identifier[_get_base_state] ()
identifier[self] . identifier[base_state] = identifier[json] . identifier[dumps] ( identifier[obj] ) | def populate_values(self):
"""
Add values from the underlying dash layout configuration
"""
obj = self._get_base_state()
self.base_state = json.dumps(obj) |
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed | def function[_process_underscores, parameter[self, tokens]]:
constant[Strip underscores to make sure the number is correct after join]
variable[groups] assign[=] <ast.ListComp object at 0x7da20eb2bdf0>
variable[flattened] assign[=] <ast.ListComp object at 0x7da18dc04c70>
variable[processed] assign[=] list[[]]
for taget[name[token]] in starred[name[flattened]] begin[:]
if compare[name[token] equal[==] constant[_]] begin[:]
continue
if call[name[token].startswith, parameter[constant[_]]] begin[:]
variable[token] assign[=] call[name[str], parameter[call[name[token]][<ast.Slice object at 0x7da18dc062f0>]]]
if call[name[token].endswith, parameter[constant[_]]] begin[:]
variable[token] assign[=] call[name[str], parameter[call[name[token]][<ast.Slice object at 0x7da18dc07be0>]]]
call[name[processed].append, parameter[name[token]]]
return[name[processed]] | keyword[def] identifier[_process_underscores] ( identifier[self] , identifier[tokens] ):
literal[string]
identifier[groups] =[[ identifier[str] ( literal[string] . identifier[join] ( identifier[el] ))] keyword[if] identifier[b] keyword[else] identifier[list] ( identifier[el] )
keyword[for] ( identifier[b] , identifier[el] ) keyword[in] identifier[itertools] . identifier[groupby] ( identifier[tokens] , keyword[lambda] identifier[k] : identifier[k] == literal[string] )]
identifier[flattened] =[ identifier[el] keyword[for] identifier[group] keyword[in] identifier[groups] keyword[for] identifier[el] keyword[in] identifier[group] ]
identifier[processed] =[]
keyword[for] identifier[token] keyword[in] identifier[flattened] :
keyword[if] identifier[token] == literal[string] : keyword[continue]
keyword[if] identifier[token] . identifier[startswith] ( literal[string] ):
identifier[token] = identifier[str] ( identifier[token] [ literal[int] :])
keyword[if] identifier[token] . identifier[endswith] ( literal[string] ):
identifier[token] = identifier[str] ( identifier[token] [:- literal[int] ])
identifier[processed] . identifier[append] ( identifier[token] )
keyword[return] identifier[processed] | def _process_underscores(self, tokens):
"""Strip underscores to make sure the number is correct after join"""
groups = [[str(''.join(el))] if b else list(el) for (b, el) in itertools.groupby(tokens, lambda k: k == '_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_':
continue # depends on [control=['if'], data=[]]
if token.startswith('_'):
token = str(token[1:]) # depends on [control=['if'], data=[]]
if token.endswith('_'):
token = str(token[:-1]) # depends on [control=['if'], data=[]]
processed.append(token) # depends on [control=['for'], data=['token']]
return processed |
def dump(self, filename=None):
    """
    Dumps the object to an output filename (or open file descriptor).

    :type filename: string or file descriptor
    :param filename: file where to dump the object.  When omitted, the
        serialized tree is written to standard output instead (default
        standard output).
    """
    if filename is None:
        # Serialize into an in-memory buffer first, then copy the raw
        # bytes to stdout.  Use stdout's binary buffer when available so
        # the declared XML encoding is preserved.
        with io.BytesIO() as buffer:
            self.dump(filename=buffer)
            # Renamed from `bytes` to avoid shadowing the builtin type.
            data = buffer.getvalue()
        getattr(sys.stdout, 'buffer', sys.stdout).write(data)
    else:
        self.tree.write(filename, encoding='UTF-8', pretty_print=True,
                        xml_declaration=True)
constant[
Dumps the object to an output filename (or open file descriptor). The filename
parameter is optional, and if it is not provided, the standard output will be used
@type filename: string or file descriptor
@param filename: file where to dump the object (default standard output)
]
if compare[name[filename] is constant[None]] begin[:]
with call[name[io].BytesIO, parameter[]] begin[:]
call[name[self].dump, parameter[]]
variable[bytes] assign[=] call[name[buffer].getvalue, parameter[]]
call[call[name[getattr], parameter[name[sys].stdout, constant[buffer], name[sys].stdout]].write, parameter[name[bytes]]] | keyword[def] identifier[dump] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
keyword[with] identifier[io] . identifier[BytesIO] () keyword[as] identifier[buffer] :
identifier[self] . identifier[dump] ( identifier[filename] = identifier[buffer] )
identifier[bytes] = identifier[buffer] . identifier[getvalue] ()
identifier[getattr] ( identifier[sys] . identifier[stdout] , literal[string] , identifier[sys] . identifier[stdout] ). identifier[write] ( identifier[bytes] )
keyword[else] :
identifier[self] . identifier[tree] . identifier[write] ( identifier[filename] , identifier[encoding] = literal[string] , identifier[pretty_print] = keyword[True] , identifier[xml_declaration] = keyword[True] ) | def dump(self, filename=None):
"""
Dumps the object to an output filename (or open file descriptor). The filename
parameter is optional, and if it is not provided, the standard output will be used
@type filename: string or file descriptor
@param filename: file where to dump the object (default standard output)
"""
if filename is None:
with io.BytesIO() as buffer:
self.dump(filename=buffer)
bytes = buffer.getvalue()
getattr(sys.stdout, 'buffer', sys.stdout).write(bytes) # depends on [control=['with'], data=['buffer']] # depends on [control=['if'], data=[]]
else:
self.tree.write(filename, encoding='UTF-8', pretty_print=True, xml_declaration=True) |
def SetMarkerColor(self, color):
    """
    *color* may be any color understood by ROOT or matplotlib.
    For full documentation of accepted *color* arguments, see
    :class:`rootpy.plotting.style.Color`.
    """
    marker_color = Color(color)
    self._markercolor = marker_color
    if isinstance(self, ROOT.TAttMarker):
        # Forward the converted color to the underlying ROOT attribute.
        ROOT.TAttMarker.SetMarkerColor(self, marker_color('root'))
constant[
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
]
name[self]._markercolor assign[=] call[name[Color], parameter[name[color]]]
if call[name[isinstance], parameter[name[self], name[ROOT].TAttMarker]] begin[:]
call[name[ROOT].TAttMarker.SetMarkerColor, parameter[name[self], call[name[self]._markercolor, parameter[constant[root]]]]] | keyword[def] identifier[SetMarkerColor] ( identifier[self] , identifier[color] ):
literal[string]
identifier[self] . identifier[_markercolor] = identifier[Color] ( identifier[color] )
keyword[if] identifier[isinstance] ( identifier[self] , identifier[ROOT] . identifier[TAttMarker] ):
identifier[ROOT] . identifier[TAttMarker] . identifier[SetMarkerColor] ( identifier[self] , identifier[self] . identifier[_markercolor] ( literal[string] )) | def SetMarkerColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self._markercolor = Color(color)
if isinstance(self, ROOT.TAttMarker):
ROOT.TAttMarker.SetMarkerColor(self, self._markercolor('root')) # depends on [control=['if'], data=[]] |
def _extract_date(self):
    """ Extract date from HTML.

    Tries each XPath expression from the site configuration in order and
    stores the first non-empty match on ``self.date``.  A date that is
    already set is left untouched.

    NOTE(review): Python-2-only code — relies on ``basestring`` and
    ``unicode``, which do not exist on Python 3.
    """
    if self.date:
        return
    found = False
    # Patterns are tried in configuration order; the first pattern that
    # yields a usable date wins (see the double break below).
    for pattern in self.config.date:
        items = self.parsed_tree.xpath(pattern)
        if isinstance(items, basestring):
            # In case xpath returns only one element.
            items = [items]
        for item in items:
            if isinstance(item, basestring):
                # '_ElementStringResult' object has no attribute 'text'
                stripped_date = unicode(item).strip()
            else:
                try:
                    stripped_date = item.text.strip()
                except AttributeError:
                    # .text is None. We got a <div> item with span-only
                    # content. The result will probably be completely
                    # useless to a python developer, but at least we
                    # didn't fail handling the siteconfig directive.
                    stripped_date = etree.tostring(item)
            if stripped_date:
                # self.date = strtotime(trim(elems, "; \t\n\r\0\x0B"))
                self.date = stripped_date
                LOGGER.info(u'Date extracted: %s.', stripped_date,
                            extra={'siteconfig': self.config.host})
                found = True
                break
        if found:
            break
constant[ Extract date from HTML. ]
if name[self].date begin[:]
return[None]
variable[found] assign[=] constant[False]
for taget[name[pattern]] in starred[name[self].config.date] begin[:]
variable[items] assign[=] call[name[self].parsed_tree.xpath, parameter[name[pattern]]]
if call[name[isinstance], parameter[name[items], name[basestring]]] begin[:]
variable[items] assign[=] list[[<ast.Name object at 0x7da1b0aa5cf0>]]
for taget[name[item]] in starred[name[items]] begin[:]
if call[name[isinstance], parameter[name[item], name[basestring]]] begin[:]
variable[stripped_date] assign[=] call[call[name[unicode], parameter[name[item]]].strip, parameter[]]
if name[stripped_date] begin[:]
name[self].date assign[=] name[stripped_date]
call[name[LOGGER].info, parameter[constant[Date extracted: %s.], name[stripped_date]]]
variable[found] assign[=] constant[True]
break
if name[found] begin[:]
break | keyword[def] identifier[_extract_date] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[date] :
keyword[return]
identifier[found] = keyword[False]
keyword[for] identifier[pattern] keyword[in] identifier[self] . identifier[config] . identifier[date] :
identifier[items] = identifier[self] . identifier[parsed_tree] . identifier[xpath] ( identifier[pattern] )
keyword[if] identifier[isinstance] ( identifier[items] , identifier[basestring] ):
identifier[items] =[ identifier[items] ]
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[basestring] ):
identifier[stripped_date] = identifier[unicode] ( identifier[item] ). identifier[strip] ()
keyword[else] :
keyword[try] :
identifier[stripped_date] = identifier[item] . identifier[text] . identifier[strip] ()
keyword[except] identifier[AttributeError] :
identifier[stripped_date] = identifier[etree] . identifier[tostring] ( identifier[item] )
keyword[if] identifier[stripped_date] :
identifier[self] . identifier[date] = identifier[stripped_date]
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[stripped_date] ,
identifier[extra] ={ literal[string] : identifier[self] . identifier[config] . identifier[host] })
identifier[found] = keyword[True]
keyword[break]
keyword[if] identifier[found] :
keyword[break] | def _extract_date(self):
""" Extract date from HTML. """
if self.date:
return # depends on [control=['if'], data=[]]
found = False
for pattern in self.config.date:
items = self.parsed_tree.xpath(pattern)
if isinstance(items, basestring):
# In case xpath returns only one element.
items = [items] # depends on [control=['if'], data=[]]
for item in items:
if isinstance(item, basestring):
# '_ElementStringResult' object has no attribute 'text'
stripped_date = unicode(item).strip() # depends on [control=['if'], data=[]]
else:
try:
stripped_date = item.text.strip() # depends on [control=['try'], data=[]]
except AttributeError:
# .text is None. We got a <div> item with span-only
# content. The result will probably be completely
# useless to a python developer, but at least we
# didn't fail handling the siteconfig directive.
stripped_date = etree.tostring(item) # depends on [control=['except'], data=[]]
if stripped_date:
# self.date = strtotime(trim(elems, "; \t\n\r\0\x0B"))
self.date = stripped_date
LOGGER.info(u'Date extracted: %s.', stripped_date, extra={'siteconfig': self.config.host})
found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
if found:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']] |
def macro_network():
    """A network of micro elements which has greater integrated information
    after coarse graining to a macro scale.
    """
    # Start from a uniform 0.3 background and overlay the deterministic
    # entries: columns 2-3 are 1.0 for every fourth input state starting
    # at index 3 (rows 3, 7, 11, 15), and columns 0-1 are 1.0 for the
    # last four input states (rows 12-15).
    tpm = np.full((16, 4), 0.3)
    tpm[3::4, 2:] = 1.0
    tpm[12:, :2] = 1.0
    return Network(tpm, node_labels=LABELS[:tpm.shape[1]])
constant[A network of micro elements which has greater integrated information
after coarse graining to a macro scale.
]
variable[tpm] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da18f720730>, <ast.List object at 0x7da18f721150>, <ast.List object at 0x7da18f721f60>, <ast.List object at 0x7da18f720b20>, <ast.List object at 0x7da18f7225c0>, <ast.List object at 0x7da18f721bd0>, <ast.List object at 0x7da18f720a60>, <ast.List object at 0x7da18f722aa0>, <ast.List object at 0x7da18f7209d0>, <ast.List object at 0x7da18f721960>, <ast.List object at 0x7da18f723610>, <ast.List object at 0x7da18f721450>, <ast.List object at 0x7da18f721210>, <ast.List object at 0x7da18f723850>, <ast.List object at 0x7da18f722020>, <ast.List object at 0x7da18f7228f0>]]]]
return[call[name[Network], parameter[name[tpm]]]] | keyword[def] identifier[macro_network] ():
literal[string]
identifier[tpm] = identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ]])
keyword[return] identifier[Network] ( identifier[tpm] , identifier[node_labels] = identifier[LABELS] [: identifier[tpm] . identifier[shape] [ literal[int] ]]) | def macro_network():
"""A network of micro elements which has greater integrated information
after coarse graining to a macro scale.
"""
tpm = np.array([[0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 1.0, 1.0], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 1.0, 1.0], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 1.0, 1.0], [1.0, 1.0, 0.3, 0.3], [1.0, 1.0, 0.3, 0.3], [1.0, 1.0, 0.3, 0.3], [1.0, 1.0, 1.0, 1.0]])
return Network(tpm, node_labels=LABELS[:tpm.shape[1]]) |
def do_bd(self, arg):
    """
    [~process] bd <address> - disable a code breakpoint
    [~thread] bd <address> - disable a hardware breakpoint
    [~process] bd <address-address> - disable a memory breakpoint
    [~process] bd <address> <size> - disable a memory breakpoint
    """
    tokens = self.split_tokens(arg, 1, 2)
    pid, tid, address, size = self.input_breakpoint(tokens)
    debug = self.debug
    disabled = False
    if size is not None:
        # A size was given: this is a memory (page) breakpoint.
        if debug.has_page_breakpoint(pid, address):
            debug.disable_page_breakpoint(pid, address)
            disabled = True
    else:
        # No size: hardware breakpoints are per-thread, code breakpoints
        # are per-process; both may apply to the same address.
        if tid is not None and debug.has_hardware_breakpoint(tid, address):
            debug.disable_hardware_breakpoint(tid, address)
            disabled = True
        if pid is not None and debug.has_code_breakpoint(pid, address):
            debug.disable_code_breakpoint(pid, address)
            disabled = True
    if not disabled:
        print("Error: breakpoint not found.")
constant[
[~process] bd <address> - disable a code breakpoint
[~thread] bd <address> - disable a hardware breakpoint
[~process] bd <address-address> - disable a memory breakpoint
[~process] bd <address> <size> - disable a memory breakpoint
]
variable[token_list] assign[=] call[name[self].split_tokens, parameter[name[arg], constant[1], constant[2]]]
<ast.Tuple object at 0x7da1b23470a0> assign[=] call[name[self].input_breakpoint, parameter[name[token_list]]]
variable[debug] assign[=] name[self].debug
variable[found] assign[=] constant[False]
if compare[name[size] is constant[None]] begin[:]
if compare[name[tid] is_not constant[None]] begin[:]
if call[name[debug].has_hardware_breakpoint, parameter[name[tid], name[address]]] begin[:]
call[name[debug].disable_hardware_breakpoint, parameter[name[tid], name[address]]]
variable[found] assign[=] constant[True]
if compare[name[pid] is_not constant[None]] begin[:]
if call[name[debug].has_code_breakpoint, parameter[name[pid], name[address]]] begin[:]
call[name[debug].disable_code_breakpoint, parameter[name[pid], name[address]]]
variable[found] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da20c6a9660> begin[:]
call[name[print], parameter[constant[Error: breakpoint not found.]]] | keyword[def] identifier[do_bd] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[token_list] = identifier[self] . identifier[split_tokens] ( identifier[arg] , literal[int] , literal[int] )
identifier[pid] , identifier[tid] , identifier[address] , identifier[size] = identifier[self] . identifier[input_breakpoint] ( identifier[token_list] )
identifier[debug] = identifier[self] . identifier[debug]
identifier[found] = keyword[False]
keyword[if] identifier[size] keyword[is] keyword[None] :
keyword[if] identifier[tid] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[debug] . identifier[has_hardware_breakpoint] ( identifier[tid] , identifier[address] ):
identifier[debug] . identifier[disable_hardware_breakpoint] ( identifier[tid] , identifier[address] )
identifier[found] = keyword[True]
keyword[if] identifier[pid] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[debug] . identifier[has_code_breakpoint] ( identifier[pid] , identifier[address] ):
identifier[debug] . identifier[disable_code_breakpoint] ( identifier[pid] , identifier[address] )
identifier[found] = keyword[True]
keyword[else] :
keyword[if] identifier[debug] . identifier[has_page_breakpoint] ( identifier[pid] , identifier[address] ):
identifier[debug] . identifier[disable_page_breakpoint] ( identifier[pid] , identifier[address] )
identifier[found] = keyword[True]
keyword[if] keyword[not] identifier[found] :
identifier[print] ( literal[string] ) | def do_bd(self, arg):
"""
[~process] bd <address> - disable a code breakpoint
[~thread] bd <address> - disable a hardware breakpoint
[~process] bd <address-address> - disable a memory breakpoint
[~process] bd <address> <size> - disable a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
(pid, tid, address, size) = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.disable_hardware_breakpoint(tid, address)
found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tid']]
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.disable_code_breakpoint(pid, address)
found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pid']] # depends on [control=['if'], data=[]]
elif debug.has_page_breakpoint(pid, address):
debug.disable_page_breakpoint(pid, address)
found = True # depends on [control=['if'], data=[]]
if not found:
print('Error: breakpoint not found.') # depends on [control=['if'], data=[]] |
def mdadm():
    '''
    Return list of mdadm devices
    '''
    found = set()
    try:
        with salt.utils.files.fopen('/proc/mdstat', 'r') as mdstat:
            for raw in mdstat:
                raw = salt.utils.stringutils.to_unicode(raw)
                # Header/trailer lines of /proc/mdstat carry no device names.
                if raw.startswith(('Personalities : ', 'unused devices:')):
                    continue
                if ' : ' in raw:
                    found.add(raw.split(' : ')[0])
    except IOError:
        # /proc/mdstat is absent when the md driver is not loaded.
        return {}
    devices = sorted(found)
    if devices:
        log.trace('mdadm devices detected: %s', ', '.join(devices))
    return {'mdadm': devices}
constant[
Return list of mdadm devices
]
variable[devices] assign[=] call[name[set], parameter[]]
<ast.Try object at 0x7da2054a58a0>
variable[devices] assign[=] call[name[sorted], parameter[name[devices]]]
if name[devices] begin[:]
call[name[log].trace, parameter[constant[mdadm devices detected: %s], call[constant[, ].join, parameter[name[devices]]]]]
return[dictionary[[<ast.Constant object at 0x7da2054a4100>], [<ast.Name object at 0x7da2054a76a0>]]] | keyword[def] identifier[mdadm] ():
literal[string]
identifier[devices] = identifier[set] ()
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( literal[string] , literal[string] ) keyword[as] identifier[mdstat] :
keyword[for] identifier[line] keyword[in] identifier[mdstat] :
identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] )
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[devices] . identifier[add] ( identifier[line] . identifier[split] ( literal[string] )[ literal[int] ])
keyword[except] identifier[IOError] :
keyword[return] {}
identifier[devices] = identifier[sorted] ( identifier[devices] )
keyword[if] identifier[devices] :
identifier[log] . identifier[trace] ( literal[string] , literal[string] . identifier[join] ( identifier[devices] ))
keyword[return] { literal[string] : identifier[devices] } | def mdadm():
"""
Return list of mdadm devices
"""
devices = set()
try:
with salt.utils.files.fopen('/proc/mdstat', 'r') as mdstat:
for line in mdstat:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('Personalities : '):
continue # depends on [control=['if'], data=[]]
if line.startswith('unused devices:'):
continue # depends on [control=['if'], data=[]]
if ' : ' in line:
devices.add(line.split(' : ')[0]) # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['mdstat']] # depends on [control=['try'], data=[]]
except IOError:
return {} # depends on [control=['except'], data=[]]
devices = sorted(devices)
if devices:
log.trace('mdadm devices detected: %s', ', '.join(devices)) # depends on [control=['if'], data=[]]
return {'mdadm': devices} |
def frozen_default_graph_def(input_node_names, output_node_names):
    """Return frozen and simplified graph_def of default graph."""
    session = tf.get_default_session()
    graph_def = tf.get_default_graph().as_graph_def()
    # Input and output nodes must survive the training-node cleanup.
    protected = output_node_names + input_node_names
    pruned = tf.graph_util.remove_training_nodes(
        graph_def, protected_nodes=protected
    )
    pruned = tf.graph_util.extract_sub_graph(pruned, output_node_names)
    # Remove explicit device assignments so the frozen graph is portable.
    for node in pruned.node:
        node.device = ""
    variable_names = [v.op.name for v in tf.global_variables()]
    return tf.graph_util.convert_variables_to_constants(
        sess=session,
        input_graph_def=pruned,
        output_node_names=output_node_names,
        variable_names_whitelist=variable_names,
    )
constant[Return frozen and simplified graph_def of default graph.]
variable[sess] assign[=] call[name[tf].get_default_session, parameter[]]
variable[input_graph_def] assign[=] call[call[name[tf].get_default_graph, parameter[]].as_graph_def, parameter[]]
variable[pruned_graph] assign[=] call[name[tf].graph_util.remove_training_nodes, parameter[name[input_graph_def]]]
variable[pruned_graph] assign[=] call[name[tf].graph_util.extract_sub_graph, parameter[name[pruned_graph], name[output_node_names]]]
for taget[name[node]] in starred[name[pruned_graph].node] begin[:]
name[node].device assign[=] constant[]
variable[all_variable_names] assign[=] <ast.ListComp object at 0x7da1b2043eb0>
variable[output_graph_def] assign[=] call[name[tf].graph_util.convert_variables_to_constants, parameter[]]
return[name[output_graph_def]] | keyword[def] identifier[frozen_default_graph_def] ( identifier[input_node_names] , identifier[output_node_names] ):
literal[string]
identifier[sess] = identifier[tf] . identifier[get_default_session] ()
identifier[input_graph_def] = identifier[tf] . identifier[get_default_graph] (). identifier[as_graph_def] ()
identifier[pruned_graph] = identifier[tf] . identifier[graph_util] . identifier[remove_training_nodes] (
identifier[input_graph_def] , identifier[protected_nodes] =( identifier[output_node_names] + identifier[input_node_names] )
)
identifier[pruned_graph] = identifier[tf] . identifier[graph_util] . identifier[extract_sub_graph] ( identifier[pruned_graph] , identifier[output_node_names] )
keyword[for] identifier[node] keyword[in] identifier[pruned_graph] . identifier[node] :
identifier[node] . identifier[device] = literal[string]
identifier[all_variable_names] =[ identifier[v] . identifier[op] . identifier[name] keyword[for] identifier[v] keyword[in] identifier[tf] . identifier[global_variables] ()]
identifier[output_graph_def] = identifier[tf] . identifier[graph_util] . identifier[convert_variables_to_constants] (
identifier[sess] = identifier[sess] ,
identifier[input_graph_def] = identifier[pruned_graph] ,
identifier[output_node_names] = identifier[output_node_names] ,
identifier[variable_names_whitelist] = identifier[all_variable_names] ,
)
keyword[return] identifier[output_graph_def] | def frozen_default_graph_def(input_node_names, output_node_names):
"""Return frozen and simplified graph_def of default graph."""
sess = tf.get_default_session()
input_graph_def = tf.get_default_graph().as_graph_def()
pruned_graph = tf.graph_util.remove_training_nodes(input_graph_def, protected_nodes=output_node_names + input_node_names)
pruned_graph = tf.graph_util.extract_sub_graph(pruned_graph, output_node_names)
# remove explicit device assignments
for node in pruned_graph.node:
node.device = '' # depends on [control=['for'], data=['node']]
all_variable_names = [v.op.name for v in tf.global_variables()]
output_graph_def = tf.graph_util.convert_variables_to_constants(sess=sess, input_graph_def=pruned_graph, output_node_names=output_node_names, variable_names_whitelist=all_variable_names)
return output_graph_def |
def check_lazy_load_gemeente(f):
'''
Decorator function to lazy load a :class:`Gemeente`.
'''
def wrapper(self):
gemeente = self
if (getattr(gemeente, '_%s' % f.__name__, None) is None):
log.debug('Lazy loading Gemeente %d', gemeente.id)
gemeente.check_gateway()
g = gemeente.gateway.get_gemeente_by_id(gemeente.id)
gemeente._naam = g._naam
gemeente._centroid = g._centroid
gemeente._bounding_box = g._bounding_box
return f(self)
return wrapper | def function[check_lazy_load_gemeente, parameter[f]]:
constant[
Decorator function to lazy load a :class:`Gemeente`.
]
def function[wrapper, parameter[self]]:
variable[gemeente] assign[=] name[self]
if compare[call[name[getattr], parameter[name[gemeente], binary_operation[constant[_%s] <ast.Mod object at 0x7da2590d6920> name[f].__name__], constant[None]]] is constant[None]] begin[:]
call[name[log].debug, parameter[constant[Lazy loading Gemeente %d], name[gemeente].id]]
call[name[gemeente].check_gateway, parameter[]]
variable[g] assign[=] call[name[gemeente].gateway.get_gemeente_by_id, parameter[name[gemeente].id]]
name[gemeente]._naam assign[=] name[g]._naam
name[gemeente]._centroid assign[=] name[g]._centroid
name[gemeente]._bounding_box assign[=] name[g]._bounding_box
return[call[name[f], parameter[name[self]]]]
return[name[wrapper]] | keyword[def] identifier[check_lazy_load_gemeente] ( identifier[f] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[self] ):
identifier[gemeente] = identifier[self]
keyword[if] ( identifier[getattr] ( identifier[gemeente] , literal[string] % identifier[f] . identifier[__name__] , keyword[None] ) keyword[is] keyword[None] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[gemeente] . identifier[id] )
identifier[gemeente] . identifier[check_gateway] ()
identifier[g] = identifier[gemeente] . identifier[gateway] . identifier[get_gemeente_by_id] ( identifier[gemeente] . identifier[id] )
identifier[gemeente] . identifier[_naam] = identifier[g] . identifier[_naam]
identifier[gemeente] . identifier[_centroid] = identifier[g] . identifier[_centroid]
identifier[gemeente] . identifier[_bounding_box] = identifier[g] . identifier[_bounding_box]
keyword[return] identifier[f] ( identifier[self] )
keyword[return] identifier[wrapper] | def check_lazy_load_gemeente(f):
"""
Decorator function to lazy load a :class:`Gemeente`.
"""
def wrapper(self):
gemeente = self
if getattr(gemeente, '_%s' % f.__name__, None) is None:
log.debug('Lazy loading Gemeente %d', gemeente.id)
gemeente.check_gateway()
g = gemeente.gateway.get_gemeente_by_id(gemeente.id)
gemeente._naam = g._naam
gemeente._centroid = g._centroid
gemeente._bounding_box = g._bounding_box # depends on [control=['if'], data=[]]
return f(self)
return wrapper |
async def send_rpc(self, conn_id, address, rpc_id, payload, timeout):
"""Asynchronously send an RPC to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
address (int): the address of the tile that we wish to send the RPC to
rpc_id (int): the 16-bit id of the RPC we want to call
payload (bytearray): the payload of the command
timeout (float): the number of seconds to wait for the RPC to execute
"""
try:
return await super(EmulatedDeviceAdapter, self).send_rpc(conn_id, address, rpc_id, payload, timeout)
finally:
for dev in self.devices.values():
dev.wait_idle() | <ast.AsyncFunctionDef object at 0x7da20e956500> | keyword[async] keyword[def] identifier[send_rpc] ( identifier[self] , identifier[conn_id] , identifier[address] , identifier[rpc_id] , identifier[payload] , identifier[timeout] ):
literal[string]
keyword[try] :
keyword[return] keyword[await] identifier[super] ( identifier[EmulatedDeviceAdapter] , identifier[self] ). identifier[send_rpc] ( identifier[conn_id] , identifier[address] , identifier[rpc_id] , identifier[payload] , identifier[timeout] )
keyword[finally] :
keyword[for] identifier[dev] keyword[in] identifier[self] . identifier[devices] . identifier[values] ():
identifier[dev] . identifier[wait_idle] () | async def send_rpc(self, conn_id, address, rpc_id, payload, timeout):
"""Asynchronously send an RPC to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
address (int): the address of the tile that we wish to send the RPC to
rpc_id (int): the 16-bit id of the RPC we want to call
payload (bytearray): the payload of the command
timeout (float): the number of seconds to wait for the RPC to execute
"""
try:
return await super(EmulatedDeviceAdapter, self).send_rpc(conn_id, address, rpc_id, payload, timeout) # depends on [control=['try'], data=[]]
finally:
for dev in self.devices.values():
dev.wait_idle() # depends on [control=['for'], data=['dev']] |
def convert_coord(coord_from,matrix_file,base_to_aligned=True):
'''Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate
matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``'''
with open(matrix_file) as f:
try:
values = [float(y) for y in ' '.join([x for x in f.readlines() if x.strip()[0]!='#']).strip().split()]
except:
nl.notify('Error reading values from matrix file %s' % matrix_file, level=nl.level.error)
return False
if len(values)!=12:
nl.notify('Error: found %d values in matrix file %s (expecting 12)' % (len(values),matrix_file), level=nl.level.error)
return False
matrix = np.vstack((np.array(values).reshape((3,-1)),[0,0,0,1]))
if not base_to_aligned:
matrix = np.linalg.inv(matrix)
return np.dot(matrix,list(coord_from) + [1])[:3] | def function[convert_coord, parameter[coord_from, matrix_file, base_to_aligned]]:
constant[Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate
matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``]
with call[name[open], parameter[name[matrix_file]]] begin[:]
<ast.Try object at 0x7da20c990670>
if compare[call[name[len], parameter[name[values]]] not_equal[!=] constant[12]] begin[:]
call[name[nl].notify, parameter[binary_operation[constant[Error: found %d values in matrix file %s (expecting 12)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c991a80>, <ast.Name object at 0x7da20c993850>]]]]]
return[constant[False]]
variable[matrix] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Call object at 0x7da20c9927d0>, <ast.List object at 0x7da20c990910>]]]]
if <ast.UnaryOp object at 0x7da20c993cd0> begin[:]
variable[matrix] assign[=] call[name[np].linalg.inv, parameter[name[matrix]]]
return[call[call[name[np].dot, parameter[name[matrix], binary_operation[call[name[list], parameter[name[coord_from]]] + list[[<ast.Constant object at 0x7da20c991180>]]]]]][<ast.Slice object at 0x7da20c993c10>]] | keyword[def] identifier[convert_coord] ( identifier[coord_from] , identifier[matrix_file] , identifier[base_to_aligned] = keyword[True] ):
literal[string]
keyword[with] identifier[open] ( identifier[matrix_file] ) keyword[as] identifier[f] :
keyword[try] :
identifier[values] =[ identifier[float] ( identifier[y] ) keyword[for] identifier[y] keyword[in] literal[string] . identifier[join] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[f] . identifier[readlines] () keyword[if] identifier[x] . identifier[strip] ()[ literal[int] ]!= literal[string] ]). identifier[strip] (). identifier[split] ()]
keyword[except] :
identifier[nl] . identifier[notify] ( literal[string] % identifier[matrix_file] , identifier[level] = identifier[nl] . identifier[level] . identifier[error] )
keyword[return] keyword[False]
keyword[if] identifier[len] ( identifier[values] )!= literal[int] :
identifier[nl] . identifier[notify] ( literal[string] %( identifier[len] ( identifier[values] ), identifier[matrix_file] ), identifier[level] = identifier[nl] . identifier[level] . identifier[error] )
keyword[return] keyword[False]
identifier[matrix] = identifier[np] . identifier[vstack] (( identifier[np] . identifier[array] ( identifier[values] ). identifier[reshape] (( literal[int] ,- literal[int] )),[ literal[int] , literal[int] , literal[int] , literal[int] ]))
keyword[if] keyword[not] identifier[base_to_aligned] :
identifier[matrix] = identifier[np] . identifier[linalg] . identifier[inv] ( identifier[matrix] )
keyword[return] identifier[np] . identifier[dot] ( identifier[matrix] , identifier[list] ( identifier[coord_from] )+[ literal[int] ])[: literal[int] ] | def convert_coord(coord_from, matrix_file, base_to_aligned=True):
"""Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate
matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``"""
with open(matrix_file) as f:
try:
values = [float(y) for y in ' '.join([x for x in f.readlines() if x.strip()[0] != '#']).strip().split()] # depends on [control=['try'], data=[]]
except:
nl.notify('Error reading values from matrix file %s' % matrix_file, level=nl.level.error)
return False # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['f']]
if len(values) != 12:
nl.notify('Error: found %d values in matrix file %s (expecting 12)' % (len(values), matrix_file), level=nl.level.error)
return False # depends on [control=['if'], data=[]]
matrix = np.vstack((np.array(values).reshape((3, -1)), [0, 0, 0, 1]))
if not base_to_aligned:
matrix = np.linalg.inv(matrix) # depends on [control=['if'], data=[]]
return np.dot(matrix, list(coord_from) + [1])[:3] |
def is_public(self):
"""Returns True if the public-read ACL is set for the Key."""
for grant in self._boto_object.Acl().grants:
if 'AllUsers' in grant['Grantee'].get('URI', ''):
if grant['Permission'] == 'READ':
return True
return False | def function[is_public, parameter[self]]:
constant[Returns True if the public-read ACL is set for the Key.]
for taget[name[grant]] in starred[call[name[self]._boto_object.Acl, parameter[]].grants] begin[:]
if compare[constant[AllUsers] in call[call[name[grant]][constant[Grantee]].get, parameter[constant[URI], constant[]]]] begin[:]
if compare[call[name[grant]][constant[Permission]] equal[==] constant[READ]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_public] ( identifier[self] ):
literal[string]
keyword[for] identifier[grant] keyword[in] identifier[self] . identifier[_boto_object] . identifier[Acl] (). identifier[grants] :
keyword[if] literal[string] keyword[in] identifier[grant] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] ):
keyword[if] identifier[grant] [ literal[string] ]== literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_public(self):
"""Returns True if the public-read ACL is set for the Key."""
for grant in self._boto_object.Acl().grants:
if 'AllUsers' in grant['Grantee'].get('URI', ''):
if grant['Permission'] == 'READ':
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['grant']]
return False |
def unless(exception_class_or_tuple, func, *args, **kwargs):
"""
When `exception_class_or_tuple` occurs while executing `func`, it will
be caught and ``None`` will be returned.
>>> f = where(X > 10) | list | unless(IndexError, X[0])
>>> f([5, 8, 12, 4])
12
>>> f([1, 2, 3])
None
"""
@pipe_util
@auto_string_formatter
@data_structure_builder
def construct_unless(function):
# a wrapper so we can re-use the decorators
def _unless(*args, **kwargs):
try:
return function(*args, **kwargs)
except exception_class_or_tuple:
pass
return _unless
name = lambda: 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(
filter(None, (get_name(func), repr_args(*args, **kwargs)))))
return set_name(name, construct_unless(func, *args, **kwargs)) | def function[unless, parameter[exception_class_or_tuple, func]]:
constant[
When `exception_class_or_tuple` occurs while executing `func`, it will
be caught and ``None`` will be returned.
>>> f = where(X > 10) | list | unless(IndexError, X[0])
>>> f([5, 8, 12, 4])
12
>>> f([1, 2, 3])
None
]
def function[construct_unless, parameter[function]]:
def function[_unless, parameter[]]:
<ast.Try object at 0x7da2054a6ad0>
return[name[_unless]]
variable[name] assign[=] <ast.Lambda object at 0x7da2054a7a90>
return[call[name[set_name], parameter[name[name], call[name[construct_unless], parameter[name[func], <ast.Starred object at 0x7da2054a7f40>]]]]] | keyword[def] identifier[unless] ( identifier[exception_class_or_tuple] , identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
@ identifier[pipe_util]
@ identifier[auto_string_formatter]
@ identifier[data_structure_builder]
keyword[def] identifier[construct_unless] ( identifier[function] ):
keyword[def] identifier[_unless] (* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
keyword[return] identifier[function] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[exception_class_or_tuple] :
keyword[pass]
keyword[return] identifier[_unless]
identifier[name] = keyword[lambda] : literal[string] %( identifier[exception_class_or_tuple] , literal[string] . identifier[join] (
identifier[filter] ( keyword[None] ,( identifier[get_name] ( identifier[func] ), identifier[repr_args] (* identifier[args] ,** identifier[kwargs] )))))
keyword[return] identifier[set_name] ( identifier[name] , identifier[construct_unless] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] )) | def unless(exception_class_or_tuple, func, *args, **kwargs):
"""
When `exception_class_or_tuple` occurs while executing `func`, it will
be caught and ``None`` will be returned.
>>> f = where(X > 10) | list | unless(IndexError, X[0])
>>> f([5, 8, 12, 4])
12
>>> f([1, 2, 3])
None
"""
@pipe_util
@auto_string_formatter
@data_structure_builder
def construct_unless(function):
# a wrapper so we can re-use the decorators
def _unless(*args, **kwargs):
try:
return function(*args, **kwargs) # depends on [control=['try'], data=[]]
except exception_class_or_tuple:
pass # depends on [control=['except'], data=[]]
return _unless
name = lambda : 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(filter(None, (get_name(func), repr_args(*args, **kwargs)))))
return set_name(name, construct_unless(func, *args, **kwargs)) |
def iter_grants(self, as_json=True):
"""Fetch records from the SQLite database."""
self._connect()
result = self.db_connection.cursor().execute(
"SELECT data, format FROM grants"
)
for data, data_format in result:
if (not as_json) and data_format == 'json':
raise Exception("Cannot convert JSON source to XML output.")
elif as_json and data_format == 'xml':
data = self.grantxml2json(data)
elif as_json and data_format == 'json':
data = json.loads(data)
yield data
self._disconnect() | def function[iter_grants, parameter[self, as_json]]:
constant[Fetch records from the SQLite database.]
call[name[self]._connect, parameter[]]
variable[result] assign[=] call[call[name[self].db_connection.cursor, parameter[]].execute, parameter[constant[SELECT data, format FROM grants]]]
for taget[tuple[[<ast.Name object at 0x7da1b0bd97e0>, <ast.Name object at 0x7da1b0bda470>]]] in starred[name[result]] begin[:]
if <ast.BoolOp object at 0x7da1b0bd97b0> begin[:]
<ast.Raise object at 0x7da1b0bd8c40>
<ast.Yield object at 0x7da1b0a0ffd0>
call[name[self]._disconnect, parameter[]] | keyword[def] identifier[iter_grants] ( identifier[self] , identifier[as_json] = keyword[True] ):
literal[string]
identifier[self] . identifier[_connect] ()
identifier[result] = identifier[self] . identifier[db_connection] . identifier[cursor] (). identifier[execute] (
literal[string]
)
keyword[for] identifier[data] , identifier[data_format] keyword[in] identifier[result] :
keyword[if] ( keyword[not] identifier[as_json] ) keyword[and] identifier[data_format] == literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[as_json] keyword[and] identifier[data_format] == literal[string] :
identifier[data] = identifier[self] . identifier[grantxml2json] ( identifier[data] )
keyword[elif] identifier[as_json] keyword[and] identifier[data_format] == literal[string] :
identifier[data] = identifier[json] . identifier[loads] ( identifier[data] )
keyword[yield] identifier[data]
identifier[self] . identifier[_disconnect] () | def iter_grants(self, as_json=True):
"""Fetch records from the SQLite database."""
self._connect()
result = self.db_connection.cursor().execute('SELECT data, format FROM grants')
for (data, data_format) in result:
if not as_json and data_format == 'json':
raise Exception('Cannot convert JSON source to XML output.') # depends on [control=['if'], data=[]]
elif as_json and data_format == 'xml':
data = self.grantxml2json(data) # depends on [control=['if'], data=[]]
elif as_json and data_format == 'json':
data = json.loads(data) # depends on [control=['if'], data=[]]
yield data # depends on [control=['for'], data=[]]
self._disconnect() |
def detect_logging_level(self, node):
"""
Heuristic to decide whether an AST Call is a logging call.
"""
try:
if self.get_id_attr(node.func.value) == "warnings":
return None
# NB: We could also look at the argument signature or the target attribute
if node.func.attr in LOGGING_LEVELS:
return node.func.attr
except AttributeError:
pass
return None | def function[detect_logging_level, parameter[self, node]]:
constant[
Heuristic to decide whether an AST Call is a logging call.
]
<ast.Try object at 0x7da1b03f8d90>
return[constant[None]] | keyword[def] identifier[detect_logging_level] ( identifier[self] , identifier[node] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[get_id_attr] ( identifier[node] . identifier[func] . identifier[value] )== literal[string] :
keyword[return] keyword[None]
keyword[if] identifier[node] . identifier[func] . identifier[attr] keyword[in] identifier[LOGGING_LEVELS] :
keyword[return] identifier[node] . identifier[func] . identifier[attr]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] keyword[None] | def detect_logging_level(self, node):
"""
Heuristic to decide whether an AST Call is a logging call.
"""
try:
if self.get_id_attr(node.func.value) == 'warnings':
return None # depends on [control=['if'], data=[]]
# NB: We could also look at the argument signature or the target attribute
if node.func.attr in LOGGING_LEVELS:
return node.func.attr # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
return None |
def run(config, clear_opt=False):
"""Find an image and download it."""
flickr = flickrapi.FlickrAPI(config.get('walls', 'api_key'),
config.get('walls', 'api_secret'))
width = config.getint('walls', 'width')
height = config.getint('walls', 'height')
# Clear out the destination dir
if clear_opt:
clear_dir(os.path.expanduser(config.get('walls', 'image_dir')))
# Find an image
tags = config.get('walls', 'tags')
for photo in flickr.walk(tags=tags, format='etree'):
try:
photo_url = smallest_url(flickr, photo.get('id'), width, height)
if photo_url:
break
except (KeyError, ValueError, TypeError):
stderr_and_exit('Unexpected data from Flickr.\n')
else:
stderr_and_exit('No matching photos found.\n')
# Download the image
dest = os.path.expanduser(config.get('walls', 'image_dir'))
try:
download(photo_url, dest)
except IOError:
stderr_and_exit('Error downloading image.\n') | def function[run, parameter[config, clear_opt]]:
constant[Find an image and download it.]
variable[flickr] assign[=] call[name[flickrapi].FlickrAPI, parameter[call[name[config].get, parameter[constant[walls], constant[api_key]]], call[name[config].get, parameter[constant[walls], constant[api_secret]]]]]
variable[width] assign[=] call[name[config].getint, parameter[constant[walls], constant[width]]]
variable[height] assign[=] call[name[config].getint, parameter[constant[walls], constant[height]]]
if name[clear_opt] begin[:]
call[name[clear_dir], parameter[call[name[os].path.expanduser, parameter[call[name[config].get, parameter[constant[walls], constant[image_dir]]]]]]]
variable[tags] assign[=] call[name[config].get, parameter[constant[walls], constant[tags]]]
for taget[name[photo]] in starred[call[name[flickr].walk, parameter[]]] begin[:]
<ast.Try object at 0x7da18bc73100>
variable[dest] assign[=] call[name[os].path.expanduser, parameter[call[name[config].get, parameter[constant[walls], constant[image_dir]]]]]
<ast.Try object at 0x7da18ede5180> | keyword[def] identifier[run] ( identifier[config] , identifier[clear_opt] = keyword[False] ):
literal[string]
identifier[flickr] = identifier[flickrapi] . identifier[FlickrAPI] ( identifier[config] . identifier[get] ( literal[string] , literal[string] ),
identifier[config] . identifier[get] ( literal[string] , literal[string] ))
identifier[width] = identifier[config] . identifier[getint] ( literal[string] , literal[string] )
identifier[height] = identifier[config] . identifier[getint] ( literal[string] , literal[string] )
keyword[if] identifier[clear_opt] :
identifier[clear_dir] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[config] . identifier[get] ( literal[string] , literal[string] )))
identifier[tags] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
keyword[for] identifier[photo] keyword[in] identifier[flickr] . identifier[walk] ( identifier[tags] = identifier[tags] , identifier[format] = literal[string] ):
keyword[try] :
identifier[photo_url] = identifier[smallest_url] ( identifier[flickr] , identifier[photo] . identifier[get] ( literal[string] ), identifier[width] , identifier[height] )
keyword[if] identifier[photo_url] :
keyword[break]
keyword[except] ( identifier[KeyError] , identifier[ValueError] , identifier[TypeError] ):
identifier[stderr_and_exit] ( literal[string] )
keyword[else] :
identifier[stderr_and_exit] ( literal[string] )
identifier[dest] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[config] . identifier[get] ( literal[string] , literal[string] ))
keyword[try] :
identifier[download] ( identifier[photo_url] , identifier[dest] )
keyword[except] identifier[IOError] :
identifier[stderr_and_exit] ( literal[string] ) | def run(config, clear_opt=False):
"""Find an image and download it."""
flickr = flickrapi.FlickrAPI(config.get('walls', 'api_key'), config.get('walls', 'api_secret'))
width = config.getint('walls', 'width')
height = config.getint('walls', 'height')
# Clear out the destination dir
if clear_opt:
clear_dir(os.path.expanduser(config.get('walls', 'image_dir'))) # depends on [control=['if'], data=[]]
# Find an image
tags = config.get('walls', 'tags')
for photo in flickr.walk(tags=tags, format='etree'):
try:
photo_url = smallest_url(flickr, photo.get('id'), width, height)
if photo_url:
break # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (KeyError, ValueError, TypeError):
stderr_and_exit('Unexpected data from Flickr.\n') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['photo']]
else:
stderr_and_exit('No matching photos found.\n')
# Download the image
dest = os.path.expanduser(config.get('walls', 'image_dir'))
try:
download(photo_url, dest) # depends on [control=['try'], data=[]]
except IOError:
stderr_and_exit('Error downloading image.\n') # depends on [control=['except'], data=[]] |
def _multiple_replace(text, search_replace_dict):
"""
Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm'
"""
# Create a regular expression from all of the dictionary keys
regex = re.compile("|".join(map(re.escape, search_replace_dict.keys())))
# For each match, look up the corresponding value in the dictionary
return regex.sub(lambda match: search_replace_dict[match.group(0)], text) | def function[_multiple_replace, parameter[text, search_replace_dict]]:
constant[
Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm'
]
variable[regex] assign[=] call[name[re].compile, parameter[call[constant[|].join, parameter[call[name[map], parameter[name[re].escape, call[name[search_replace_dict].keys, parameter[]]]]]]]]
return[call[name[regex].sub, parameter[<ast.Lambda object at 0x7da18dc99870>, name[text]]]] | keyword[def] identifier[_multiple_replace] ( identifier[text] , identifier[search_replace_dict] ):
literal[string]
identifier[regex] = identifier[re] . identifier[compile] ( literal[string] . identifier[join] ( identifier[map] ( identifier[re] . identifier[escape] , identifier[search_replace_dict] . identifier[keys] ())))
keyword[return] identifier[regex] . identifier[sub] ( keyword[lambda] identifier[match] : identifier[search_replace_dict] [ identifier[match] . identifier[group] ( literal[int] )], identifier[text] ) | def _multiple_replace(text, search_replace_dict):
"""
Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm'
"""
# Create a regular expression from all of the dictionary keys
regex = re.compile('|'.join(map(re.escape, search_replace_dict.keys())))
# For each match, look up the corresponding value in the dictionary
return regex.sub(lambda match: search_replace_dict[match.group(0)], text) |
def epd_magseries_extparams(
times,
mags,
errs,
externalparam_arrs,
initial_coeff_guess,
magsarefluxes=False,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
objective_func=_epd_residual2,
objective_kwargs=None,
optimizer_func=least_squares,
optimizer_kwargs=None,
):
'''This does EPD on a mag-series with arbitrary external parameters.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run EPD on.
externalparam_arrs : list of np.arrays
This is a list of ndarrays of external parameters to decorrelate
against. These should all be the same size as `times`, `mags`, `errs`.
initial_coeff_guess : np.array
An array of initial fit coefficients to pass into the objective
function.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before smoothing it and
fitting the EPD function to it. The actual LC will not be sigma-clipped.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
objective_func : Python function
The function that calculates residuals between the model and the
smoothed mag-series. This must have the following signature::
def objective_func(fit_coeffs,
times,
mags,
errs,
*external_params,
**objective_kwargs)
where `times`, `mags`, `errs` are arrays of the sigma-clipped and
smoothed time-series, `fit_coeffs` is an array of EPD fit coefficients,
`external_params` is a tuple of the passed in external parameter arrays,
and `objective_kwargs` is a dict of any optional kwargs to pass into the
objective function.
This should return the value of the residual based on evaluating the
model function (and any weights based on errs or times).
objective_kwargs : dict or None
A dict of kwargs to pass into the `objective_func` function.
optimizer_func : Python function
The function that minimizes the residual between the model and the
smoothed mag-series using the `objective_func`. This should have a
signature similar to one of the optimizer functions in `scipy.optimize
<https://docs.scipy.org/doc/scipy/reference/optimize.html>`_, i.e.::
def optimizer_func(objective_func, initial_coeffs, args=(), ...)
and return a `scipy.optimize.OptimizeResult
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`_. We'll
rely on the ``.success`` attribute to determine if the EPD fit was
successful, and the ``.x`` attribute to get the values of the fit
coefficients.
optimizer_kwargs : dict or None
A dict of kwargs to pass into the `optimizer_func` function.
Returns
-------
dict
Returns a dict of the following form::
{'times':the input times after non-finite elems removed,
'mags':the EPD detrended mag values (the EPD mags),
'errs':the errs after non-finite elems removed,
'fitcoeffs':EPD fit coefficient values,
'fitinfo':the result returned by the optimizer function,
'mags_median': this is the median of the EPD mags,
'mags_mad': this is the MAD of EPD mags}
'''
# get finite times, mags, errs
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[::][finind], mags[::][finind], errs[::][finind]
finalparam_arrs = []
for ep in externalparam_arrs:
finalparam_arrs.append(ep[::][finind])
# sigclip the LC to pass into the smoothing for EPD fit
stimes, smags, serrs, eparams = sigclip_magseries_with_extparams(
times.copy(), mags.copy(), errs.copy(),
[x.copy() for x in externalparam_arrs],
sigclip=epdsmooth_sigclip,
magsarefluxes=magsarefluxes
)
# smooth the signal before fitting the function to it
if isinstance(epdsmooth_extraparams, dict):
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize,
**epdsmooth_extraparams)
else:
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize)
# the initial coeffs are passed in here
initial_coeffs = initial_coeff_guess
# reform the objective function with any optional kwargs
if objective_kwargs is not None:
obj_func = partial(objective_func, **objective_kwargs)
else:
obj_func = objective_func
# run the optimizer function by passing in the objective function, the
# coeffs, and the smoothed mags and external params as part of the `args`
# tuple
if not optimizer_kwargs:
optimizer_kwargs = {}
fit_info = optimizer_func(
obj_func,
initial_coeffs,
args=(stimes, smoothedmags, serrs, *eparams),
**optimizer_kwargs
)
if fit_info.success:
fit_coeffs = fit_info.x
epd_mags = np.median(fmags) + obj_func(fit_coeffs,
ftimes,
fmags,
ferrs,
*finalparam_arrs)
retdict = {'times':ftimes,
'mags':epd_mags,
'errs':ferrs,
'fitcoeffs':fit_coeffs,
'fitinfo':fit_info,
'mags_median':npmedian(epd_mags),
'mags_mad':npmedian(npabs(epd_mags - npmedian(epd_mags)))}
return retdict
# if the solution fails, return nothing
else:
LOGERROR('EPD fit did not converge')
return None | def function[epd_magseries_extparams, parameter[times, mags, errs, externalparam_arrs, initial_coeff_guess, magsarefluxes, epdsmooth_sigclip, epdsmooth_windowsize, epdsmooth_func, epdsmooth_extraparams, objective_func, objective_kwargs, optimizer_func, optimizer_kwargs]]:
constant[This does EPD on a mag-series with arbitrary external parameters.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run EPD on.
externalparam_arrs : list of np.arrays
This is a list of ndarrays of external parameters to decorrelate
against. These should all be the same size as `times`, `mags`, `errs`.
initial_coeff_guess : np.array
An array of initial fit coefficients to pass into the objective
function.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before smoothing it and
fitting the EPD function to it. The actual LC will not be sigma-clipped.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
objective_func : Python function
The function that calculates residuals between the model and the
smoothed mag-series. This must have the following signature::
def objective_func(fit_coeffs,
times,
mags,
errs,
*external_params,
**objective_kwargs)
where `times`, `mags`, `errs` are arrays of the sigma-clipped and
smoothed time-series, `fit_coeffs` is an array of EPD fit coefficients,
`external_params` is a tuple of the passed in external parameter arrays,
and `objective_kwargs` is a dict of any optional kwargs to pass into the
objective function.
This should return the value of the residual based on evaluating the
model function (and any weights based on errs or times).
objective_kwargs : dict or None
A dict of kwargs to pass into the `objective_func` function.
optimizer_func : Python function
The function that minimizes the residual between the model and the
smoothed mag-series using the `objective_func`. This should have a
signature similar to one of the optimizer functions in `scipy.optimize
<https://docs.scipy.org/doc/scipy/reference/optimize.html>`_, i.e.::
def optimizer_func(objective_func, initial_coeffs, args=(), ...)
and return a `scipy.optimize.OptimizeResult
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`_. We'll
rely on the ``.success`` attribute to determine if the EPD fit was
successful, and the ``.x`` attribute to get the values of the fit
coefficients.
optimizer_kwargs : dict or None
A dict of kwargs to pass into the `optimizer_func` function.
Returns
-------
dict
Returns a dict of the following form::
{'times':the input times after non-finite elems removed,
'mags':the EPD detrended mag values (the EPD mags),
'errs':the errs after non-finite elems removed,
'fitcoeffs':EPD fit coefficient values,
'fitinfo':the result returned by the optimizer function,
'mags_median': this is the median of the EPD mags,
'mags_mad': this is the MAD of EPD mags}
]
variable[finind] assign[=] binary_operation[binary_operation[call[name[np].isfinite, parameter[name[times]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[np].isfinite, parameter[name[mags]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[np].isfinite, parameter[name[errs]]]]
<ast.Tuple object at 0x7da2044c0700> assign[=] tuple[[<ast.Subscript object at 0x7da2044c2440>, <ast.Subscript object at 0x7da2044c2b30>, <ast.Subscript object at 0x7da2044c1420>]]
variable[finalparam_arrs] assign[=] list[[]]
for taget[name[ep]] in starred[name[externalparam_arrs]] begin[:]
call[name[finalparam_arrs].append, parameter[call[call[name[ep]][<ast.Slice object at 0x7da2044c3340>]][name[finind]]]]
<ast.Tuple object at 0x7da2044c3580> assign[=] call[name[sigclip_magseries_with_extparams], parameter[call[name[times].copy, parameter[]], call[name[mags].copy, parameter[]], call[name[errs].copy, parameter[]], <ast.ListComp object at 0x7da1b017b970>]]
if call[name[isinstance], parameter[name[epdsmooth_extraparams], name[dict]]] begin[:]
variable[smoothedmags] assign[=] call[name[epdsmooth_func], parameter[name[smags], name[epdsmooth_windowsize]]]
variable[initial_coeffs] assign[=] name[initial_coeff_guess]
if compare[name[objective_kwargs] is_not constant[None]] begin[:]
variable[obj_func] assign[=] call[name[partial], parameter[name[objective_func]]]
if <ast.UnaryOp object at 0x7da1b017a980> begin[:]
variable[optimizer_kwargs] assign[=] dictionary[[], []]
variable[fit_info] assign[=] call[name[optimizer_func], parameter[name[obj_func], name[initial_coeffs]]]
if name[fit_info].success begin[:]
variable[fit_coeffs] assign[=] name[fit_info].x
variable[epd_mags] assign[=] binary_operation[call[name[np].median, parameter[name[fmags]]] + call[name[obj_func], parameter[name[fit_coeffs], name[ftimes], name[fmags], name[ferrs], <ast.Starred object at 0x7da1b017bd30>]]]
variable[retdict] assign[=] dictionary[[<ast.Constant object at 0x7da1b017bc40>, <ast.Constant object at 0x7da1b017bc10>, <ast.Constant object at 0x7da1b017bbe0>, <ast.Constant object at 0x7da1b017bbb0>, <ast.Constant object at 0x7da1b017bb80>, <ast.Constant object at 0x7da1b017bb50>, <ast.Constant object at 0x7da1b017bb20>], [<ast.Name object at 0x7da1b017baf0>, <ast.Name object at 0x7da1b017bac0>, <ast.Name object at 0x7da207f02fe0>, <ast.Name object at 0x7da207f01570>, <ast.Name object at 0x7da207f02fb0>, <ast.Call object at 0x7da207f02a10>, <ast.Call object at 0x7da207f01b70>]]
return[name[retdict]] | keyword[def] identifier[epd_magseries_extparams] (
identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[externalparam_arrs] ,
identifier[initial_coeff_guess] ,
identifier[magsarefluxes] = keyword[False] ,
identifier[epdsmooth_sigclip] = literal[int] ,
identifier[epdsmooth_windowsize] = literal[int] ,
identifier[epdsmooth_func] = identifier[smooth_magseries_savgol] ,
identifier[epdsmooth_extraparams] = keyword[None] ,
identifier[objective_func] = identifier[_epd_residual2] ,
identifier[objective_kwargs] = keyword[None] ,
identifier[optimizer_func] = identifier[least_squares] ,
identifier[optimizer_kwargs] = keyword[None] ,
):
literal[string]
identifier[finind] = identifier[np] . identifier[isfinite] ( identifier[times] )& identifier[np] . identifier[isfinite] ( identifier[mags] )& identifier[np] . identifier[isfinite] ( identifier[errs] )
identifier[ftimes] , identifier[fmags] , identifier[ferrs] = identifier[times] [::][ identifier[finind] ], identifier[mags] [::][ identifier[finind] ], identifier[errs] [::][ identifier[finind] ]
identifier[finalparam_arrs] =[]
keyword[for] identifier[ep] keyword[in] identifier[externalparam_arrs] :
identifier[finalparam_arrs] . identifier[append] ( identifier[ep] [::][ identifier[finind] ])
identifier[stimes] , identifier[smags] , identifier[serrs] , identifier[eparams] = identifier[sigclip_magseries_with_extparams] (
identifier[times] . identifier[copy] (), identifier[mags] . identifier[copy] (), identifier[errs] . identifier[copy] (),
[ identifier[x] . identifier[copy] () keyword[for] identifier[x] keyword[in] identifier[externalparam_arrs] ],
identifier[sigclip] = identifier[epdsmooth_sigclip] ,
identifier[magsarefluxes] = identifier[magsarefluxes]
)
keyword[if] identifier[isinstance] ( identifier[epdsmooth_extraparams] , identifier[dict] ):
identifier[smoothedmags] = identifier[epdsmooth_func] ( identifier[smags] ,
identifier[epdsmooth_windowsize] ,
** identifier[epdsmooth_extraparams] )
keyword[else] :
identifier[smoothedmags] = identifier[epdsmooth_func] ( identifier[smags] ,
identifier[epdsmooth_windowsize] )
identifier[initial_coeffs] = identifier[initial_coeff_guess]
keyword[if] identifier[objective_kwargs] keyword[is] keyword[not] keyword[None] :
identifier[obj_func] = identifier[partial] ( identifier[objective_func] ,** identifier[objective_kwargs] )
keyword[else] :
identifier[obj_func] = identifier[objective_func]
keyword[if] keyword[not] identifier[optimizer_kwargs] :
identifier[optimizer_kwargs] ={}
identifier[fit_info] = identifier[optimizer_func] (
identifier[obj_func] ,
identifier[initial_coeffs] ,
identifier[args] =( identifier[stimes] , identifier[smoothedmags] , identifier[serrs] ,* identifier[eparams] ),
** identifier[optimizer_kwargs]
)
keyword[if] identifier[fit_info] . identifier[success] :
identifier[fit_coeffs] = identifier[fit_info] . identifier[x]
identifier[epd_mags] = identifier[np] . identifier[median] ( identifier[fmags] )+ identifier[obj_func] ( identifier[fit_coeffs] ,
identifier[ftimes] ,
identifier[fmags] ,
identifier[ferrs] ,
* identifier[finalparam_arrs] )
identifier[retdict] ={ literal[string] : identifier[ftimes] ,
literal[string] : identifier[epd_mags] ,
literal[string] : identifier[ferrs] ,
literal[string] : identifier[fit_coeffs] ,
literal[string] : identifier[fit_info] ,
literal[string] : identifier[npmedian] ( identifier[epd_mags] ),
literal[string] : identifier[npmedian] ( identifier[npabs] ( identifier[epd_mags] - identifier[npmedian] ( identifier[epd_mags] )))}
keyword[return] identifier[retdict]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] keyword[None] | def epd_magseries_extparams(times, mags, errs, externalparam_arrs, initial_coeff_guess, magsarefluxes=False, epdsmooth_sigclip=3.0, epdsmooth_windowsize=21, epdsmooth_func=smooth_magseries_savgol, epdsmooth_extraparams=None, objective_func=_epd_residual2, objective_kwargs=None, optimizer_func=least_squares, optimizer_kwargs=None):
"""This does EPD on a mag-series with arbitrary external parameters.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run EPD on.
externalparam_arrs : list of np.arrays
This is a list of ndarrays of external parameters to decorrelate
against. These should all be the same size as `times`, `mags`, `errs`.
initial_coeff_guess : np.array
An array of initial fit coefficients to pass into the objective
function.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before smoothing it and
fitting the EPD function to it. The actual LC will not be sigma-clipped.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
objective_func : Python function
The function that calculates residuals between the model and the
smoothed mag-series. This must have the following signature::
def objective_func(fit_coeffs,
times,
mags,
errs,
*external_params,
**objective_kwargs)
where `times`, `mags`, `errs` are arrays of the sigma-clipped and
smoothed time-series, `fit_coeffs` is an array of EPD fit coefficients,
`external_params` is a tuple of the passed in external parameter arrays,
and `objective_kwargs` is a dict of any optional kwargs to pass into the
objective function.
This should return the value of the residual based on evaluating the
model function (and any weights based on errs or times).
objective_kwargs : dict or None
A dict of kwargs to pass into the `objective_func` function.
optimizer_func : Python function
The function that minimizes the residual between the model and the
smoothed mag-series using the `objective_func`. This should have a
signature similar to one of the optimizer functions in `scipy.optimize
<https://docs.scipy.org/doc/scipy/reference/optimize.html>`_, i.e.::
def optimizer_func(objective_func, initial_coeffs, args=(), ...)
and return a `scipy.optimize.OptimizeResult
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`_. We'll
rely on the ``.success`` attribute to determine if the EPD fit was
successful, and the ``.x`` attribute to get the values of the fit
coefficients.
optimizer_kwargs : dict or None
A dict of kwargs to pass into the `optimizer_func` function.
Returns
-------
dict
Returns a dict of the following form::
{'times':the input times after non-finite elems removed,
'mags':the EPD detrended mag values (the EPD mags),
'errs':the errs after non-finite elems removed,
'fitcoeffs':EPD fit coefficient values,
'fitinfo':the result returned by the optimizer function,
'mags_median': this is the median of the EPD mags,
'mags_mad': this is the MAD of EPD mags}
"""
# get finite times, mags, errs
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
(ftimes, fmags, ferrs) = (times[:][finind], mags[:][finind], errs[:][finind])
finalparam_arrs = []
for ep in externalparam_arrs:
finalparam_arrs.append(ep[:][finind]) # depends on [control=['for'], data=['ep']]
# sigclip the LC to pass into the smoothing for EPD fit
(stimes, smags, serrs, eparams) = sigclip_magseries_with_extparams(times.copy(), mags.copy(), errs.copy(), [x.copy() for x in externalparam_arrs], sigclip=epdsmooth_sigclip, magsarefluxes=magsarefluxes)
# smooth the signal before fitting the function to it
if isinstance(epdsmooth_extraparams, dict):
smoothedmags = epdsmooth_func(smags, epdsmooth_windowsize, **epdsmooth_extraparams) # depends on [control=['if'], data=[]]
else:
smoothedmags = epdsmooth_func(smags, epdsmooth_windowsize)
# the initial coeffs are passed in here
initial_coeffs = initial_coeff_guess
# reform the objective function with any optional kwargs
if objective_kwargs is not None:
obj_func = partial(objective_func, **objective_kwargs) # depends on [control=['if'], data=['objective_kwargs']]
else:
obj_func = objective_func
# run the optimizer function by passing in the objective function, the
# coeffs, and the smoothed mags and external params as part of the `args`
# tuple
if not optimizer_kwargs:
optimizer_kwargs = {} # depends on [control=['if'], data=[]]
fit_info = optimizer_func(obj_func, initial_coeffs, args=(stimes, smoothedmags, serrs, *eparams), **optimizer_kwargs)
if fit_info.success:
fit_coeffs = fit_info.x
epd_mags = np.median(fmags) + obj_func(fit_coeffs, ftimes, fmags, ferrs, *finalparam_arrs)
retdict = {'times': ftimes, 'mags': epd_mags, 'errs': ferrs, 'fitcoeffs': fit_coeffs, 'fitinfo': fit_info, 'mags_median': npmedian(epd_mags), 'mags_mad': npmedian(npabs(epd_mags - npmedian(epd_mags)))}
return retdict # depends on [control=['if'], data=[]]
else:
# if the solution fails, return nothing
LOGERROR('EPD fit did not converge')
return None |
def rot3(theta):
    """
    Rotation matrix about the Z-axis.

    Args:
        theta (float): Angle in radians
    Return:
        Rotation matrix of angle theta around the Z-axis
    """
    # Note the sign placement (+sin in row 0, -sin in row 1): this is the
    # transpose of the conventional counter-clockwise ("active") rotation
    # matrix, i.e. a frame/passive rotation for positive theta.
    return np.array([
        [np.cos(theta), np.sin(theta), 0],
        [-np.sin(theta), np.cos(theta), 0],
        [0, 0, 1]
    ]) | def function[rot3, parameter[theta]]:
constant[
Args:
theta (float): Angle in radians
Return:
Rotation matrix of angle theta around the Z-axis
]
return[call[name[np].array, parameter[list[[<ast.List object at 0x7da1b0c4e260>, <ast.List object at 0x7da1b0c4efe0>, <ast.List object at 0x7da1b0c4dde0>]]]]] | keyword[def] identifier[rot3] ( identifier[theta] ):
literal[string]
keyword[return] identifier[np] . identifier[array] ([
[ identifier[np] . identifier[cos] ( identifier[theta] ), identifier[np] . identifier[sin] ( identifier[theta] ), literal[int] ],
[- identifier[np] . identifier[sin] ( identifier[theta] ), identifier[np] . identifier[cos] ( identifier[theta] ), literal[int] ],
[ literal[int] , literal[int] , literal[int] ]
]) | def rot3(theta):
"""
Args:
theta (float): Angle in radians
Return:
Rotation matrix of angle theta around the Z-axis
"""
return np.array([[np.cos(theta), np.sin(theta), 0], [-np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) |
def load_sound_font(self, sf2):
    """Load a sound font.
    Return True on success, False on failure.
    This function should be called before your audio can be played,
    since the instruments are kept in the sf2 file.

    :param sf2: path to the SoundFont (.sf2) file to load.
    """
    # sfload returns a soundfont id (or -1 on failure, per the comparison
    # below); keep the id so later program/patch selections can use it.
    self.sfid = self.fs.sfload(sf2)
    # `not self.sfid == -1` is equivalent to `self.sfid != -1`:
    # True iff the load succeeded.
    return not self.sfid == -1 | def function[load_sound_font, parameter[self, sf2]]:
constant[Load a sound font.
Return True on success, False on failure.
This function should be called before your audio can be played,
since the instruments are kept in the sf2 file.
]
name[self].sfid assign[=] call[name[self].fs.sfload, parameter[name[sf2]]]
return[<ast.UnaryOp object at 0x7da1b13d5810>] | keyword[def] identifier[load_sound_font] ( identifier[self] , identifier[sf2] ):
literal[string]
identifier[self] . identifier[sfid] = identifier[self] . identifier[fs] . identifier[sfload] ( identifier[sf2] )
keyword[return] keyword[not] identifier[self] . identifier[sfid] ==- literal[int] | def load_sound_font(self, sf2):
"""Load a sound font.
Return True on success, False on failure.
This function should be called before your audio can be played,
since the instruments are kept in the sf2 file.
"""
self.sfid = self.fs.sfload(sf2)
return not self.sfid == -1 |
def to_meta(self, md5=None, file=None):
    """Return a dictionary of metadata, for use in the Remote api.

    :param md5: precomputed md5 digest; if falsy, it is computed from `file`.
    :param file: path used to derive both the md5 and the file size when
        `md5` is not supplied.
    :raises ValueError: if neither `md5` nor `file` is provided.

    NOTE(review): when `md5` is passed explicitly, `size` is reported as
    None even if `file` is also given -- confirm that is intended.
    """
    # from collections import OrderedDict
    if not md5:
        if not file:
            raise ValueError('Must specify either file or md5')
        md5 = md5_for_file(file)
        size = os.stat(file).st_size
    else:
        size = None
    return {
        'id': self.id_,
        'identity': json.dumps(self.dict),
        'name': self.sname,
        'fqname': self.fqname,
        'md5': md5,
        # This causes errors with calculating the AWS signature
        'size': size
    } | def function[to_meta, parameter[self, md5, file]]:
constant[Return a dictionary of metadata, for use in the Remote api.]
if <ast.UnaryOp object at 0x7da20e956ef0> begin[:]
if <ast.UnaryOp object at 0x7da20e957880> begin[:]
<ast.Raise object at 0x7da20e954eb0>
variable[md5] assign[=] call[name[md5_for_file], parameter[name[file]]]
variable[size] assign[=] call[name[os].stat, parameter[name[file]]].st_size
return[dictionary[[<ast.Constant object at 0x7da18c4cded0>, <ast.Constant object at 0x7da18c4ccc70>, <ast.Constant object at 0x7da18c4cece0>, <ast.Constant object at 0x7da18c4cf6d0>, <ast.Constant object at 0x7da18c4cdd80>, <ast.Constant object at 0x7da18c4ceb00>], [<ast.Attribute object at 0x7da18c4cefe0>, <ast.Call object at 0x7da18c4ccf10>, <ast.Attribute object at 0x7da18c4cc7c0>, <ast.Attribute object at 0x7da18c4ce470>, <ast.Name object at 0x7da18c4ce9b0>, <ast.Name object at 0x7da18c4cf010>]]] | keyword[def] identifier[to_meta] ( identifier[self] , identifier[md5] = keyword[None] , identifier[file] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[md5] :
keyword[if] keyword[not] identifier[file] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[md5] = identifier[md5_for_file] ( identifier[file] )
identifier[size] = identifier[os] . identifier[stat] ( identifier[file] ). identifier[st_size]
keyword[else] :
identifier[size] = keyword[None]
keyword[return] {
literal[string] : identifier[self] . identifier[id_] ,
literal[string] : identifier[json] . identifier[dumps] ( identifier[self] . identifier[dict] ),
literal[string] : identifier[self] . identifier[sname] ,
literal[string] : identifier[self] . identifier[fqname] ,
literal[string] : identifier[md5] ,
literal[string] : identifier[size]
} | def to_meta(self, md5=None, file=None):
"""Return a dictionary of metadata, for use in the Remote api."""
# from collections import OrderedDict
if not md5:
if not file:
raise ValueError('Must specify either file or md5') # depends on [control=['if'], data=[]]
md5 = md5_for_file(file)
size = os.stat(file).st_size # depends on [control=['if'], data=[]]
else:
size = None
# This causes errors with calculating the AWS signature
return {'id': self.id_, 'identity': json.dumps(self.dict), 'name': self.sname, 'fqname': self.fqname, 'md5': md5, 'size': size} |
def encode(plaintext, code):
    "Encodes text, using a code which is a permutation of the alphabet."
    # NOTE(review): string.maketrans exists only on Python 2; on Python 3
    # this import raises ImportError -- the replacement is str.maketrans.
    from string import maketrans
    # Map both lower- and upper-case letters of the module-level `alphabet`
    # onto the corresponding letters of the permuted `code`.
    trans = maketrans(alphabet + alphabet.upper(), code + code.upper())
    return plaintext.translate(trans) | def function[encode, parameter[plaintext, code]]:
constant[Encodes text, using a code which is a permutation of the alphabet.]
from relative_module[string] import module[maketrans]
variable[trans] assign[=] call[name[maketrans], parameter[binary_operation[name[alphabet] + call[name[alphabet].upper, parameter[]]], binary_operation[name[code] + call[name[code].upper, parameter[]]]]]
return[call[name[plaintext].translate, parameter[name[trans]]]] | keyword[def] identifier[encode] ( identifier[plaintext] , identifier[code] ):
literal[string]
keyword[from] identifier[string] keyword[import] identifier[maketrans]
identifier[trans] = identifier[maketrans] ( identifier[alphabet] + identifier[alphabet] . identifier[upper] (), identifier[code] + identifier[code] . identifier[upper] ())
keyword[return] identifier[plaintext] . identifier[translate] ( identifier[trans] ) | def encode(plaintext, code):
"""Encodes text, using a code which is a permutation of the alphabet."""
from string import maketrans
trans = maketrans(alphabet + alphabet.upper(), code + code.upper())
return plaintext.translate(trans) |
def sample(self, n, mass_min=0.1, mass_max=10., steps=10000, seed=None):
    """
    Sample initial mass values between mass_min and mass_max,
    following the IMF distribution.
    ADW: Should this be `sample` or `simulate`?
    Parameters:
    -----------
    n : number of samples to draw
    mass_min : minimum mass to sample from
    mass_max : maximum mass to sample from
    steps : number of steps for isochrone sampling
    seed : random seed (passed to np.random.seed)
    Returns:
    --------
    mass : array of randomly sampled mass values
    """
    if seed is not None: np.random.seed(seed)
    # Inverse-transform sampling: tabulate the CDF of the IMF on a regular
    # mass grid, then map uniform deviates through the interpolated inverse CDF.
    d_mass = (mass_max - mass_min) / float(steps)
    mass = np.linspace(mass_min, mass_max, steps)
    # Rectangle-rule cumulative integral of the pdf; a leading 0 is inserted
    # so the CDF starts at mass_min with probability 0.
    cdf = np.insert(np.cumsum(d_mass * self.pdf(mass[1:], log_mode=False)), 0, 0.)
    # Normalize so the tabulated CDF ends exactly at 1.
    cdf = cdf / cdf[-1]
    # Interpolating (cdf -> mass) gives the inverse CDF.
    f = scipy.interpolate.interp1d(cdf, mass)
    return f(np.random.uniform(size=n)) | def function[sample, parameter[self, n, mass_min, mass_max, steps, seed]]:
constant[
Sample initial mass values between mass_min and mass_max,
following the IMF distribution.
ADW: Should this be `sample` or `simulate`?
Parameters:
-----------
n : number of samples to draw
mass_min : minimum mass to sample from
mass_max : maximum mass to sample from
steps : number of steps for isochrone sampling
seed : random seed (passed to np.random.seed)
Returns:
--------
mass : array of randomly sampled mass values
]
if compare[name[seed] is_not constant[None]] begin[:]
call[name[np].random.seed, parameter[name[seed]]]
variable[d_mass] assign[=] binary_operation[binary_operation[name[mass_max] - name[mass_min]] / call[name[float], parameter[name[steps]]]]
variable[mass] assign[=] call[name[np].linspace, parameter[name[mass_min], name[mass_max], name[steps]]]
variable[cdf] assign[=] call[name[np].insert, parameter[call[name[np].cumsum, parameter[binary_operation[name[d_mass] * call[name[self].pdf, parameter[call[name[mass]][<ast.Slice object at 0x7da1b24ae3b0>]]]]]], constant[0], constant[0.0]]]
variable[cdf] assign[=] binary_operation[name[cdf] / call[name[cdf]][<ast.UnaryOp object at 0x7da1b24aed70>]]
variable[f] assign[=] call[name[scipy].interpolate.interp1d, parameter[name[cdf], name[mass]]]
return[call[name[f], parameter[call[name[np].random.uniform, parameter[]]]]] | keyword[def] identifier[sample] ( identifier[self] , identifier[n] , identifier[mass_min] = literal[int] , identifier[mass_max] = literal[int] , identifier[steps] = literal[int] , identifier[seed] = keyword[None] ):
literal[string]
keyword[if] identifier[seed] keyword[is] keyword[not] keyword[None] : identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] )
identifier[d_mass] =( identifier[mass_max] - identifier[mass_min] )/ identifier[float] ( identifier[steps] )
identifier[mass] = identifier[np] . identifier[linspace] ( identifier[mass_min] , identifier[mass_max] , identifier[steps] )
identifier[cdf] = identifier[np] . identifier[insert] ( identifier[np] . identifier[cumsum] ( identifier[d_mass] * identifier[self] . identifier[pdf] ( identifier[mass] [ literal[int] :], identifier[log_mode] = keyword[False] )), literal[int] , literal[int] )
identifier[cdf] = identifier[cdf] / identifier[cdf] [- literal[int] ]
identifier[f] = identifier[scipy] . identifier[interpolate] . identifier[interp1d] ( identifier[cdf] , identifier[mass] )
keyword[return] identifier[f] ( identifier[np] . identifier[random] . identifier[uniform] ( identifier[size] = identifier[n] )) | def sample(self, n, mass_min=0.1, mass_max=10.0, steps=10000, seed=None):
"""
Sample initial mass values between mass_min and mass_max,
following the IMF distribution.
ADW: Should this be `sample` or `simulate`?
Parameters:
-----------
n : number of samples to draw
mass_min : minimum mass to sample from
mass_max : maximum mass to sample from
steps : number of steps for isochrone sampling
seed : random seed (passed to np.random.seed)
Returns:
--------
mass : array of randomly sampled mass values
"""
if seed is not None:
np.random.seed(seed) # depends on [control=['if'], data=['seed']]
d_mass = (mass_max - mass_min) / float(steps)
mass = np.linspace(mass_min, mass_max, steps)
cdf = np.insert(np.cumsum(d_mass * self.pdf(mass[1:], log_mode=False)), 0, 0.0)
cdf = cdf / cdf[-1]
f = scipy.interpolate.interp1d(cdf, mass)
return f(np.random.uniform(size=n)) |
def compare_dicts(dict1, dict2):
    """
    Checks if dict1 equals dict2

    NOTE(review): this is not a symmetric equality test -- it only verifies
    that every key/value pair of dict2 is also present in dict1.  Extra keys
    in dict1 are ignored, and a key present in dict2 but missing from dict1
    raises KeyError instead of returning False.  Confirm callers rely on
    (or at least tolerate) these semantics before changing them.
    """
    for k, v in dict2.items():
        if v != dict1[k]:
            return False
    return True | def function[compare_dicts, parameter[dict1, dict2]]:
constant[
Checks if dict1 equals dict2
]
for taget[tuple[[<ast.Name object at 0x7da1b131bf70>, <ast.Name object at 0x7da1b131ad10>]]] in starred[call[name[dict2].items, parameter[]]] begin[:]
if compare[name[v] not_equal[!=] call[name[dict1]][name[k]]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[compare_dicts] ( identifier[dict1] , identifier[dict2] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dict2] . identifier[items] ():
keyword[if] identifier[v] != identifier[dict1] [ identifier[k] ]:
keyword[return] keyword[False]
keyword[return] keyword[True] | def compare_dicts(dict1, dict2):
"""
Checks if dict1 equals dict2
"""
for (k, v) in dict2.items():
if v != dict1[k]:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return True |
def run_task(self, name):
"""Run a task."""
if name in self.tasks_run:
yield events.skipping_task(name)
else:
yield events.finding_task(name)
try:
task = self.find_task(name)
except NoSuchTaskError as e:
yield events.task_not_found(name, e.similarities)
raise StopTask
yield events.starting_task(task)
for name in task.dependencies:
yield from self.run_task(name)
self.tasks_run.append(name)
yield events.running_task(task)
yield from self.run_task_steps(task)
yield events.finished_task(task) | def function[run_task, parameter[self, name]]:
constant[Run a task.]
if compare[name[name] in name[self].tasks_run] begin[:]
<ast.Yield object at 0x7da20c992650> | keyword[def] identifier[run_task] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[tasks_run] :
keyword[yield] identifier[events] . identifier[skipping_task] ( identifier[name] )
keyword[else] :
keyword[yield] identifier[events] . identifier[finding_task] ( identifier[name] )
keyword[try] :
identifier[task] = identifier[self] . identifier[find_task] ( identifier[name] )
keyword[except] identifier[NoSuchTaskError] keyword[as] identifier[e] :
keyword[yield] identifier[events] . identifier[task_not_found] ( identifier[name] , identifier[e] . identifier[similarities] )
keyword[raise] identifier[StopTask]
keyword[yield] identifier[events] . identifier[starting_task] ( identifier[task] )
keyword[for] identifier[name] keyword[in] identifier[task] . identifier[dependencies] :
keyword[yield] keyword[from] identifier[self] . identifier[run_task] ( identifier[name] )
identifier[self] . identifier[tasks_run] . identifier[append] ( identifier[name] )
keyword[yield] identifier[events] . identifier[running_task] ( identifier[task] )
keyword[yield] keyword[from] identifier[self] . identifier[run_task_steps] ( identifier[task] )
keyword[yield] identifier[events] . identifier[finished_task] ( identifier[task] ) | def run_task(self, name):
"""Run a task."""
if name in self.tasks_run:
yield events.skipping_task(name) # depends on [control=['if'], data=['name']]
else:
yield events.finding_task(name)
try:
task = self.find_task(name) # depends on [control=['try'], data=[]]
except NoSuchTaskError as e:
yield events.task_not_found(name, e.similarities)
raise StopTask # depends on [control=['except'], data=['e']]
yield events.starting_task(task)
for name in task.dependencies:
yield from self.run_task(name) # depends on [control=['for'], data=['name']]
self.tasks_run.append(name)
yield events.running_task(task)
yield from self.run_task_steps(task)
yield events.finished_task(task) |
def remove_container(self, path):
    """
    Removes the container at the specified path.
    :param path: str or Path instance
    :raises ValueError: A component of path is a field name.
    :raises KeyError: A component of path doesn't exist.
    """
    path = make_path(path)
    # Walk down the tree, tracking the parent of the node being visited so
    # the final path segment can be deleted from it afterwards.
    container = self
    parent = None
    for segment in path:
        parent = container
        try:
            container = container._values[segment]
            # Every component along the path must itself be a container
            # (ValueTree); hitting a field value is a usage error.
            if not isinstance(container, ValueTree):
                raise ValueError()
        except KeyError:
            # Re-raised without the offending segment.  NOTE(review):
            # `raise KeyError(segment)` would preserve diagnostic info.
            raise KeyError()
    # NOTE(review): an empty path leaves `parent` as None, making this line
    # raise AttributeError rather than a documented error -- confirm callers
    # never pass an empty path.
    del parent._values[path.segments[-1]] | def function[remove_container, parameter[self, path]]:
constant[
Removes the container at the specified path.
:param path: str or Path instance
:raises ValueError: A component of path is a field name.
:raises KeyError: A component of path doesn't exist.
]
variable[path] assign[=] call[name[make_path], parameter[name[path]]]
variable[container] assign[=] name[self]
variable[parent] assign[=] constant[None]
for taget[name[segment]] in starred[name[path]] begin[:]
variable[parent] assign[=] name[container]
<ast.Try object at 0x7da20c76e200>
<ast.Delete object at 0x7da20c6a9ed0> | keyword[def] identifier[remove_container] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path] = identifier[make_path] ( identifier[path] )
identifier[container] = identifier[self]
identifier[parent] = keyword[None]
keyword[for] identifier[segment] keyword[in] identifier[path] :
identifier[parent] = identifier[container]
keyword[try] :
identifier[container] = identifier[container] . identifier[_values] [ identifier[segment] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[container] , identifier[ValueTree] ):
keyword[raise] identifier[ValueError] ()
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ()
keyword[del] identifier[parent] . identifier[_values] [ identifier[path] . identifier[segments] [- literal[int] ]] | def remove_container(self, path):
"""
Removes the container at the specified path.
:param path: str or Path instance
:raises ValueError: A component of path is a field name.
:raises KeyError: A component of path doesn't exist.
"""
path = make_path(path)
container = self
parent = None
for segment in path:
parent = container
try:
container = container._values[segment]
if not isinstance(container, ValueTree):
raise ValueError() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError() # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['segment']]
del parent._values[path.segments[-1]] |
def filter_star_import(line, marked_star_import_undefined_name):
"""Return line with the star import expanded."""
undefined_name = sorted(set(marked_star_import_undefined_name))
return re.sub(r'\*', ', '.join(undefined_name), line) | def function[filter_star_import, parameter[line, marked_star_import_undefined_name]]:
constant[Return line with the star import expanded.]
variable[undefined_name] assign[=] call[name[sorted], parameter[call[name[set], parameter[name[marked_star_import_undefined_name]]]]]
return[call[name[re].sub, parameter[constant[\*], call[constant[, ].join, parameter[name[undefined_name]]], name[line]]]] | keyword[def] identifier[filter_star_import] ( identifier[line] , identifier[marked_star_import_undefined_name] ):
literal[string]
identifier[undefined_name] = identifier[sorted] ( identifier[set] ( identifier[marked_star_import_undefined_name] ))
keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] . identifier[join] ( identifier[undefined_name] ), identifier[line] ) | def filter_star_import(line, marked_star_import_undefined_name):
"""Return line with the star import expanded."""
undefined_name = sorted(set(marked_star_import_undefined_name))
return re.sub('\\*', ', '.join(undefined_name), line) |
def flush(self):
"""
Flush all unwritten data to disk.
"""
if self._cache_modified_count > 0:
self.storage.write(self.cache)
self._cache_modified_count = 0 | def function[flush, parameter[self]]:
constant[
Flush all unwritten data to disk.
]
if compare[name[self]._cache_modified_count greater[>] constant[0]] begin[:]
call[name[self].storage.write, parameter[name[self].cache]]
name[self]._cache_modified_count assign[=] constant[0] | keyword[def] identifier[flush] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_cache_modified_count] > literal[int] :
identifier[self] . identifier[storage] . identifier[write] ( identifier[self] . identifier[cache] )
identifier[self] . identifier[_cache_modified_count] = literal[int] | def flush(self):
"""
Flush all unwritten data to disk.
"""
if self._cache_modified_count > 0:
self.storage.write(self.cache)
self._cache_modified_count = 0 # depends on [control=['if'], data=[]] |
def _finish_inheritance(self):
"""Finish those who still need to inherit."""
while self._inheritance_todos:
prototype, parent_id = self._inheritance_todos.pop()
parent = self._id_cache[parent_id]
prototype.inherit_from(parent) | def function[_finish_inheritance, parameter[self]]:
constant[Finish those who still need to inherit.]
while name[self]._inheritance_todos begin[:]
<ast.Tuple object at 0x7da1affc1f60> assign[=] call[name[self]._inheritance_todos.pop, parameter[]]
variable[parent] assign[=] call[name[self]._id_cache][name[parent_id]]
call[name[prototype].inherit_from, parameter[name[parent]]] | keyword[def] identifier[_finish_inheritance] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[_inheritance_todos] :
identifier[prototype] , identifier[parent_id] = identifier[self] . identifier[_inheritance_todos] . identifier[pop] ()
identifier[parent] = identifier[self] . identifier[_id_cache] [ identifier[parent_id] ]
identifier[prototype] . identifier[inherit_from] ( identifier[parent] ) | def _finish_inheritance(self):
"""Finish those who still need to inherit."""
while self._inheritance_todos:
(prototype, parent_id) = self._inheritance_todos.pop()
parent = self._id_cache[parent_id]
prototype.inherit_from(parent) # depends on [control=['while'], data=[]] |
def get_tunnel_info_output_tunnel_has_conflicts(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_info = ET.Element("get_tunnel_info")
config = get_tunnel_info
output = ET.SubElement(get_tunnel_info, "output")
tunnel = ET.SubElement(output, "tunnel")
has_conflicts = ET.SubElement(tunnel, "has-conflicts")
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_tunnel_info_output_tunnel_has_conflicts, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_tunnel_info] assign[=] call[name[ET].Element, parameter[constant[get_tunnel_info]]]
variable[config] assign[=] name[get_tunnel_info]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_tunnel_info], constant[output]]]
variable[tunnel] assign[=] call[name[ET].SubElement, parameter[name[output], constant[tunnel]]]
variable[has_conflicts] assign[=] call[name[ET].SubElement, parameter[name[tunnel], constant[has-conflicts]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_tunnel_info_output_tunnel_has_conflicts] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_tunnel_info] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_tunnel_info]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_tunnel_info] , literal[string] )
identifier[tunnel] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[has_conflicts] = identifier[ET] . identifier[SubElement] ( identifier[tunnel] , literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_tunnel_info_output_tunnel_has_conflicts(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_tunnel_info = ET.Element('get_tunnel_info')
config = get_tunnel_info
output = ET.SubElement(get_tunnel_info, 'output')
tunnel = ET.SubElement(output, 'tunnel')
has_conflicts = ET.SubElement(tunnel, 'has-conflicts')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def delete_resource_attribute(resource_attr_id, **kwargs):
"""
Deletes a resource attribute and all associated data.
"""
user_id = kwargs.get('user_id')
try:
ra = db.DBSession.query(ResourceAttr).filter(ResourceAttr.id == resource_attr_id).one()
except NoResultFound:
raise ResourceNotFoundError("Resource Attribute %s not found"%(resource_attr_id))
ra.check_write_permission(user_id)
db.DBSession.delete(ra)
db.DBSession.flush()
return 'OK' | def function[delete_resource_attribute, parameter[resource_attr_id]]:
constant[
Deletes a resource attribute and all associated data.
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
<ast.Try object at 0x7da18f811930>
call[name[ra].check_write_permission, parameter[name[user_id]]]
call[name[db].DBSession.delete, parameter[name[ra]]]
call[name[db].DBSession.flush, parameter[]]
return[constant[OK]] | keyword[def] identifier[delete_resource_attribute] ( identifier[resource_attr_id] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[try] :
identifier[ra] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[ResourceAttr] ). identifier[filter] ( identifier[ResourceAttr] . identifier[id] == identifier[resource_attr_id] ). identifier[one] ()
keyword[except] identifier[NoResultFound] :
keyword[raise] identifier[ResourceNotFoundError] ( literal[string] %( identifier[resource_attr_id] ))
identifier[ra] . identifier[check_write_permission] ( identifier[user_id] )
identifier[db] . identifier[DBSession] . identifier[delete] ( identifier[ra] )
identifier[db] . identifier[DBSession] . identifier[flush] ()
keyword[return] literal[string] | def delete_resource_attribute(resource_attr_id, **kwargs):
"""
Deletes a resource attribute and all associated data.
"""
user_id = kwargs.get('user_id')
try:
ra = db.DBSession.query(ResourceAttr).filter(ResourceAttr.id == resource_attr_id).one() # depends on [control=['try'], data=[]]
except NoResultFound:
raise ResourceNotFoundError('Resource Attribute %s not found' % resource_attr_id) # depends on [control=['except'], data=[]]
ra.check_write_permission(user_id)
db.DBSession.delete(ra)
db.DBSession.flush()
return 'OK' |
def change_interval(self, value):
""" :param value: list of strings in format 'Day_of_Week-HH:MM' """
assert not isinstance(value, string_types)
self.timestamps = []
for timestamp in value:
event = EventTime(timestamp)
self.timestamps.append(event) | def function[change_interval, parameter[self, value]]:
constant[ :param value: list of strings in format 'Day_of_Week-HH:MM' ]
assert[<ast.UnaryOp object at 0x7da1b24402b0>]
name[self].timestamps assign[=] list[[]]
for taget[name[timestamp]] in starred[name[value]] begin[:]
variable[event] assign[=] call[name[EventTime], parameter[name[timestamp]]]
call[name[self].timestamps.append, parameter[name[event]]] | keyword[def] identifier[change_interval] ( identifier[self] , identifier[value] ):
literal[string]
keyword[assert] keyword[not] identifier[isinstance] ( identifier[value] , identifier[string_types] )
identifier[self] . identifier[timestamps] =[]
keyword[for] identifier[timestamp] keyword[in] identifier[value] :
identifier[event] = identifier[EventTime] ( identifier[timestamp] )
identifier[self] . identifier[timestamps] . identifier[append] ( identifier[event] ) | def change_interval(self, value):
""" :param value: list of strings in format 'Day_of_Week-HH:MM' """
assert not isinstance(value, string_types)
self.timestamps = []
for timestamp in value:
event = EventTime(timestamp)
self.timestamps.append(event) # depends on [control=['for'], data=['timestamp']] |
def import_class(klass):
'''Import the named class and return that class'''
mod = __import__(klass.rpartition('.')[0])
for segment in klass.split('.')[1:-1]:
mod = getattr(mod, segment)
return getattr(mod, klass.rpartition('.')[2]) | def function[import_class, parameter[klass]]:
constant[Import the named class and return that class]
variable[mod] assign[=] call[name[__import__], parameter[call[call[name[klass].rpartition, parameter[constant[.]]]][constant[0]]]]
for taget[name[segment]] in starred[call[call[name[klass].split, parameter[constant[.]]]][<ast.Slice object at 0x7da20e954910>]] begin[:]
variable[mod] assign[=] call[name[getattr], parameter[name[mod], name[segment]]]
return[call[name[getattr], parameter[name[mod], call[call[name[klass].rpartition, parameter[constant[.]]]][constant[2]]]]] | keyword[def] identifier[import_class] ( identifier[klass] ):
literal[string]
identifier[mod] = identifier[__import__] ( identifier[klass] . identifier[rpartition] ( literal[string] )[ literal[int] ])
keyword[for] identifier[segment] keyword[in] identifier[klass] . identifier[split] ( literal[string] )[ literal[int] :- literal[int] ]:
identifier[mod] = identifier[getattr] ( identifier[mod] , identifier[segment] )
keyword[return] identifier[getattr] ( identifier[mod] , identifier[klass] . identifier[rpartition] ( literal[string] )[ literal[int] ]) | def import_class(klass):
"""Import the named class and return that class"""
mod = __import__(klass.rpartition('.')[0])
for segment in klass.split('.')[1:-1]:
mod = getattr(mod, segment) # depends on [control=['for'], data=['segment']]
return getattr(mod, klass.rpartition('.')[2]) |
def _checkReturnTo(self, message, return_to):
"""Check an OpenID message and its openid.return_to value
against a return_to URL from an application. Return True on
success, False on failure.
"""
# Check the openid.return_to args against args in the original
# message.
try:
self._verifyReturnToArgs(message.toPostArgs())
except ProtocolError as why:
logging.exception("Verifying return_to arguments: %s" % (why, ))
return False
# Check the return_to base URL against the one in the message.
msg_return_to = message.getArg(OPENID_NS, 'return_to')
# The URL scheme, authority, and path MUST be the same between
# the two URLs.
app_parts = urlparse(urinorm.urinorm(return_to))
msg_parts = urlparse(urinorm.urinorm(msg_return_to))
# (addressing scheme, network location, path) must be equal in
# both URLs.
for part in range(0, 3):
if app_parts[part] != msg_parts[part]:
return False
return True | def function[_checkReturnTo, parameter[self, message, return_to]]:
constant[Check an OpenID message and its openid.return_to value
against a return_to URL from an application. Return True on
success, False on failure.
]
<ast.Try object at 0x7da1b06caaa0>
variable[msg_return_to] assign[=] call[name[message].getArg, parameter[name[OPENID_NS], constant[return_to]]]
variable[app_parts] assign[=] call[name[urlparse], parameter[call[name[urinorm].urinorm, parameter[name[return_to]]]]]
variable[msg_parts] assign[=] call[name[urlparse], parameter[call[name[urinorm].urinorm, parameter[name[msg_return_to]]]]]
for taget[name[part]] in starred[call[name[range], parameter[constant[0], constant[3]]]] begin[:]
if compare[call[name[app_parts]][name[part]] not_equal[!=] call[name[msg_parts]][name[part]]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_checkReturnTo] ( identifier[self] , identifier[message] , identifier[return_to] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_verifyReturnToArgs] ( identifier[message] . identifier[toPostArgs] ())
keyword[except] identifier[ProtocolError] keyword[as] identifier[why] :
identifier[logging] . identifier[exception] ( literal[string] %( identifier[why] ,))
keyword[return] keyword[False]
identifier[msg_return_to] = identifier[message] . identifier[getArg] ( identifier[OPENID_NS] , literal[string] )
identifier[app_parts] = identifier[urlparse] ( identifier[urinorm] . identifier[urinorm] ( identifier[return_to] ))
identifier[msg_parts] = identifier[urlparse] ( identifier[urinorm] . identifier[urinorm] ( identifier[msg_return_to] ))
keyword[for] identifier[part] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[if] identifier[app_parts] [ identifier[part] ]!= identifier[msg_parts] [ identifier[part] ]:
keyword[return] keyword[False]
keyword[return] keyword[True] | def _checkReturnTo(self, message, return_to):
"""Check an OpenID message and its openid.return_to value
against a return_to URL from an application. Return True on
success, False on failure.
"""
# Check the openid.return_to args against args in the original
# message.
try:
self._verifyReturnToArgs(message.toPostArgs()) # depends on [control=['try'], data=[]]
except ProtocolError as why:
logging.exception('Verifying return_to arguments: %s' % (why,))
return False # depends on [control=['except'], data=['why']]
# Check the return_to base URL against the one in the message.
msg_return_to = message.getArg(OPENID_NS, 'return_to')
# The URL scheme, authority, and path MUST be the same between
# the two URLs.
app_parts = urlparse(urinorm.urinorm(return_to))
msg_parts = urlparse(urinorm.urinorm(msg_return_to))
# (addressing scheme, network location, path) must be equal in
# both URLs.
for part in range(0, 3):
if app_parts[part] != msg_parts[part]:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']]
return True |
def rename_distribution_list(self, dl_description, new_dl_name):
"""
:param dl_description : a DistributionList specifying either :
- id: the dl_list_id
- dl_description: the name of the list
:param new_dl_name: new name of the list
:return: a zobjects.DistributionList
"""
resp = self.request('RenameDistributionList', {
'id': self._get_or_fetch_id(dl_description,
self.get_distribution_list),
'newName': new_dl_name
})
return zobjects.DistributionList.from_dict(resp['dl']) | def function[rename_distribution_list, parameter[self, dl_description, new_dl_name]]:
constant[
:param dl_description : a DistributionList specifying either :
- id: the dl_list_id
- dl_description: the name of the list
:param new_dl_name: new name of the list
:return: a zobjects.DistributionList
]
variable[resp] assign[=] call[name[self].request, parameter[constant[RenameDistributionList], dictionary[[<ast.Constant object at 0x7da18ede5f90>, <ast.Constant object at 0x7da18ede6410>], [<ast.Call object at 0x7da18ede4e50>, <ast.Name object at 0x7da18ede5180>]]]]
return[call[name[zobjects].DistributionList.from_dict, parameter[call[name[resp]][constant[dl]]]]] | keyword[def] identifier[rename_distribution_list] ( identifier[self] , identifier[dl_description] , identifier[new_dl_name] ):
literal[string]
identifier[resp] = identifier[self] . identifier[request] ( literal[string] ,{
literal[string] : identifier[self] . identifier[_get_or_fetch_id] ( identifier[dl_description] ,
identifier[self] . identifier[get_distribution_list] ),
literal[string] : identifier[new_dl_name]
})
keyword[return] identifier[zobjects] . identifier[DistributionList] . identifier[from_dict] ( identifier[resp] [ literal[string] ]) | def rename_distribution_list(self, dl_description, new_dl_name):
"""
:param dl_description : a DistributionList specifying either :
- id: the dl_list_id
- dl_description: the name of the list
:param new_dl_name: new name of the list
:return: a zobjects.DistributionList
"""
resp = self.request('RenameDistributionList', {'id': self._get_or_fetch_id(dl_description, self.get_distribution_list), 'newName': new_dl_name})
return zobjects.DistributionList.from_dict(resp['dl']) |
def user_related(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/users#user-related-information"
api_path = "/api/v2/users/{id}/related.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | def function[user_related, parameter[self, id]]:
constant[https://developer.zendesk.com/rest_api/docs/core/users#user-related-information]
variable[api_path] assign[=] constant[/api/v2/users/{id}/related.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[user_related] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] ) | def user_related(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/users#user-related-information"""
api_path = '/api/v2/users/{id}/related.json'
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder) | def function[__placeBuyOrder, parameter[self, tick]]:
constant[ place buy order]
variable[cash] assign[=] call[name[self].__getCashToBuyStock, parameter[]]
if compare[name[cash] equal[==] constant[0]] begin[:]
return[None]
variable[share] assign[=] call[name[math].floor, parameter[binary_operation[name[cash] / call[name[float], parameter[name[tick].close]]]]]
variable[buyOrder] assign[=] call[name[Order], parameter[]]
if call[name[self].__strategy.placeOrder, parameter[name[buyOrder]]] begin[:]
name[self].__buyOrder assign[=] name[buyOrder]
variable[stopOrder] assign[=] call[name[Order], parameter[]]
call[name[self].__placeStopOrder, parameter[name[stopOrder]]] | keyword[def] identifier[__placeBuyOrder] ( identifier[self] , identifier[tick] ):
literal[string]
identifier[cash] = identifier[self] . identifier[__getCashToBuyStock] ()
keyword[if] identifier[cash] == literal[int] :
keyword[return]
identifier[share] = identifier[math] . identifier[floor] ( identifier[cash] / identifier[float] ( identifier[tick] . identifier[close] ))
identifier[buyOrder] = identifier[Order] ( identifier[accountId] = identifier[self] . identifier[__strategy] . identifier[accountId] ,
identifier[action] = identifier[Action] . identifier[BUY] ,
identifier[is_market] = keyword[True] ,
identifier[security] = identifier[self] . identifier[__security] ,
identifier[share] = identifier[share] )
keyword[if] identifier[self] . identifier[__strategy] . identifier[placeOrder] ( identifier[buyOrder] ):
identifier[self] . identifier[__buyOrder] = identifier[buyOrder]
identifier[stopOrder] = identifier[Order] ( identifier[accountId] = identifier[self] . identifier[__strategy] . identifier[accountId] ,
identifier[action] = identifier[Action] . identifier[SELL] ,
identifier[is_stop] = keyword[True] ,
identifier[security] = identifier[self] . identifier[__security] ,
identifier[price] = identifier[tick] . identifier[close] * literal[int] ,
identifier[share] = literal[int] - identifier[share] )
identifier[self] . identifier[__placeStopOrder] ( identifier[stopOrder] ) | def __placeBuyOrder(self, tick):
""" place buy order"""
cash = self.__getCashToBuyStock()
if cash == 0:
return # depends on [control=['if'], data=[]]
share = math.floor(cash / float(tick.close))
buyOrder = Order(accountId=self.__strategy.accountId, action=Action.BUY, is_market=True, security=self.__security, share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder = buyOrder
# place stop order
stopOrder = Order(accountId=self.__strategy.accountId, action=Action.SELL, is_stop=True, security=self.__security, price=tick.close * 0.95, share=0 - share)
self.__placeStopOrder(stopOrder) # depends on [control=['if'], data=[]] |
def get_contradiction_summary(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity, str]]:
"""Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs
that have multiple, contradictory relations.
"""
for u, v in set(graph.edges()):
relations = {data[RELATION] for data in graph[u][v].values()}
if relation_set_has_contradictions(relations):
yield u, v, relations | def function[get_contradiction_summary, parameter[graph]]:
constant[Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs
that have multiple, contradictory relations.
]
for taget[tuple[[<ast.Name object at 0x7da1afe893c0>, <ast.Name object at 0x7da1afe888b0>]]] in starred[call[name[set], parameter[call[name[graph].edges, parameter[]]]]] begin[:]
variable[relations] assign[=] <ast.SetComp object at 0x7da1afe895a0>
if call[name[relation_set_has_contradictions], parameter[name[relations]]] begin[:]
<ast.Yield object at 0x7da1afe8bb50> | keyword[def] identifier[get_contradiction_summary] ( identifier[graph] : identifier[BELGraph] )-> identifier[Iterable] [ identifier[Tuple] [ identifier[BaseEntity] , identifier[BaseEntity] , identifier[str] ]]:
literal[string]
keyword[for] identifier[u] , identifier[v] keyword[in] identifier[set] ( identifier[graph] . identifier[edges] ()):
identifier[relations] ={ identifier[data] [ identifier[RELATION] ] keyword[for] identifier[data] keyword[in] identifier[graph] [ identifier[u] ][ identifier[v] ]. identifier[values] ()}
keyword[if] identifier[relation_set_has_contradictions] ( identifier[relations] ):
keyword[yield] identifier[u] , identifier[v] , identifier[relations] | def get_contradiction_summary(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity, str]]:
"""Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs
that have multiple, contradictory relations.
"""
for (u, v) in set(graph.edges()):
relations = {data[RELATION] for data in graph[u][v].values()}
if relation_set_has_contradictions(relations):
yield (u, v, relations) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def _compute_unnecessary_deps(self, target, actual_deps):
"""Computes unused deps for the given Target.
:returns: A dict of directly declared but unused targets, to sets of suggested replacements.
"""
# Flatten the product deps of this target.
product_deps = set()
for dep_entries in actual_deps.values():
product_deps.update(dep_entries)
# Determine which of the DEFAULT deps in the declared set of this target were used.
used = set()
unused = set()
for dep, _ in self._analyzer.resolve_aliases(target, scope=Scopes.DEFAULT):
if dep in used or dep in unused:
continue
# TODO: What's a better way to accomplish this check? Filtering by `has_sources` would
# incorrectly skip "empty" `*_library` targets, which could then be used as a loophole.
if isinstance(dep, (Resources, UnpackedJars)):
continue
# If any of the target's jars or classfiles were used, consider it used.
if product_deps.isdisjoint(self._analyzer.files_for_target(dep)):
unused.add(dep)
else:
used.add(dep)
# If there were no unused deps, break.
if not unused:
return {}
# For any deps that were used, count their derived-from targets used as well.
# TODO: Refactor to do some of this above once tests are in place.
for dep in list(used):
for derived_from in dep.derived_from_chain:
if derived_from in unused:
unused.remove(derived_from)
used.add(derived_from)
# Prune derived targets that would be in the set twice.
for dep in list(unused):
if set(dep.derived_from_chain) & unused:
unused.remove(dep)
if not unused:
return {}
# For any deps that were not used, determine whether their transitive deps were used, and
# recommend those as replacements.
replacements = {}
for dep in unused:
replacements[dep] = set()
for t in dep.closure():
if t in used or t in unused:
continue
if not product_deps.isdisjoint(self._analyzer.files_for_target(t)):
replacements[dep].add(t.concrete_derived_from)
return replacements | def function[_compute_unnecessary_deps, parameter[self, target, actual_deps]]:
constant[Computes unused deps for the given Target.
:returns: A dict of directly declared but unused targets, to sets of suggested replacements.
]
variable[product_deps] assign[=] call[name[set], parameter[]]
for taget[name[dep_entries]] in starred[call[name[actual_deps].values, parameter[]]] begin[:]
call[name[product_deps].update, parameter[name[dep_entries]]]
variable[used] assign[=] call[name[set], parameter[]]
variable[unused] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b22ae7d0>, <ast.Name object at 0x7da1b22adbd0>]]] in starred[call[name[self]._analyzer.resolve_aliases, parameter[name[target]]]] begin[:]
if <ast.BoolOp object at 0x7da1b22ad480> begin[:]
continue
if call[name[isinstance], parameter[name[dep], tuple[[<ast.Name object at 0x7da1b2254c10>, <ast.Name object at 0x7da1b2257940>]]]] begin[:]
continue
if call[name[product_deps].isdisjoint, parameter[call[name[self]._analyzer.files_for_target, parameter[name[dep]]]]] begin[:]
call[name[unused].add, parameter[name[dep]]]
if <ast.UnaryOp object at 0x7da1b2256e30> begin[:]
return[dictionary[[], []]]
for taget[name[dep]] in starred[call[name[list], parameter[name[used]]]] begin[:]
for taget[name[derived_from]] in starred[name[dep].derived_from_chain] begin[:]
if compare[name[derived_from] in name[unused]] begin[:]
call[name[unused].remove, parameter[name[derived_from]]]
call[name[used].add, parameter[name[derived_from]]]
for taget[name[dep]] in starred[call[name[list], parameter[name[unused]]]] begin[:]
if binary_operation[call[name[set], parameter[name[dep].derived_from_chain]] <ast.BitAnd object at 0x7da2590d6b60> name[unused]] begin[:]
call[name[unused].remove, parameter[name[dep]]]
if <ast.UnaryOp object at 0x7da18fe93430> begin[:]
return[dictionary[[], []]]
variable[replacements] assign[=] dictionary[[], []]
for taget[name[dep]] in starred[name[unused]] begin[:]
call[name[replacements]][name[dep]] assign[=] call[name[set], parameter[]]
for taget[name[t]] in starred[call[name[dep].closure, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18fe92440> begin[:]
continue
if <ast.UnaryOp object at 0x7da18fe90c10> begin[:]
call[call[name[replacements]][name[dep]].add, parameter[name[t].concrete_derived_from]]
return[name[replacements]] | keyword[def] identifier[_compute_unnecessary_deps] ( identifier[self] , identifier[target] , identifier[actual_deps] ):
literal[string]
identifier[product_deps] = identifier[set] ()
keyword[for] identifier[dep_entries] keyword[in] identifier[actual_deps] . identifier[values] ():
identifier[product_deps] . identifier[update] ( identifier[dep_entries] )
identifier[used] = identifier[set] ()
identifier[unused] = identifier[set] ()
keyword[for] identifier[dep] , identifier[_] keyword[in] identifier[self] . identifier[_analyzer] . identifier[resolve_aliases] ( identifier[target] , identifier[scope] = identifier[Scopes] . identifier[DEFAULT] ):
keyword[if] identifier[dep] keyword[in] identifier[used] keyword[or] identifier[dep] keyword[in] identifier[unused] :
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[dep] ,( identifier[Resources] , identifier[UnpackedJars] )):
keyword[continue]
keyword[if] identifier[product_deps] . identifier[isdisjoint] ( identifier[self] . identifier[_analyzer] . identifier[files_for_target] ( identifier[dep] )):
identifier[unused] . identifier[add] ( identifier[dep] )
keyword[else] :
identifier[used] . identifier[add] ( identifier[dep] )
keyword[if] keyword[not] identifier[unused] :
keyword[return] {}
keyword[for] identifier[dep] keyword[in] identifier[list] ( identifier[used] ):
keyword[for] identifier[derived_from] keyword[in] identifier[dep] . identifier[derived_from_chain] :
keyword[if] identifier[derived_from] keyword[in] identifier[unused] :
identifier[unused] . identifier[remove] ( identifier[derived_from] )
identifier[used] . identifier[add] ( identifier[derived_from] )
keyword[for] identifier[dep] keyword[in] identifier[list] ( identifier[unused] ):
keyword[if] identifier[set] ( identifier[dep] . identifier[derived_from_chain] )& identifier[unused] :
identifier[unused] . identifier[remove] ( identifier[dep] )
keyword[if] keyword[not] identifier[unused] :
keyword[return] {}
identifier[replacements] ={}
keyword[for] identifier[dep] keyword[in] identifier[unused] :
identifier[replacements] [ identifier[dep] ]= identifier[set] ()
keyword[for] identifier[t] keyword[in] identifier[dep] . identifier[closure] ():
keyword[if] identifier[t] keyword[in] identifier[used] keyword[or] identifier[t] keyword[in] identifier[unused] :
keyword[continue]
keyword[if] keyword[not] identifier[product_deps] . identifier[isdisjoint] ( identifier[self] . identifier[_analyzer] . identifier[files_for_target] ( identifier[t] )):
identifier[replacements] [ identifier[dep] ]. identifier[add] ( identifier[t] . identifier[concrete_derived_from] )
keyword[return] identifier[replacements] | def _compute_unnecessary_deps(self, target, actual_deps):
"""Computes unused deps for the given Target.
:returns: A dict of directly declared but unused targets, to sets of suggested replacements.
"""
# Flatten the product deps of this target.
product_deps = set()
for dep_entries in actual_deps.values():
product_deps.update(dep_entries) # depends on [control=['for'], data=['dep_entries']]
# Determine which of the DEFAULT deps in the declared set of this target were used.
used = set()
unused = set()
for (dep, _) in self._analyzer.resolve_aliases(target, scope=Scopes.DEFAULT):
if dep in used or dep in unused:
continue # depends on [control=['if'], data=[]]
# TODO: What's a better way to accomplish this check? Filtering by `has_sources` would
# incorrectly skip "empty" `*_library` targets, which could then be used as a loophole.
if isinstance(dep, (Resources, UnpackedJars)):
continue # depends on [control=['if'], data=[]]
# If any of the target's jars or classfiles were used, consider it used.
if product_deps.isdisjoint(self._analyzer.files_for_target(dep)):
unused.add(dep) # depends on [control=['if'], data=[]]
else:
used.add(dep) # depends on [control=['for'], data=[]]
# If there were no unused deps, break.
if not unused:
return {} # depends on [control=['if'], data=[]]
# For any deps that were used, count their derived-from targets used as well.
# TODO: Refactor to do some of this above once tests are in place.
for dep in list(used):
for derived_from in dep.derived_from_chain:
if derived_from in unused:
unused.remove(derived_from)
used.add(derived_from) # depends on [control=['if'], data=['derived_from', 'unused']] # depends on [control=['for'], data=['derived_from']] # depends on [control=['for'], data=['dep']]
# Prune derived targets that would be in the set twice.
for dep in list(unused):
if set(dep.derived_from_chain) & unused:
unused.remove(dep) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dep']]
if not unused:
return {} # depends on [control=['if'], data=[]]
# For any deps that were not used, determine whether their transitive deps were used, and
# recommend those as replacements.
replacements = {}
for dep in unused:
replacements[dep] = set()
for t in dep.closure():
if t in used or t in unused:
continue # depends on [control=['if'], data=[]]
if not product_deps.isdisjoint(self._analyzer.files_for_target(t)):
replacements[dep].add(t.concrete_derived_from) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] # depends on [control=['for'], data=['dep']]
return replacements |
def windows_union(windows):
    """Given a list of (beginning, ending), return a minimal version that contains the same ranges.
    :rtype: list
    """
    def merge(a, b):
        # Combine two windows where ``a`` starts no later than ``b``:
        # identical windows collapse to one, overlapping windows fuse,
        # and disjoint windows are kept side by side.
        if a == b:
            return [a]
        assert a[0] < b[0]
        if a[1] >= b[0]:
            # Overlap: keep whichever right edge reaches further.
            return [(a[0], b[1])] if b[1] > a[1] else [a]
        return [a, b]
    if len(windows) == 1:
        return windows
    # Partition into left-open, right-open and fully bounded windows so
    # that open-ended ranges can be processed at the correct end.
    open_left = []
    open_right = []
    bounded = []
    for win in windows:
        if win[0] is None:
            open_left.append(win)
        elif win[1] is None:
            open_right.append(win)
        else:
            bounded.append(win)
    bounded.sort()
    # Sweep every window in ascending order, repeatedly merging the
    # current window into the last one accumulated.
    merged = []
    for win in open_left + bounded + open_right:
        if merged:
            merged.extend(merge(merged.pop(), win))
        else:
            merged.append(win)
    return merged
constant[Given a list of (beginning, ending), return a minimal version that contains the same ranges.
:rtype: list
]
def function[fix_overlap, parameter[left, right]]:
if compare[name[left] equal[==] name[right]] begin[:]
return[list[[<ast.Name object at 0x7da1b0ba64a0>]]]
assert[compare[call[name[left]][constant[0]] less[<] call[name[right]][constant[0]]]]
if compare[call[name[left]][constant[1]] greater_or_equal[>=] call[name[right]][constant[0]]] begin[:]
if compare[call[name[right]][constant[1]] greater[>] call[name[left]][constant[1]]] begin[:]
return[list[[<ast.Tuple object at 0x7da1b0ba67a0>]]]
return[list[[<ast.Name object at 0x7da1b0ba59f0>, <ast.Name object at 0x7da1b0ba5990>]]]
if compare[call[name[len], parameter[name[windows]]] equal[==] constant[1]] begin[:]
return[name[windows]]
variable[none_left] assign[=] list[[]]
variable[none_right] assign[=] list[[]]
variable[otherwise] assign[=] list[[]]
for taget[name[window]] in starred[name[windows]] begin[:]
if compare[call[name[window]][constant[0]] is constant[None]] begin[:]
call[name[none_left].append, parameter[name[window]]]
variable[res] assign[=] list[[]]
call[name[otherwise].sort, parameter[]]
for taget[name[window]] in starred[name[none_left]] begin[:]
if <ast.UnaryOp object at 0x7da1b0ba4d60> begin[:]
call[name[res].append, parameter[name[window]]]
continue
call[name[res].extend, parameter[call[name[fix_overlap], parameter[call[name[res].pop, parameter[]], name[window]]]]]
while name[otherwise] begin[:]
variable[window] assign[=] call[name[otherwise].pop, parameter[constant[0]]]
if <ast.UnaryOp object at 0x7da1b0ba43a0> begin[:]
call[name[res].append, parameter[name[window]]]
continue
call[name[res].extend, parameter[call[name[fix_overlap], parameter[call[name[res].pop, parameter[]], name[window]]]]]
for taget[name[window]] in starred[name[none_right]] begin[:]
if <ast.UnaryOp object at 0x7da1b0ba7ca0> begin[:]
call[name[res].append, parameter[name[window]]]
continue
call[name[res].extend, parameter[call[name[fix_overlap], parameter[call[name[res].pop, parameter[]], name[window]]]]]
return[name[res]] | keyword[def] identifier[windows_union] ( identifier[windows] ):
literal[string]
keyword[def] identifier[fix_overlap] ( identifier[left] , identifier[right] ):
keyword[if] identifier[left] == identifier[right] :
keyword[return] [ identifier[left] ]
keyword[assert] identifier[left] [ literal[int] ]< identifier[right] [ literal[int] ]
keyword[if] identifier[left] [ literal[int] ]>= identifier[right] [ literal[int] ]:
keyword[if] identifier[right] [ literal[int] ]> identifier[left] [ literal[int] ]:
keyword[return] [( identifier[left] [ literal[int] ], identifier[right] [ literal[int] ])]
keyword[else] :
keyword[return] [ identifier[left] ]
keyword[return] [ identifier[left] , identifier[right] ]
keyword[if] identifier[len] ( identifier[windows] )== literal[int] :
keyword[return] identifier[windows]
identifier[none_left] =[]
identifier[none_right] =[]
identifier[otherwise] =[]
keyword[for] identifier[window] keyword[in] identifier[windows] :
keyword[if] identifier[window] [ literal[int] ] keyword[is] keyword[None] :
identifier[none_left] . identifier[append] ( identifier[window] )
keyword[elif] identifier[window] [ literal[int] ] keyword[is] keyword[None] :
identifier[none_right] . identifier[append] ( identifier[window] )
keyword[else] :
identifier[otherwise] . identifier[append] ( identifier[window] )
identifier[res] =[]
identifier[otherwise] . identifier[sort] ()
keyword[for] identifier[window] keyword[in] identifier[none_left] :
keyword[if] keyword[not] identifier[res] :
identifier[res] . identifier[append] ( identifier[window] )
keyword[continue]
identifier[res] . identifier[extend] ( identifier[fix_overlap] ( identifier[res] . identifier[pop] (), identifier[window] ))
keyword[while] identifier[otherwise] :
identifier[window] = identifier[otherwise] . identifier[pop] ( literal[int] )
keyword[if] keyword[not] identifier[res] :
identifier[res] . identifier[append] ( identifier[window] )
keyword[continue]
identifier[res] . identifier[extend] ( identifier[fix_overlap] ( identifier[res] . identifier[pop] (), identifier[window] ))
keyword[for] identifier[window] keyword[in] identifier[none_right] :
keyword[if] keyword[not] identifier[res] :
identifier[res] . identifier[append] ( identifier[window] )
keyword[continue]
identifier[res] . identifier[extend] ( identifier[fix_overlap] ( identifier[res] . identifier[pop] (), identifier[window] ))
keyword[return] identifier[res] | def windows_union(windows):
"""Given a list of (beginning, ending), return a minimal version that contains the same ranges.
:rtype: list
"""
def fix_overlap(left, right):
if left == right:
return [left] # depends on [control=['if'], data=['left']]
assert left[0] < right[0]
if left[1] >= right[0]:
if right[1] > left[1]:
return [(left[0], right[1])] # depends on [control=['if'], data=[]]
else:
return [left] # depends on [control=['if'], data=[]]
return [left, right]
if len(windows) == 1:
return windows # depends on [control=['if'], data=[]]
none_left = []
none_right = []
otherwise = []
for window in windows:
if window[0] is None:
none_left.append(window) # depends on [control=['if'], data=[]]
elif window[1] is None:
none_right.append(window) # depends on [control=['if'], data=[]]
else:
otherwise.append(window) # depends on [control=['for'], data=['window']]
res = []
otherwise.sort()
for window in none_left:
if not res:
res.append(window)
continue # depends on [control=['if'], data=[]]
res.extend(fix_overlap(res.pop(), window)) # depends on [control=['for'], data=['window']]
while otherwise:
window = otherwise.pop(0)
if not res:
res.append(window)
continue # depends on [control=['if'], data=[]]
res.extend(fix_overlap(res.pop(), window)) # depends on [control=['while'], data=[]]
for window in none_right:
if not res:
res.append(window)
continue # depends on [control=['if'], data=[]]
res.extend(fix_overlap(res.pop(), window)) # depends on [control=['for'], data=['window']]
return res |
def qquery(xml_thing, xpath_thing, vars=None, funcs=None):
    '''
    Quick query. Convenience for using the MicroXPath engine.
    Give it some XML and an expression and it will yield the results. No fuss.
    xml_thing - bytes or string, or amara3.xml.tree node
    xpath_thing - string or parsed XPath expression
    vars - optional mapping of variables, name to value
    funcs - optional mapping of functions, name to function object
    >>> from amara3.uxml.uxpath import qquery
    >>> results = qquery(b'<a>1<b>2</b>3</a>', 'a/text()')
    >>> next(results).xml_value
    '1'
    >>> next(results).xml_value
    '3'
    '''
    # Normalize the XML input to a tree root node.
    root = None
    if isinstance(xml_thing, nodetype):
        root = xml_thing
    elif isinstance(xml_thing, str):
        tb = tree.treebuilder()
        root = tb.parse(xml_thing)
    elif isinstance(xml_thing, bytes):
        tb = tree.treebuilder()
        #Force UTF-8
        root = tb.parse(xml_thing.decode('utf-8'))
    # Unsupported input type: yield nothing rather than raising.
    # (Explicit None check: a valid-but-empty root must not be skipped.)
    if root is None:
        return
    # Accept either an XPath string or an already-parsed expression;
    # previously a pre-parsed expression raised NameError because
    # parsed_expr was never bound.
    if isinstance(xpath_thing, str):
        parsed_expr = parse(xpath_thing)
    else:
        parsed_expr = xpath_thing
    ctx = context(root, variables=vars, functions=funcs)
    result = parsed_expr.compute(ctx)
    yield from result
constant[
Quick query. Convenience for using the MicroXPath engine.
Give it some XML and an expression and it will yield the results. No fuss.
xml_thing - bytes or string, or amara3.xml.tree node
xpath_thing - string or parsed XPath expression
vars - optional mapping of variables, name to value
funcs - optional mapping of functions, name to function object
>>> from amara3.uxml.uxpath import qquery
>>> results = qquery(b'<a>1<b>2</b>3</a>', 'a/text()'))
>>> next(results).xml_value
'1'
>>> next(results).xml_value
'3'
]
variable[root] assign[=] constant[None]
if call[name[isinstance], parameter[name[xml_thing], name[nodetype]]] begin[:]
variable[root] assign[=] name[xml_thing]
if <ast.UnaryOp object at 0x7da18f00e020> begin[:]
return[None]
if call[name[isinstance], parameter[name[xpath_thing], name[str]]] begin[:]
variable[parsed_expr] assign[=] call[name[parse], parameter[name[xpath_thing]]]
variable[ctx] assign[=] call[name[context], parameter[name[root]]]
variable[result] assign[=] call[name[parsed_expr].compute, parameter[name[ctx]]]
<ast.YieldFrom object at 0x7da18f00cc40> | keyword[def] identifier[qquery] ( identifier[xml_thing] , identifier[xpath_thing] , identifier[vars] = keyword[None] , identifier[funcs] = keyword[None] ):
literal[string]
identifier[root] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[xml_thing] , identifier[nodetype] ):
identifier[root] = identifier[xml_thing]
keyword[elif] identifier[isinstance] ( identifier[xml_thing] , identifier[str] ):
identifier[tb] = identifier[tree] . identifier[treebuilder] ()
identifier[root] = identifier[tb] . identifier[parse] ( identifier[xml_thing] )
keyword[elif] identifier[isinstance] ( identifier[xml_thing] , identifier[bytes] ):
identifier[tb] = identifier[tree] . identifier[treebuilder] ()
identifier[root] = identifier[tb] . identifier[parse] ( identifier[xml_thing] . identifier[decode] ( literal[string] ))
keyword[if] keyword[not] identifier[root] : keyword[return]
keyword[if] identifier[isinstance] ( identifier[xpath_thing] , identifier[str] ):
identifier[parsed_expr] = identifier[parse] ( identifier[xpath_thing] )
identifier[ctx] = identifier[context] ( identifier[root] , identifier[variables] = identifier[vars] , identifier[functions] = identifier[funcs] )
identifier[result] = identifier[parsed_expr] . identifier[compute] ( identifier[ctx] )
keyword[yield] keyword[from] identifier[result] | def qquery(xml_thing, xpath_thing, vars=None, funcs=None):
"""
Quick query. Convenience for using the MicroXPath engine.
Give it some XML and an expression and it will yield the results. No fuss.
xml_thing - bytes or string, or amara3.xml.tree node
xpath_thing - string or parsed XPath expression
vars - optional mapping of variables, name to value
funcs - optional mapping of functions, name to function object
>>> from amara3.uxml.uxpath import qquery
>>> results = qquery(b'<a>1<b>2</b>3</a>', 'a/text()'))
>>> next(results).xml_value
'1'
>>> next(results).xml_value
'3'
"""
root = None
if isinstance(xml_thing, nodetype):
root = xml_thing # depends on [control=['if'], data=[]]
elif isinstance(xml_thing, str):
tb = tree.treebuilder()
root = tb.parse(xml_thing) # depends on [control=['if'], data=[]]
elif isinstance(xml_thing, bytes):
tb = tree.treebuilder()
#Force UTF-8
root = tb.parse(xml_thing.decode('utf-8')) # depends on [control=['if'], data=[]]
if not root:
return # depends on [control=['if'], data=[]]
if isinstance(xpath_thing, str):
parsed_expr = parse(xpath_thing) # depends on [control=['if'], data=[]]
ctx = context(root, variables=vars, functions=funcs)
result = parsed_expr.compute(ctx)
yield from result |
def eventFilter(self, widget, event):
        """Intercept key presses on the search_text widget.

        Emits return_pressed / return_shift_pressed when Enter is hit;
        these signals drive searching forward and backward. Also a crude
        hack to get Tab working in the Find/Replace boxes: Tab copies the
        search text into the replace box before focus moves on.
        """
        if event.type() == QEvent.KeyPress:
            pressed = event.key()
            with_shift = event.modifiers() & Qt.ShiftModifier
            if pressed == Qt.Key_Return:
                # Shift+Enter searches backward, plain Enter forward.
                signal = (self.return_shift_pressed if with_shift
                          else self.return_pressed)
                signal.emit()
            if pressed == Qt.Key_Tab and self.search_text.hasFocus():
                # Mirror the search text into the replace box, then let
                # Qt move focus to the next widget in the tab order.
                self.replace_text.set_current_text(
                    self.search_text.currentText())
                self.focusNextChild()
        return super(FindReplace, self).eventFilter(widget, event)
constant[Event filter for search_text widget.
Emits signals when presing Enter and Shift+Enter.
This signals are used for search forward and backward.
Also, a crude hack to get tab working in the Find/Replace boxes.
]
if compare[call[name[event].type, parameter[]] equal[==] name[QEvent].KeyPress] begin[:]
variable[key] assign[=] call[name[event].key, parameter[]]
variable[shift] assign[=] binary_operation[call[name[event].modifiers, parameter[]] <ast.BitAnd object at 0x7da2590d6b60> name[Qt].ShiftModifier]
if compare[name[key] equal[==] name[Qt].Key_Return] begin[:]
if name[shift] begin[:]
call[name[self].return_shift_pressed.emit, parameter[]]
if compare[name[key] equal[==] name[Qt].Key_Tab] begin[:]
if call[name[self].search_text.hasFocus, parameter[]] begin[:]
call[name[self].replace_text.set_current_text, parameter[call[name[self].search_text.currentText, parameter[]]]]
call[name[self].focusNextChild, parameter[]]
return[call[call[name[super], parameter[name[FindReplace], name[self]]].eventFilter, parameter[name[widget], name[event]]]] | keyword[def] identifier[eventFilter] ( identifier[self] , identifier[widget] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[type] ()== identifier[QEvent] . identifier[KeyPress] :
identifier[key] = identifier[event] . identifier[key] ()
identifier[shift] = identifier[event] . identifier[modifiers] ()& identifier[Qt] . identifier[ShiftModifier]
keyword[if] identifier[key] == identifier[Qt] . identifier[Key_Return] :
keyword[if] identifier[shift] :
identifier[self] . identifier[return_shift_pressed] . identifier[emit] ()
keyword[else] :
identifier[self] . identifier[return_pressed] . identifier[emit] ()
keyword[if] identifier[key] == identifier[Qt] . identifier[Key_Tab] :
keyword[if] identifier[self] . identifier[search_text] . identifier[hasFocus] ():
identifier[self] . identifier[replace_text] . identifier[set_current_text] (
identifier[self] . identifier[search_text] . identifier[currentText] ())
identifier[self] . identifier[focusNextChild] ()
keyword[return] identifier[super] ( identifier[FindReplace] , identifier[self] ). identifier[eventFilter] ( identifier[widget] , identifier[event] ) | def eventFilter(self, widget, event):
"""Event filter for search_text widget.
Emits signals when presing Enter and Shift+Enter.
This signals are used for search forward and backward.
Also, a crude hack to get tab working in the Find/Replace boxes.
"""
if event.type() == QEvent.KeyPress:
key = event.key()
shift = event.modifiers() & Qt.ShiftModifier
if key == Qt.Key_Return:
if shift:
self.return_shift_pressed.emit() # depends on [control=['if'], data=[]]
else:
self.return_pressed.emit() # depends on [control=['if'], data=[]]
if key == Qt.Key_Tab:
if self.search_text.hasFocus():
self.replace_text.set_current_text(self.search_text.currentText()) # depends on [control=['if'], data=[]]
self.focusNextChild() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return super(FindReplace, self).eventFilter(widget, event) |
def volume_mesh(mesh, count):
    """
    Use rejection sampling to produce points randomly distributed
    in the volume of a mesh.
    Parameters
    ----------
    mesh: Trimesh object
    count: int, number of samples desired
    Returns
    ----------
    samples: (n,3) float, points in the volume of the mesh.
    where: n <= count
    """
    # Draw `count` uniform points inside the mesh's axis-aligned
    # bounding box ...
    lower_corner = mesh.bounds[0]
    candidates = np.random.random((count, 3)) * mesh.extents + lower_corner
    # ... then reject every candidate that falls outside the mesh
    # itself, which is why fewer than `count` samples may come back.
    inside = mesh.contains(candidates)
    return candidates[inside][:count]
constant[
Use rejection sampling to produce points randomly distributed
in the volume of a mesh.
Parameters
----------
mesh: Trimesh object
count: int, number of samples desired
Returns
----------
samples: (n,3) float, points in the volume of the mesh.
where: n <= count
]
variable[points] assign[=] binary_operation[binary_operation[call[name[np].random.random, parameter[tuple[[<ast.Name object at 0x7da204567070>, <ast.Constant object at 0x7da204564e80>]]]] * name[mesh].extents] + call[name[mesh].bounds][constant[0]]]
variable[contained] assign[=] call[name[mesh].contains, parameter[name[points]]]
variable[samples] assign[=] call[call[name[points]][name[contained]]][<ast.Slice object at 0x7da204565570>]
return[name[samples]] | keyword[def] identifier[volume_mesh] ( identifier[mesh] , identifier[count] ):
literal[string]
identifier[points] =( identifier[np] . identifier[random] . identifier[random] (( identifier[count] , literal[int] ))* identifier[mesh] . identifier[extents] )+ identifier[mesh] . identifier[bounds] [ literal[int] ]
identifier[contained] = identifier[mesh] . identifier[contains] ( identifier[points] )
identifier[samples] = identifier[points] [ identifier[contained] ][: identifier[count] ]
keyword[return] identifier[samples] | def volume_mesh(mesh, count):
"""
Use rejection sampling to produce points randomly distributed
in the volume of a mesh.
Parameters
----------
mesh: Trimesh object
count: int, number of samples desired
Returns
----------
samples: (n,3) float, points in the volume of the mesh.
where: n <= count
"""
points = np.random.random((count, 3)) * mesh.extents + mesh.bounds[0]
contained = mesh.contains(points)
samples = points[contained][:count]
return samples |
def do_bo(self, arg):
        """
        [~process] bo <address> - make a code breakpoint one-shot
        [~thread] bo <address> - make a hardware breakpoint one-shot
        [~process] bo <address-address> - make a memory breakpoint one-shot
        [~process] bo <address> <size> - make a memory breakpoint one-shot
        """
        tokens = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_breakpoint(tokens)
        debugger = self.debug
        made_one_shot = False
        if size is None:
            # No size given: the address may name a hardware breakpoint
            # (per thread) and/or a code breakpoint (per process).
            if tid is not None and \
                    debugger.has_hardware_breakpoint(tid, address):
                debugger.enable_one_shot_hardware_breakpoint(tid, address)
                made_one_shot = True
            if pid is not None and \
                    debugger.has_code_breakpoint(pid, address):
                debugger.enable_one_shot_code_breakpoint(pid, address)
                made_one_shot = True
        elif debugger.has_page_breakpoint(pid, address):
            # A size or range was given: this is a memory (page) breakpoint.
            debugger.enable_one_shot_page_breakpoint(pid, address)
            made_one_shot = True
        if not made_one_shot:
            print("Error: breakpoint not found.")
constant[
[~process] bo <address> - make a code breakpoint one-shot
[~thread] bo <address> - make a hardware breakpoint one-shot
[~process] bo <address-address> - make a memory breakpoint one-shot
[~process] bo <address> <size> - make a memory breakpoint one-shot
]
variable[token_list] assign[=] call[name[self].split_tokens, parameter[name[arg], constant[1], constant[2]]]
<ast.Tuple object at 0x7da18f7232e0> assign[=] call[name[self].input_breakpoint, parameter[name[token_list]]]
variable[debug] assign[=] name[self].debug
variable[found] assign[=] constant[False]
if compare[name[size] is constant[None]] begin[:]
if compare[name[tid] is_not constant[None]] begin[:]
if call[name[debug].has_hardware_breakpoint, parameter[name[tid], name[address]]] begin[:]
call[name[debug].enable_one_shot_hardware_breakpoint, parameter[name[tid], name[address]]]
variable[found] assign[=] constant[True]
if compare[name[pid] is_not constant[None]] begin[:]
if call[name[debug].has_code_breakpoint, parameter[name[pid], name[address]]] begin[:]
call[name[debug].enable_one_shot_code_breakpoint, parameter[name[pid], name[address]]]
variable[found] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da20c6c4910> begin[:]
call[name[print], parameter[constant[Error: breakpoint not found.]]] | keyword[def] identifier[do_bo] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[token_list] = identifier[self] . identifier[split_tokens] ( identifier[arg] , literal[int] , literal[int] )
identifier[pid] , identifier[tid] , identifier[address] , identifier[size] = identifier[self] . identifier[input_breakpoint] ( identifier[token_list] )
identifier[debug] = identifier[self] . identifier[debug]
identifier[found] = keyword[False]
keyword[if] identifier[size] keyword[is] keyword[None] :
keyword[if] identifier[tid] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[debug] . identifier[has_hardware_breakpoint] ( identifier[tid] , identifier[address] ):
identifier[debug] . identifier[enable_one_shot_hardware_breakpoint] ( identifier[tid] , identifier[address] )
identifier[found] = keyword[True]
keyword[if] identifier[pid] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[debug] . identifier[has_code_breakpoint] ( identifier[pid] , identifier[address] ):
identifier[debug] . identifier[enable_one_shot_code_breakpoint] ( identifier[pid] , identifier[address] )
identifier[found] = keyword[True]
keyword[else] :
keyword[if] identifier[debug] . identifier[has_page_breakpoint] ( identifier[pid] , identifier[address] ):
identifier[debug] . identifier[enable_one_shot_page_breakpoint] ( identifier[pid] , identifier[address] )
identifier[found] = keyword[True]
keyword[if] keyword[not] identifier[found] :
identifier[print] ( literal[string] ) | def do_bo(self, arg):
"""
[~process] bo <address> - make a code breakpoint one-shot
[~thread] bo <address> - make a hardware breakpoint one-shot
[~process] bo <address-address> - make a memory breakpoint one-shot
[~process] bo <address> <size> - make a memory breakpoint one-shot
"""
token_list = self.split_tokens(arg, 1, 2)
(pid, tid, address, size) = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.enable_one_shot_hardware_breakpoint(tid, address)
found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tid']]
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.enable_one_shot_code_breakpoint(pid, address)
found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pid']] # depends on [control=['if'], data=[]]
elif debug.has_page_breakpoint(pid, address):
debug.enable_one_shot_page_breakpoint(pid, address)
found = True # depends on [control=['if'], data=[]]
if not found:
print('Error: breakpoint not found.') # depends on [control=['if'], data=[]] |
def gfpa(target, illmin, abcorr, obsrvr, relate, refval, adjust, step, nintvals,
         cnfine, result=None):
    """
    Determine time intervals for which a specified constraint
    on the phase angle between an illumination source, a target,
    and observer body centers is met.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfpa_c.html
    :param target: Name of the target body.
    :type target: str
    :param illmin: Name of the illuminating body.
    :type illmin: str
    :param abcorr: Aberration correction flag.
    :type abcorr: str
    :param obsrvr: Name of the observing body.
    :type obsrvr: str
    :param relate: Relational operator.
    :type relate: str
    :param refval: Reference value.
    :type refval: float
    :param adjust: Adjustment value for absolute extrema searches.
    :type adjust: float
    :param step: Step size used for locating extrema and roots.
    :type step: float
    :param nintvals: Workspace window interval count.
    :type nintvals: int
    :param cnfine: SPICE window to which the search is restricted.
    :type cnfine: spiceypy.utils.support_types.SpiceCell
    :param result: Optional SPICE window containing results.
    :type result: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(cnfine, stypes.SpiceCell)
    assert cnfine.is_double()
    if result is None:
        # Allocate a result window when the caller did not supply one.
        result = stypes.SPICEDOUBLE_CELL(2000)
    else:
        assert isinstance(result, stypes.SpiceCell)
        assert result.is_double()
    # Convert the Python arguments to their C counterparts before
    # handing everything to the CSPICE geometry finder.
    c_strings = [stypes.stringToCharP(s)
                 for s in (target, illmin, abcorr, obsrvr, relate)]
    libspice.gfpa_c(*c_strings,
                    ctypes.c_double(refval), ctypes.c_double(adjust),
                    ctypes.c_double(step), ctypes.c_int(nintvals),
                    ctypes.byref(cnfine), ctypes.byref(result))
    return result
constant[
Determine time intervals for which a specified constraint
on the phase angle between an illumination source, a target,
and observer body centers is met.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfpa_c.html
:param target: Name of the target body.
:type target: str
:param illmin: Name of the illuminating body.
:type illmin: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param obsrvr: Name of the observing body.
:type obsrvr: str
:param relate: Relational operator.
:type relate: str
:param refval: Reference value.
:type refval: float
:param adjust: Adjustment value for absolute extrema searches.
:type adjust: float
:param step: Step size used for locating extrema and roots.
:type step: float
:param nintvals: Workspace window interval count.
:type nintvals: int
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: Optional SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell
]
assert[call[name[isinstance], parameter[name[cnfine], name[stypes].SpiceCell]]]
assert[call[name[cnfine].is_double, parameter[]]]
if compare[name[result] is constant[None]] begin[:]
variable[result] assign[=] call[name[stypes].SPICEDOUBLE_CELL, parameter[constant[2000]]]
variable[target] assign[=] call[name[stypes].stringToCharP, parameter[name[target]]]
variable[illmin] assign[=] call[name[stypes].stringToCharP, parameter[name[illmin]]]
variable[abcorr] assign[=] call[name[stypes].stringToCharP, parameter[name[abcorr]]]
variable[obsrvr] assign[=] call[name[stypes].stringToCharP, parameter[name[obsrvr]]]
variable[relate] assign[=] call[name[stypes].stringToCharP, parameter[name[relate]]]
variable[refval] assign[=] call[name[ctypes].c_double, parameter[name[refval]]]
variable[adjust] assign[=] call[name[ctypes].c_double, parameter[name[adjust]]]
variable[step] assign[=] call[name[ctypes].c_double, parameter[name[step]]]
variable[nintvals] assign[=] call[name[ctypes].c_int, parameter[name[nintvals]]]
call[name[libspice].gfpa_c, parameter[name[target], name[illmin], name[abcorr], name[obsrvr], name[relate], name[refval], name[adjust], name[step], name[nintvals], call[name[ctypes].byref, parameter[name[cnfine]]], call[name[ctypes].byref, parameter[name[result]]]]]
return[name[result]] | keyword[def] identifier[gfpa] ( identifier[target] , identifier[illmin] , identifier[abcorr] , identifier[obsrvr] , identifier[relate] , identifier[refval] , identifier[adjust] , identifier[step] , identifier[nintvals] ,
identifier[cnfine] , identifier[result] = keyword[None] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[cnfine] , identifier[stypes] . identifier[SpiceCell] )
keyword[assert] identifier[cnfine] . identifier[is_double] ()
keyword[if] identifier[result] keyword[is] keyword[None] :
identifier[result] = identifier[stypes] . identifier[SPICEDOUBLE_CELL] ( literal[int] )
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[result] , identifier[stypes] . identifier[SpiceCell] )
keyword[assert] identifier[result] . identifier[is_double] ()
identifier[target] = identifier[stypes] . identifier[stringToCharP] ( identifier[target] )
identifier[illmin] = identifier[stypes] . identifier[stringToCharP] ( identifier[illmin] )
identifier[abcorr] = identifier[stypes] . identifier[stringToCharP] ( identifier[abcorr] )
identifier[obsrvr] = identifier[stypes] . identifier[stringToCharP] ( identifier[obsrvr] )
identifier[relate] = identifier[stypes] . identifier[stringToCharP] ( identifier[relate] )
identifier[refval] = identifier[ctypes] . identifier[c_double] ( identifier[refval] )
identifier[adjust] = identifier[ctypes] . identifier[c_double] ( identifier[adjust] )
identifier[step] = identifier[ctypes] . identifier[c_double] ( identifier[step] )
identifier[nintvals] = identifier[ctypes] . identifier[c_int] ( identifier[nintvals] )
identifier[libspice] . identifier[gfpa_c] ( identifier[target] , identifier[illmin] , identifier[abcorr] , identifier[obsrvr] , identifier[relate] , identifier[refval] ,
identifier[adjust] , identifier[step] , identifier[nintvals] , identifier[ctypes] . identifier[byref] ( identifier[cnfine] ),
identifier[ctypes] . identifier[byref] ( identifier[result] ))
keyword[return] identifier[result] | def gfpa(target, illmin, abcorr, obsrvr, relate, refval, adjust, step, nintvals, cnfine, result=None):
"""
Determine time intervals for which a specified constraint
on the phase angle between an illumination source, a target,
and observer body centers is met.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfpa_c.html
:param target: Name of the target body.
:type target: str
:param illmin: Name of the illuminating body.
:type illmin: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param obsrvr: Name of the observing body.
:type obsrvr: str
:param relate: Relational operator.
:type relate: str
:param refval: Reference value.
:type refval: float
:param adjust: Adjustment value for absolute extrema searches.
:type adjust: float
:param step: Step size used for locating extrema and roots.
:type step: float
:param nintvals: Workspace window interval count.
:type nintvals: int
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: Optional SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cnfine, stypes.SpiceCell)
assert cnfine.is_double()
if result is None:
result = stypes.SPICEDOUBLE_CELL(2000) # depends on [control=['if'], data=['result']]
else:
assert isinstance(result, stypes.SpiceCell)
assert result.is_double()
target = stypes.stringToCharP(target)
illmin = stypes.stringToCharP(illmin)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
relate = stypes.stringToCharP(relate)
refval = ctypes.c_double(refval)
adjust = ctypes.c_double(adjust)
step = ctypes.c_double(step)
nintvals = ctypes.c_int(nintvals)
libspice.gfpa_c(target, illmin, abcorr, obsrvr, relate, refval, adjust, step, nintvals, ctypes.byref(cnfine), ctypes.byref(result))
return result |
def process_IN_MOVED_TO(self, raw_event):
    """
    Handle an inotify IN_MOVED_TO event (an entry moved into a watched
    directory).

    Map the source path with the destination path (+ date for
    cleaning).

    :param raw_event: Raw inotify event; ``wd`` identifies the watch,
        ``name`` is the basename that was moved in, and ``cookie`` pairs
        this event with its IN_MOVED_FROM counterpart.
    :return: The event produced by :meth:`process_default`, enriched
        with the move cookie and, when the source is known, the original
        pathname of the moved file/directory.
    """
    watch_ = self._watch_manager.get_watch(raw_event.wd)
    path_ = watch_.path
    # Absolute, normalized destination path of the moved entry.
    dst_path = os.path.normpath(os.path.join(path_, raw_event.name))
    # Look up the matching IN_MOVED_FROM via the kernel-assigned cookie.
    mv_ = self._mv_cookie.get(raw_event.cookie)
    to_append = {'cookie': raw_event.cookie}
    if mv_ is not None:
        # Record src -> (dst, timestamp); the timestamp allows stale
        # entries to be purged later.
        self._mv[mv_[0]] = (dst_path, datetime.now())
        # Assume the IN_MOVED_FROM event is always queued before its
        # associated IN_MOVED_TO event (they share a common cookie).
        # Under that assumption we can provide, as additional
        # information on the IN_MOVED_TO event, the original pathname
        # of the moved file/directory.
        to_append['src_pathname'] = mv_[0]
    elif (raw_event.mask & IN_ISDIR and watch_.auto_add and
          not watch_.exclude_filter(dst_path)):
        # A directory was "moved in" from an unknown (unwatched) source
        # and auto_add is enabled: manually add watches on the inner
        # subtrees. The newly monitored directory inherits attributes
        # from its parent directory.
        self._watch_manager.add_watch(dst_path, watch_.mask,
                                      proc_fun=watch_.proc_fun,
                                      rec=True, auto_add=True,
                                      exclude_filter=watch_.exclude_filter)
    return self.process_default(raw_event, to_append)
constant[
Map the source path with the destination path (+ date for
cleaning).
]
variable[watch_] assign[=] call[name[self]._watch_manager.get_watch, parameter[name[raw_event].wd]]
variable[path_] assign[=] name[watch_].path
variable[dst_path] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[path_], name[raw_event].name]]]]
variable[mv_] assign[=] call[name[self]._mv_cookie.get, parameter[name[raw_event].cookie]]
variable[to_append] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c69b0>], [<ast.Attribute object at 0x7da20c6c41f0>]]
if compare[name[mv_] is_not constant[None]] begin[:]
call[name[self]._mv][call[name[mv_]][constant[0]]] assign[=] tuple[[<ast.Name object at 0x7da20c6c5750>, <ast.Call object at 0x7da20c6c6e60>]]
call[name[to_append]][constant[src_pathname]] assign[=] call[name[mv_]][constant[0]]
return[call[name[self].process_default, parameter[name[raw_event], name[to_append]]]] | keyword[def] identifier[process_IN_MOVED_TO] ( identifier[self] , identifier[raw_event] ):
literal[string]
identifier[watch_] = identifier[self] . identifier[_watch_manager] . identifier[get_watch] ( identifier[raw_event] . identifier[wd] )
identifier[path_] = identifier[watch_] . identifier[path]
identifier[dst_path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path_] , identifier[raw_event] . identifier[name] ))
identifier[mv_] = identifier[self] . identifier[_mv_cookie] . identifier[get] ( identifier[raw_event] . identifier[cookie] )
identifier[to_append] ={ literal[string] : identifier[raw_event] . identifier[cookie] }
keyword[if] identifier[mv_] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_mv] [ identifier[mv_] [ literal[int] ]]=( identifier[dst_path] , identifier[datetime] . identifier[now] ())
identifier[to_append] [ literal[string] ]= identifier[mv_] [ literal[int] ]
keyword[elif] ( identifier[raw_event] . identifier[mask] & identifier[IN_ISDIR] keyword[and] identifier[watch_] . identifier[auto_add] keyword[and]
keyword[not] identifier[watch_] . identifier[exclude_filter] ( identifier[dst_path] )):
identifier[self] . identifier[_watch_manager] . identifier[add_watch] ( identifier[dst_path] , identifier[watch_] . identifier[mask] ,
identifier[proc_fun] = identifier[watch_] . identifier[proc_fun] ,
identifier[rec] = keyword[True] , identifier[auto_add] = keyword[True] ,
identifier[exclude_filter] = identifier[watch_] . identifier[exclude_filter] )
keyword[return] identifier[self] . identifier[process_default] ( identifier[raw_event] , identifier[to_append] ) | def process_IN_MOVED_TO(self, raw_event):
"""
Map the source path with the destination path (+ date for
cleaning).
"""
watch_ = self._watch_manager.get_watch(raw_event.wd)
path_ = watch_.path
dst_path = os.path.normpath(os.path.join(path_, raw_event.name))
mv_ = self._mv_cookie.get(raw_event.cookie)
to_append = {'cookie': raw_event.cookie}
if mv_ is not None:
self._mv[mv_[0]] = (dst_path, datetime.now())
# Let's assume that IN_MOVED_FROM event is always queued before
# that its associated (they share a common cookie) IN_MOVED_TO
# event is queued itself. It is then possible in that scenario
# to provide as additional information to the IN_MOVED_TO event
# the original pathname of the moved file/directory.
to_append['src_pathname'] = mv_[0] # depends on [control=['if'], data=['mv_']]
elif raw_event.mask & IN_ISDIR and watch_.auto_add and (not watch_.exclude_filter(dst_path)):
# We got a diretory that's "moved in" from an unknown source and
# auto_add is enabled. Manually add watches to the inner subtrees.
# The newly monitored directory inherits attributes from its
# parent directory.
self._watch_manager.add_watch(dst_path, watch_.mask, proc_fun=watch_.proc_fun, rec=True, auto_add=True, exclude_filter=watch_.exclude_filter) # depends on [control=['if'], data=[]]
return self.process_default(raw_event, to_append) |
def get_sectie_by_id_and_afdeling(self, id, afdeling):
    '''
    Get a `sectie`.
    :param id: An id of a sectie. eg. "A"
    :param afdeling: The :class:`Afdeling` in which the `sectie` can \
        be found. Can also be the id of an `afdeling`.
    :rtype: A :class:`Sectie`.
    '''
    # Accept either an Afdeling object or its bare id.
    if hasattr(afdeling, 'id'):
        aid = afdeling.id
    else:
        aid = afdeling
        afdeling = self.get_kadastrale_afdeling_by_id(aid)
    afdeling.clear_gateway()

    def build_sectie():
        # Fetch the section with full geometry, Lambert 72 (EPSG:31370).
        url = self.base_url + '/municipality/%s/department/%s/section/%s' % (afdeling.gemeente.id, afdeling.id, id)
        res = capakey_rest_gateway_request(
            url,
            self.base_headers,
            {
                'geometry': 'full',
                'srs': '31370'
            },
        ).json()
        geometry = res['geometry']
        return Sectie(
            res['sectionCode'],
            afdeling,
            self._parse_centroid(geometry['center']),
            self._parse_bounding_box(geometry['boundingBox']),
            geometry['shape'],
        )

    cache = self.caches['long']
    if cache.is_configured:
        cache_key = 'get_sectie_by_id_and_afdeling_rest#%s#%s' % (id, aid)
        sectie = cache.get_or_create(cache_key, build_sectie)
    else:
        sectie = build_sectie()
    sectie.set_gateway(self)
    return sectie
constant[
Get a `sectie`.
:param id: An id of a sectie. eg. "A"
:param afdeling: The :class:`Afdeling` for in which the `sectie` can be found. Can also be the id of and `afdeling`.
:rtype: A :class:`Sectie`.
]
<ast.Try object at 0x7da1b0a2c910>
call[name[afdeling].clear_gateway, parameter[]]
def function[creator, parameter[]]:
variable[url] assign[=] binary_operation[name[self].base_url + binary_operation[constant[/municipality/%s/department/%s/section/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0a2f6d0>, <ast.Attribute object at 0x7da1b0a2f1c0>, <ast.Name object at 0x7da1b0a2f280>]]]]
variable[h] assign[=] name[self].base_headers
variable[p] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a2f7c0>, <ast.Constant object at 0x7da1b0a2dc00>], [<ast.Constant object at 0x7da1b0a2d810>, <ast.Constant object at 0x7da1b0a2f6a0>]]
variable[res] assign[=] call[call[name[capakey_rest_gateway_request], parameter[name[url], name[h], name[p]]].json, parameter[]]
return[call[name[Sectie], parameter[call[name[res]][constant[sectionCode]], name[afdeling], call[name[self]._parse_centroid, parameter[call[call[name[res]][constant[geometry]]][constant[center]]]], call[name[self]._parse_bounding_box, parameter[call[call[name[res]][constant[geometry]]][constant[boundingBox]]]], call[call[name[res]][constant[geometry]]][constant[shape]]]]]
if call[name[self].caches][constant[long]].is_configured begin[:]
variable[key] assign[=] binary_operation[constant[get_sectie_by_id_and_afdeling_rest#%s#%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0a2dbd0>, <ast.Name object at 0x7da1b0a2d3f0>]]]
variable[sectie] assign[=] call[call[name[self].caches][constant[long]].get_or_create, parameter[name[key], name[creator]]]
call[name[sectie].set_gateway, parameter[name[self]]]
return[name[sectie]] | keyword[def] identifier[get_sectie_by_id_and_afdeling] ( identifier[self] , identifier[id] , identifier[afdeling] ):
literal[string]
keyword[try] :
identifier[aid] = identifier[afdeling] . identifier[id]
keyword[except] identifier[AttributeError] :
identifier[aid] = identifier[afdeling]
identifier[afdeling] = identifier[self] . identifier[get_kadastrale_afdeling_by_id] ( identifier[aid] )
identifier[afdeling] . identifier[clear_gateway] ()
keyword[def] identifier[creator] ():
identifier[url] = identifier[self] . identifier[base_url] + literal[string] %( identifier[afdeling] . identifier[gemeente] . identifier[id] , identifier[afdeling] . identifier[id] , identifier[id] )
identifier[h] = identifier[self] . identifier[base_headers]
identifier[p] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[res] = identifier[capakey_rest_gateway_request] ( identifier[url] , identifier[h] , identifier[p] ). identifier[json] ()
keyword[return] identifier[Sectie] (
identifier[res] [ literal[string] ],
identifier[afdeling] ,
identifier[self] . identifier[_parse_centroid] ( identifier[res] [ literal[string] ][ literal[string] ]),
identifier[self] . identifier[_parse_bounding_box] ( identifier[res] [ literal[string] ][ literal[string] ]),
identifier[res] [ literal[string] ][ literal[string] ],
)
keyword[if] identifier[self] . identifier[caches] [ literal[string] ]. identifier[is_configured] :
identifier[key] = literal[string] %( identifier[id] , identifier[aid] )
identifier[sectie] = identifier[self] . identifier[caches] [ literal[string] ]. identifier[get_or_create] ( identifier[key] , identifier[creator] )
keyword[else] :
identifier[sectie] = identifier[creator] ()
identifier[sectie] . identifier[set_gateway] ( identifier[self] )
keyword[return] identifier[sectie] | def get_sectie_by_id_and_afdeling(self, id, afdeling):
"""
Get a `sectie`.
:param id: An id of a sectie. eg. "A"
:param afdeling: The :class:`Afdeling` for in which the `sectie` can be found. Can also be the id of and `afdeling`.
:rtype: A :class:`Sectie`.
"""
try:
aid = afdeling.id # depends on [control=['try'], data=[]]
except AttributeError:
aid = afdeling
afdeling = self.get_kadastrale_afdeling_by_id(aid) # depends on [control=['except'], data=[]]
afdeling.clear_gateway()
def creator():
url = self.base_url + '/municipality/%s/department/%s/section/%s' % (afdeling.gemeente.id, afdeling.id, id)
h = self.base_headers
p = {'geometry': 'full', 'srs': '31370'}
res = capakey_rest_gateway_request(url, h, p).json()
return Sectie(res['sectionCode'], afdeling, self._parse_centroid(res['geometry']['center']), self._parse_bounding_box(res['geometry']['boundingBox']), res['geometry']['shape'])
if self.caches['long'].is_configured:
key = 'get_sectie_by_id_and_afdeling_rest#%s#%s' % (id, aid)
sectie = self.caches['long'].get_or_create(key, creator) # depends on [control=['if'], data=[]]
else:
sectie = creator()
sectie.set_gateway(self)
return sectie |
def add_default_parameter_values(self, sam_template):
    """
    Merge template parameter defaults into the user-supplied values.

    Every parameter declared under the template's ``Parameters`` section
    that carries a ``Default`` and was not explicitly provided by the
    user has its default copied into ``self.parameter_values``.

    Example:
        If the template contains the following parameters defined
        Parameters:
            Param1:
                Type: String
                Default: default_value
            Param2:
                Type: String
                Default: default_value
        And, the user explicitly provided the following parameter values:
        {
            Param2: "new value"
        }
        then, this method will grab default value for Param1 and return the
        following result:
        {
            Param1: "default_value",
            Param2: "new value"
        }
    :param dict sam_template: SAM template
    :return dict: Merged parameter values (also stored on
        ``self.parameter_values``)
    """
    parameter_definition = sam_template.get("Parameters", None)
    if parameter_definition and isinstance(parameter_definition, dict):
        for param_name, value in parameter_definition.items():
            # Only fill in defaults for parameters the user did not set,
            # and only when the definition actually declares a Default.
            if param_name not in self.parameter_values and isinstance(value, dict) and "Default" in value:
                self.parameter_values[param_name] = value["Default"]
    # Always return the merged mapping; previously only the early-exit
    # path returned it (the mutate path implicitly returned None),
    # contradicting the documented contract.
    return self.parameter_values
constant[
Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values
]
variable[parameter_definition] assign[=] call[name[sam_template].get, parameter[constant[Parameters], constant[None]]]
if <ast.BoolOp object at 0x7da2054a6920> begin[:]
return[name[self].parameter_values]
for taget[tuple[[<ast.Name object at 0x7da2054a70d0>, <ast.Name object at 0x7da2054a7010>]]] in starred[call[name[parameter_definition].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da2054a7a30> begin[:]
call[name[self].parameter_values][name[param_name]] assign[=] call[name[value]][constant[Default]] | keyword[def] identifier[add_default_parameter_values] ( identifier[self] , identifier[sam_template] ):
literal[string]
identifier[parameter_definition] = identifier[sam_template] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[parameter_definition] keyword[or] keyword[not] identifier[isinstance] ( identifier[parameter_definition] , identifier[dict] ):
keyword[return] identifier[self] . identifier[parameter_values]
keyword[for] identifier[param_name] , identifier[value] keyword[in] identifier[parameter_definition] . identifier[items] ():
keyword[if] identifier[param_name] keyword[not] keyword[in] identifier[self] . identifier[parameter_values] keyword[and] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[value] :
identifier[self] . identifier[parameter_values] [ identifier[param_name] ]= identifier[value] [ literal[string] ] | def add_default_parameter_values(self, sam_template):
"""
Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values
"""
parameter_definition = sam_template.get('Parameters', None)
if not parameter_definition or not isinstance(parameter_definition, dict):
return self.parameter_values # depends on [control=['if'], data=[]]
for (param_name, value) in parameter_definition.items():
if param_name not in self.parameter_values and isinstance(value, dict) and ('Default' in value):
self.parameter_values[param_name] = value['Default'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def total_num_violations(self):
    """
    Returns the total number of lines in the diff
    that are in violation.
    """
    total = 0
    # Sum the per-file violation line counts.
    for violation_summary in self._diff_violations().values():
        total += len(violation_summary.lines)
    return total
constant[
Returns the total number of lines in the diff
that are in violation.
]
return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b26ad2a0>]]] | keyword[def] identifier[total_num_violations] ( identifier[self] ):
literal[string]
keyword[return] identifier[sum] (
identifier[len] ( identifier[summary] . identifier[lines] )
keyword[for] identifier[summary]
keyword[in] identifier[self] . identifier[_diff_violations] (). identifier[values] ()
) | def total_num_violations(self):
"""
Returns the total number of lines in the diff
that are in violation.
"""
return sum((len(summary.lines) for summary in self._diff_violations().values())) |
def x_rolls(self, number, count=0, func=sum):
    '''Yield ``number`` successive dice rolls, one per iteration.
    :param count: [0] Return list of ``count`` sums
    :param func: [sum] Apply func to list of individual die rolls func([])
    '''
    for _ in range(number):
        yield self.roll(count, func)
constant[Iterator of number dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
]
for taget[name[x]] in starred[call[name[range], parameter[name[number]]]] begin[:]
<ast.Yield object at 0x7da2043469e0> | keyword[def] identifier[x_rolls] ( identifier[self] , identifier[number] , identifier[count] = literal[int] , identifier[func] = identifier[sum] ):
literal[string]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[number] ):
keyword[yield] identifier[self] . identifier[roll] ( identifier[count] , identifier[func] ) | def x_rolls(self, number, count=0, func=sum):
"""Iterator of number dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
"""
for x in range(number):
yield self.roll(count, func) # depends on [control=['for'], data=[]] |
def cluster_manager(self):
    """
    Returns an instance of :class:`~.couchbase.admin.Admin` which may be
    used to create and manage buckets in the cluster.
    """
    opts = self.authenticator.get_credentials()['options']
    return Admin(opts.get('username'),
                 opts.get('password'),
                 connection_string=str(self.connstr))
constant[
Returns an instance of :class:`~.couchbase.admin.Admin` which may be
used to create and manage buckets in the cluster.
]
variable[credentials] assign[=] call[call[name[self].authenticator.get_credentials, parameter[]]][constant[options]]
variable[connection_string] assign[=] call[name[str], parameter[name[self].connstr]]
return[call[name[Admin], parameter[call[name[credentials].get, parameter[constant[username]]], call[name[credentials].get, parameter[constant[password]]]]]] | keyword[def] identifier[cluster_manager] ( identifier[self] ):
literal[string]
identifier[credentials] = identifier[self] . identifier[authenticator] . identifier[get_credentials] ()[ literal[string] ]
identifier[connection_string] = identifier[str] ( identifier[self] . identifier[connstr] )
keyword[return] identifier[Admin] ( identifier[credentials] . identifier[get] ( literal[string] ), identifier[credentials] . identifier[get] ( literal[string] ), identifier[connection_string] = identifier[connection_string] ) | def cluster_manager(self):
"""
Returns an instance of :class:`~.couchbase.admin.Admin` which may be
used to create and manage buckets in the cluster.
"""
credentials = self.authenticator.get_credentials()['options']
connection_string = str(self.connstr)
return Admin(credentials.get('username'), credentials.get('password'), connection_string=connection_string) |
def states(opts, functions, utils, serializers, whitelist=None, proxy=None, context=None):
    '''
    Returns the state modules
    :param dict opts: The Salt options dictionary
    :param dict functions: A dictionary of minion modules, with module names as
        keys and funcs as values.
    .. code-block:: python
        import salt.config
        import salt.loader
        __opts__ = salt.config.minion_config('/etc/salt/minion')
        statemods = salt.loader.states(__opts__, None, None)
    '''
    loader = LazyLoader(
        _module_dirs(opts, 'states'),
        opts,
        tag='states',
        pack={'__salt__': functions, '__proxy__': proxy or {}},
        whitelist=whitelist,
    )
    # Expose the loader to itself and inject the shared helper maps.
    loader.pack['__states__'] = loader
    loader.pack['__utils__'] = utils
    loader.pack['__serializers__'] = serializers
    loader.pack['__context__'] = context if context is not None else {}
    return loader
constant[
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
]
if compare[name[context] is constant[None]] begin[:]
variable[context] assign[=] dictionary[[], []]
variable[ret] assign[=] call[name[LazyLoader], parameter[call[name[_module_dirs], parameter[name[opts], constant[states]]], name[opts]]]
call[name[ret].pack][constant[__states__]] assign[=] name[ret]
call[name[ret].pack][constant[__utils__]] assign[=] name[utils]
call[name[ret].pack][constant[__serializers__]] assign[=] name[serializers]
call[name[ret].pack][constant[__context__]] assign[=] name[context]
return[name[ret]] | keyword[def] identifier[states] ( identifier[opts] , identifier[functions] , identifier[utils] , identifier[serializers] , identifier[whitelist] = keyword[None] , identifier[proxy] = keyword[None] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context] ={}
identifier[ret] = identifier[LazyLoader] (
identifier[_module_dirs] ( identifier[opts] , literal[string] ),
identifier[opts] ,
identifier[tag] = literal[string] ,
identifier[pack] ={ literal[string] : identifier[functions] , literal[string] : identifier[proxy] keyword[or] {}},
identifier[whitelist] = identifier[whitelist] ,
)
identifier[ret] . identifier[pack] [ literal[string] ]= identifier[ret]
identifier[ret] . identifier[pack] [ literal[string] ]= identifier[utils]
identifier[ret] . identifier[pack] [ literal[string] ]= identifier[serializers]
identifier[ret] . identifier[pack] [ literal[string] ]= identifier[context]
keyword[return] identifier[ret] | def states(opts, functions, utils, serializers, whitelist=None, proxy=None, context=None):
"""
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
"""
if context is None:
context = {} # depends on [control=['if'], data=['context']]
ret = LazyLoader(_module_dirs(opts, 'states'), opts, tag='states', pack={'__salt__': functions, '__proxy__': proxy or {}}, whitelist=whitelist)
ret.pack['__states__'] = ret
ret.pack['__utils__'] = utils
ret.pack['__serializers__'] = serializers
ret.pack['__context__'] = context
return ret |
def apply_acl(self, equipments, vlan, environment, network):
    '''Apply the file acl in equipments
    :param equipments: list of equipments
    :param vlan: Vvlan
    :param environment: Environment
    :param network: v4 or v6
    :raise Exception: Failed to apply acl
    :return: True case Apply and sysout of script
    '''
    payload = {
        'equipments': equipments,
        'vlan': vlan,
        'environment': environment,
        'network': network,
    }
    code, xml = self.submit({'vlan': payload}, 'POST', 'vlan/apply/acl/')
    return self.response(code, xml)
constant[Apply the file acl in equipments
:param equipments: list of equipments
:param vlan: Vvlan
:param environment: Environment
:param network: v4 or v6
:raise Exception: Failed to apply acl
:return: True case Apply and sysout of script
]
variable[vlan_map] assign[=] call[name[dict], parameter[]]
call[name[vlan_map]][constant[equipments]] assign[=] name[equipments]
call[name[vlan_map]][constant[vlan]] assign[=] name[vlan]
call[name[vlan_map]][constant[environment]] assign[=] name[environment]
call[name[vlan_map]][constant[network]] assign[=] name[network]
variable[url] assign[=] constant[vlan/apply/acl/]
<ast.Tuple object at 0x7da2041d8550> assign[=] call[name[self].submit, parameter[dictionary[[<ast.Constant object at 0x7da2041da0b0>], [<ast.Name object at 0x7da2041d8490>]], constant[POST], name[url]]]
return[call[name[self].response, parameter[name[code], name[xml]]]] | keyword[def] identifier[apply_acl] ( identifier[self] , identifier[equipments] , identifier[vlan] , identifier[environment] , identifier[network] ):
literal[string]
identifier[vlan_map] = identifier[dict] ()
identifier[vlan_map] [ literal[string] ]= identifier[equipments]
identifier[vlan_map] [ literal[string] ]= identifier[vlan]
identifier[vlan_map] [ literal[string] ]= identifier[environment]
identifier[vlan_map] [ literal[string] ]= identifier[network]
identifier[url] = literal[string]
identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ({ literal[string] : identifier[vlan_map] }, literal[string] , identifier[url] )
keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] ) | def apply_acl(self, equipments, vlan, environment, network):
"""Apply the file acl in equipments
:param equipments: list of equipments
:param vlan: Vvlan
:param environment: Environment
:param network: v4 or v6
:raise Exception: Failed to apply acl
:return: True case Apply and sysout of script
"""
vlan_map = dict()
vlan_map['equipments'] = equipments
vlan_map['vlan'] = vlan
vlan_map['environment'] = environment
vlan_map['network'] = network
url = 'vlan/apply/acl/'
(code, xml) = self.submit({'vlan': vlan_map}, 'POST', url)
return self.response(code, xml) |
def _migrate_library(workspace_dir: pathlib.Path, do_logging: bool=True) -> pathlib.Path:
""" Migrate library to latest version. """
library_path_11 = workspace_dir / "Nion Swift Workspace.nslib"
library_path_12 = workspace_dir / "Nion Swift Library 12.nslib"
library_path_13 = workspace_dir / "Nion Swift Library 13.nslib"
library_paths = (library_path_11, library_path_12)
library_path_latest = library_path_13
if not os.path.exists(library_path_latest):
for library_path in reversed(library_paths):
if os.path.exists(library_path):
if do_logging:
logging.info("Migrating library: %s -> %s", library_path, library_path_latest)
shutil.copyfile(library_path, library_path_latest)
break
return library_path_latest | def function[_migrate_library, parameter[workspace_dir, do_logging]]:
constant[ Migrate library to latest version. ]
variable[library_path_11] assign[=] binary_operation[name[workspace_dir] / constant[Nion Swift Workspace.nslib]]
variable[library_path_12] assign[=] binary_operation[name[workspace_dir] / constant[Nion Swift Library 12.nslib]]
variable[library_path_13] assign[=] binary_operation[name[workspace_dir] / constant[Nion Swift Library 13.nslib]]
variable[library_paths] assign[=] tuple[[<ast.Name object at 0x7da1b0e4cd00>, <ast.Name object at 0x7da1b0e4f040>]]
variable[library_path_latest] assign[=] name[library_path_13]
if <ast.UnaryOp object at 0x7da1b0e4eb60> begin[:]
for taget[name[library_path]] in starred[call[name[reversed], parameter[name[library_paths]]]] begin[:]
if call[name[os].path.exists, parameter[name[library_path]]] begin[:]
if name[do_logging] begin[:]
call[name[logging].info, parameter[constant[Migrating library: %s -> %s], name[library_path], name[library_path_latest]]]
call[name[shutil].copyfile, parameter[name[library_path], name[library_path_latest]]]
break
return[name[library_path_latest]] | keyword[def] identifier[_migrate_library] ( identifier[workspace_dir] : identifier[pathlib] . identifier[Path] , identifier[do_logging] : identifier[bool] = keyword[True] )-> identifier[pathlib] . identifier[Path] :
literal[string]
identifier[library_path_11] = identifier[workspace_dir] / literal[string]
identifier[library_path_12] = identifier[workspace_dir] / literal[string]
identifier[library_path_13] = identifier[workspace_dir] / literal[string]
identifier[library_paths] =( identifier[library_path_11] , identifier[library_path_12] )
identifier[library_path_latest] = identifier[library_path_13]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[library_path_latest] ):
keyword[for] identifier[library_path] keyword[in] identifier[reversed] ( identifier[library_paths] ):
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[library_path] ):
keyword[if] identifier[do_logging] :
identifier[logging] . identifier[info] ( literal[string] , identifier[library_path] , identifier[library_path_latest] )
identifier[shutil] . identifier[copyfile] ( identifier[library_path] , identifier[library_path_latest] )
keyword[break]
keyword[return] identifier[library_path_latest] | def _migrate_library(workspace_dir: pathlib.Path, do_logging: bool=True) -> pathlib.Path:
""" Migrate library to latest version. """
library_path_11 = workspace_dir / 'Nion Swift Workspace.nslib'
library_path_12 = workspace_dir / 'Nion Swift Library 12.nslib'
library_path_13 = workspace_dir / 'Nion Swift Library 13.nslib'
library_paths = (library_path_11, library_path_12)
library_path_latest = library_path_13
if not os.path.exists(library_path_latest):
for library_path in reversed(library_paths):
if os.path.exists(library_path):
if do_logging:
logging.info('Migrating library: %s -> %s', library_path, library_path_latest) # depends on [control=['if'], data=[]]
shutil.copyfile(library_path, library_path_latest)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['library_path']] # depends on [control=['if'], data=[]]
return library_path_latest |
def user_exists(name):
    """
    Check if a PostgreSQL user exists.
    """
    # Explicit mapping instead of locals() keeps the substitution obvious.
    query = '''psql -t -A -c "SELECT COUNT(*) FROM pg_user WHERE usename = '%(name)s';"''' % {'name': name}
    with settings(hide('running', 'stdout', 'stderr', 'warnings'),
                  warn_only=True):
        res = _run_as_pg(query)
    return res == "1"
constant[
Check if a PostgreSQL user exists.
]
with call[name[settings], parameter[call[name[hide], parameter[constant[running], constant[stdout], constant[stderr], constant[warnings]]]]] begin[:]
variable[res] assign[=] call[name[_run_as_pg], parameter[binary_operation[constant[psql -t -A -c "SELECT COUNT(*) FROM pg_user WHERE usename = '%(name)s';"] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]]]
return[compare[name[res] equal[==] constant[1]]] | keyword[def] identifier[user_exists] ( identifier[name] ):
literal[string]
keyword[with] identifier[settings] ( identifier[hide] ( literal[string] , literal[string] , literal[string] , literal[string] ),
identifier[warn_only] = keyword[True] ):
identifier[res] = identifier[_run_as_pg] ( literal[string] % identifier[locals] ())
keyword[return] ( identifier[res] == literal[string] ) | def user_exists(name):
"""
Check if a PostgreSQL user exists.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = _run_as_pg('psql -t -A -c "SELECT COUNT(*) FROM pg_user WHERE usename = \'%(name)s\';"' % locals()) # depends on [control=['with'], data=[]]
return res == '1' |
def get_stp_mst_detail_output_cist_port_oper_bpdu_filter(self, **kwargs):
    """Auto Generated Code

    Build the XML payload for the CIST port ``oper-bpdu-filter`` leaf of a
    ``get_stp_mst_detail`` RPC and pass it to the callback.

    :param kwargs: must contain ``oper_bpdu_filter`` (text for the leaf);
        may contain ``callback`` to override ``self._callback``.
    :return: whatever the callback returns for the built element tree.
    """
    # The original code built a throwaway ET.Element("config") that was
    # immediately overwritten; the RPC element itself is the tree root.
    config = ET.Element("get_stp_mst_detail")
    output = ET.SubElement(config, "output")
    cist = ET.SubElement(output, "cist")
    port = ET.SubElement(cist, "port")
    oper_bpdu_filter = ET.SubElement(port, "oper-bpdu-filter")
    oper_bpdu_filter.text = kwargs.pop('oper_bpdu_filter')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_stp_mst_detail] assign[=] call[name[ET].Element, parameter[constant[get_stp_mst_detail]]]
variable[config] assign[=] name[get_stp_mst_detail]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_stp_mst_detail], constant[output]]]
variable[cist] assign[=] call[name[ET].SubElement, parameter[name[output], constant[cist]]]
variable[port] assign[=] call[name[ET].SubElement, parameter[name[cist], constant[port]]]
variable[oper_bpdu_filter] assign[=] call[name[ET].SubElement, parameter[name[port], constant[oper-bpdu-filter]]]
name[oper_bpdu_filter].text assign[=] call[name[kwargs].pop, parameter[constant[oper_bpdu_filter]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_stp_mst_detail_output_cist_port_oper_bpdu_filter] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_stp_mst_detail] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_stp_mst_detail]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_stp_mst_detail] , literal[string] )
identifier[cist] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[port] = identifier[ET] . identifier[SubElement] ( identifier[cist] , literal[string] )
identifier[oper_bpdu_filter] = identifier[ET] . identifier[SubElement] ( identifier[port] , literal[string] )
identifier[oper_bpdu_filter] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_stp_mst_detail_output_cist_port_oper_bpdu_filter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_stp_mst_detail = ET.Element('get_stp_mst_detail')
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, 'output')
cist = ET.SubElement(output, 'cist')
port = ET.SubElement(cist, 'port')
oper_bpdu_filter = ET.SubElement(port, 'oper-bpdu-filter')
oper_bpdu_filter.text = kwargs.pop('oper_bpdu_filter')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def parse(self, importpath):
    """Parse an import path.

    Determines whether the path refers to a native package or starts with
    a known import-path prefix, updating ``native``, ``_prefix`` and
    ``_package`` accordingly.

    :param importpath: import path to parse
    :type importpath: str
    :return: self, with the parsed state stored on it
    :raises ValueError: if no known prefix matches and the path is not native
    """
    # Start from a clean slate on every call.
    self.native = False
    self._prefix = ""
    self._package = ""

    # Strip any URL scheme before matching.
    url = re.sub(r'http://', '', importpath)
    url = re.sub(r'https://', '', url)

    # Packages whose first path segment is listed as native need no
    # prefix handling at all.
    first_segment = url.split('/')[0]
    if first_segment in self.native_packages["packages"]:
        self.native = True
        return self

    # Try each known import-path prefix pattern in turn.
    for pattern in self.known_ipprefixes:
        found = re.search(pattern, url)
        if found is None:
            continue
        self._prefix = found.group(1)
        package_part = found.group(3)
        if package_part:
            self._package = package_part
        return self

    raise ValueError("Import path prefix for '%s' not recognized" % importpath)
constant[Parse import path. Determine if the path is native or starts with known prefix.
:param importpath: import path to parse
:type importpath: str
:return: bool
]
name[self].native assign[=] constant[False]
name[self]._prefix assign[=] constant[]
name[self]._package assign[=] constant[]
variable[url] assign[=] call[name[re].sub, parameter[constant[http://], constant[], name[importpath]]]
variable[url] assign[=] call[name[re].sub, parameter[constant[https://], constant[], name[url]]]
if compare[call[call[name[url].split, parameter[constant[/]]]][constant[0]] in call[name[self].native_packages][constant[packages]]] begin[:]
name[self].native assign[=] constant[True]
return[name[self]]
for taget[name[regex]] in starred[name[self].known_ipprefixes] begin[:]
variable[match] assign[=] call[name[re].search, parameter[name[regex], name[url]]]
if name[match] begin[:]
name[self]._prefix assign[=] call[name[match].group, parameter[constant[1]]]
if call[name[match].group, parameter[constant[3]]] begin[:]
name[self]._package assign[=] call[name[match].group, parameter[constant[3]]]
return[name[self]]
<ast.Raise object at 0x7da1b222dd20> | keyword[def] identifier[parse] ( identifier[self] , identifier[importpath] ):
literal[string]
identifier[self] . identifier[native] = keyword[False]
identifier[self] . identifier[_prefix] = literal[string]
identifier[self] . identifier[_package] = literal[string]
identifier[url] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[importpath] )
identifier[url] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[url] )
keyword[if] identifier[url] . identifier[split] ( literal[string] )[ literal[int] ] keyword[in] identifier[self] . identifier[native_packages] [ literal[string] ]:
identifier[self] . identifier[native] = keyword[True]
keyword[return] identifier[self]
keyword[for] identifier[regex] keyword[in] identifier[self] . identifier[known_ipprefixes] :
identifier[match] = identifier[re] . identifier[search] ( identifier[regex] , identifier[url] )
keyword[if] identifier[match] :
identifier[self] . identifier[_prefix] = identifier[match] . identifier[group] ( literal[int] )
keyword[if] identifier[match] . identifier[group] ( literal[int] ):
identifier[self] . identifier[_package] = identifier[match] . identifier[group] ( literal[int] )
keyword[return] identifier[self]
keyword[raise] identifier[ValueError] ( literal[string] % identifier[importpath] ) | def parse(self, importpath):
"""Parse import path. Determine if the path is native or starts with known prefix.
:param importpath: import path to parse
:type importpath: str
:return: bool
""" # reset default values
self.native = False
self._prefix = ''
self._package = ''
url = re.sub('http://', '', importpath)
url = re.sub('https://', '', url) # is import path native package?
if url.split('/')[0] in self.native_packages['packages']:
self.native = True
return self # depends on [control=['if'], data=[]]
for regex in self.known_ipprefixes:
match = re.search(regex, url)
if match:
self._prefix = match.group(1)
if match.group(3):
self._package = match.group(3) # depends on [control=['if'], data=[]]
return self # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['regex']]
raise ValueError("Import path prefix for '%s' not recognized" % importpath) |
def floating_ip_associate(name, kwargs, call=None):
    '''
    Associate a floating IP address to a server

    .. versionadded:: 2016.3.0

    name
        Name of the node to associate the floating IP with.
    kwargs
        Must contain ``floating_ip``, the address to associate.
    call
        Must be ``'action'``; any other value is rejected.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed typo in the original message: "-a of --action".
            'The floating_ip_associate action must be called with -a or --action.'
        )

    if 'floating_ip' not in kwargs:
        log.error('floating_ip is required')
        return False

    conn = get_conn()
    conn.floating_ip_associate(name, kwargs['floating_ip'])
    return list_nodes()[name]
constant[
Associate a floating IP address to a server
.. versionadded:: 2016.3.0
]
if compare[name[call] not_equal[!=] constant[action]] begin[:]
<ast.Raise object at 0x7da1b1c303d0>
if compare[constant[floating_ip] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[log].error, parameter[constant[floating_ip is required]]]
return[constant[False]]
variable[conn] assign[=] call[name[get_conn], parameter[]]
call[name[conn].floating_ip_associate, parameter[name[name], call[name[kwargs]][constant[floating_ip]]]]
return[call[call[name[list_nodes], parameter[]]][name[name]]] | keyword[def] identifier[floating_ip_associate] ( identifier[name] , identifier[kwargs] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] != literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[conn] = identifier[get_conn] ()
identifier[conn] . identifier[floating_ip_associate] ( identifier[name] , identifier[kwargs] [ literal[string] ])
keyword[return] identifier[list_nodes] ()[ identifier[name] ] | def floating_ip_associate(name, kwargs, call=None):
"""
Associate a floating IP address to a server
.. versionadded:: 2016.3.0
"""
if call != 'action':
raise SaltCloudSystemExit('The floating_ip_associate action must be called with -a of --action.') # depends on [control=['if'], data=[]]
if 'floating_ip' not in kwargs:
log.error('floating_ip is required')
return False # depends on [control=['if'], data=[]]
conn = get_conn()
conn.floating_ip_associate(name, kwargs['floating_ip'])
return list_nodes()[name] |
def playlist_songs_add(
	self,
	songs,
	playlist,
	*,
	after=None,
	before=None,
	index=None,
	position=None
):
	"""Add songs to a playlist.
	Note:
		* Provide no optional arguments to add to end.
		* Provide playlist song dicts for ``after`` and/or ``before``.
		* Provide a zero-based ``index``.
		* Provide a one-based ``position``.
		Songs are inserted *at* given index or position.
		It's also possible to add to the end by using
		``len(songs)`` for index or ``len(songs) + 1`` for position.
	Parameters:
		songs (list): A list of song dicts.
		playlist (dict): A playlist dict.
		after (dict, Optional): A playlist song dict ``songs`` will follow.
		before (dict, Optional): A playlist song dict ``songs`` will precede.
		index (int, Optional): The zero-based index position to insert ``songs``.
		position (int, Optional): The one-based position to insert ``songs``.
	Returns:
		dict: Playlist dict including songs.
	"""
	# Current playlist contents are needed to resolve the insertion point.
	playlist_songs = self.playlist_songs(playlist)
	# Resolve the (previous, next) playlist entries that will bracket the
	# inserted songs, from whichever of after/before/index/position was given.
	prev, next_ = get_ple_prev_next(
		playlist_songs,
		after=after,
		before=before,
		index=index,
		position=position
	)
	songs_len = len(songs)
	for i, song in enumerate(songs):
		# A song dict may carry its identifier under 'storeId', 'trackId',
		# or plain 'id'; the first one present wins, in that order.
		if 'storeId' in song:
			song_id = song['storeId']
		elif 'trackId' in song:
			song_id = song['trackId']
		else:
			song_id = song['id']
		# Insert this song between the current bracketing entries.
		mutation = mc_calls.PlaylistEntriesBatch.create(
			song_id, playlist['id'],
			preceding_entry_id=prev.get('id'),
			following_entry_id=next_.get('id')
		)
		response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
		result = response.body['mutate_response'][0]
		# Stop inserting as soon as one mutation is rejected.
		# TODO: Proper exception on failure.
		if result['response_code'] != 'OK':
			break
		if i < songs_len - 1:
			# The next song must follow the entry just created, so re-fetch
			# it until it is visible.
			# NOTE(review): this busy-waits with no delay or retry limit —
			# confirm whether a sleep/timeout is intended here.
			while True:
				prev = self.playlist_song(result['id'])
				if prev:
					break
	# Return the refreshed playlist including its songs.
	return self.playlist(playlist['id'], include_songs=True)
constant[Add songs to a playlist.
Note:
* Provide no optional arguments to add to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to add to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
songs (list): A list of song dicts.
playlist (dict): A playlist dict.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
]
variable[playlist_songs] assign[=] call[name[self].playlist_songs, parameter[name[playlist]]]
<ast.Tuple object at 0x7da2054a73a0> assign[=] call[name[get_ple_prev_next], parameter[name[playlist_songs]]]
variable[songs_len] assign[=] call[name[len], parameter[name[songs]]]
for taget[tuple[[<ast.Name object at 0x7da2054a6a10>, <ast.Name object at 0x7da2054a6e90>]]] in starred[call[name[enumerate], parameter[name[songs]]]] begin[:]
if compare[constant[storeId] in name[song]] begin[:]
variable[song_id] assign[=] call[name[song]][constant[storeId]]
variable[mutation] assign[=] call[name[mc_calls].PlaylistEntriesBatch.create, parameter[name[song_id], call[name[playlist]][constant[id]]]]
variable[response] assign[=] call[name[self]._call, parameter[name[mc_calls].PlaylistEntriesBatch, name[mutation]]]
variable[result] assign[=] call[call[name[response].body][constant[mutate_response]]][constant[0]]
if compare[call[name[result]][constant[response_code]] not_equal[!=] constant[OK]] begin[:]
break
if compare[name[i] less[<] binary_operation[name[songs_len] - constant[1]]] begin[:]
while constant[True] begin[:]
variable[prev] assign[=] call[name[self].playlist_song, parameter[call[name[result]][constant[id]]]]
if name[prev] begin[:]
break
return[call[name[self].playlist, parameter[call[name[playlist]][constant[id]]]]] | keyword[def] identifier[playlist_songs_add] (
identifier[self] ,
identifier[songs] ,
identifier[playlist] ,
*,
identifier[after] = keyword[None] ,
identifier[before] = keyword[None] ,
identifier[index] = keyword[None] ,
identifier[position] = keyword[None]
):
literal[string]
identifier[playlist_songs] = identifier[self] . identifier[playlist_songs] ( identifier[playlist] )
identifier[prev] , identifier[next_] = identifier[get_ple_prev_next] (
identifier[playlist_songs] ,
identifier[after] = identifier[after] ,
identifier[before] = identifier[before] ,
identifier[index] = identifier[index] ,
identifier[position] = identifier[position]
)
identifier[songs_len] = identifier[len] ( identifier[songs] )
keyword[for] identifier[i] , identifier[song] keyword[in] identifier[enumerate] ( identifier[songs] ):
keyword[if] literal[string] keyword[in] identifier[song] :
identifier[song_id] = identifier[song] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[song] :
identifier[song_id] = identifier[song] [ literal[string] ]
keyword[else] :
identifier[song_id] = identifier[song] [ literal[string] ]
identifier[mutation] = identifier[mc_calls] . identifier[PlaylistEntriesBatch] . identifier[create] (
identifier[song_id] , identifier[playlist] [ literal[string] ],
identifier[preceding_entry_id] = identifier[prev] . identifier[get] ( literal[string] ),
identifier[following_entry_id] = identifier[next_] . identifier[get] ( literal[string] )
)
identifier[response] = identifier[self] . identifier[_call] ( identifier[mc_calls] . identifier[PlaylistEntriesBatch] , identifier[mutation] )
identifier[result] = identifier[response] . identifier[body] [ literal[string] ][ literal[int] ]
keyword[if] identifier[result] [ literal[string] ]!= literal[string] :
keyword[break]
keyword[if] identifier[i] < identifier[songs_len] - literal[int] :
keyword[while] keyword[True] :
identifier[prev] = identifier[self] . identifier[playlist_song] ( identifier[result] [ literal[string] ])
keyword[if] identifier[prev] :
keyword[break]
keyword[return] identifier[self] . identifier[playlist] ( identifier[playlist] [ literal[string] ], identifier[include_songs] = keyword[True] ) | def playlist_songs_add(self, songs, playlist, *, after=None, before=None, index=None, position=None):
"""Add songs to a playlist.
Note:
* Provide no optional arguments to add to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to add to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
songs (list): A list of song dicts.
playlist (dict): A playlist dict.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
"""
playlist_songs = self.playlist_songs(playlist)
(prev, next_) = get_ple_prev_next(playlist_songs, after=after, before=before, index=index, position=position)
songs_len = len(songs)
for (i, song) in enumerate(songs):
if 'storeId' in song:
song_id = song['storeId'] # depends on [control=['if'], data=['song']]
elif 'trackId' in song:
song_id = song['trackId'] # depends on [control=['if'], data=['song']]
else:
song_id = song['id']
mutation = mc_calls.PlaylistEntriesBatch.create(song_id, playlist['id'], preceding_entry_id=prev.get('id'), following_entry_id=next_.get('id'))
response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
result = response.body['mutate_response'][0] # TODO: Proper exception on failure.
if result['response_code'] != 'OK':
break # depends on [control=['if'], data=[]]
if i < songs_len - 1:
while True:
prev = self.playlist_song(result['id'])
if prev:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return self.playlist(playlist['id'], include_songs=True) |
def set_course_timetable(self, course_id, timetables_course_section_id=None, timetables_course_section_id_end_time=None, timetables_course_section_id_location_name=None, timetables_course_section_id_start_time=None, timetables_course_section_id_weekdays=None):
    """
    Set a course timetable.

    Creates and updates "timetable" events for a course.
    Can automaticaly generate a series of calendar events based on simple schedules
    (e.g. "Monday and Wednesday at 2:00pm" )
    Existing timetable events for the course and course sections
    will be updated if they still are part of the timetable.
    Otherwise, they will be deleted.

    :param course_id: required path parameter (course ID).
    :param timetables_course_section_id: timetable objects for the course
        section; "all" targets the entire course.
    :param timetables_course_section_id_weekdays: comma-separated list of
        abbreviated weekdays (Mon, Tue, Wed, Thu, Fri, Sat, Sun).
    :param timetables_course_section_id_start_time: time each event starts
        (e.g. "9:00 am").
    :param timetables_course_section_id_end_time: time each event ends
        (e.g. "9:00 am").
    :param timetables_course_section_id_location_name: location name set on
        each event.
    :return: result of ``self.generic_request`` for the POST call.
    """
    path = {"course_id": course_id}
    params = {}

    # Map each optional keyword onto its form-data field name, keeping the
    # same field order the original one-by-one assignments produced.
    optional_fields = (
        ("timetables[course_section_id]", timetables_course_section_id),
        ("timetables[course_section_id][weekdays]", timetables_course_section_id_weekdays),
        ("timetables[course_section_id][start_time]", timetables_course_section_id_start_time),
        ("timetables[course_section_id][end_time]", timetables_course_section_id_end_time),
        ("timetables[course_section_id][location_name]", timetables_course_section_id_location_name),
    )
    data = {field: value for field, value in optional_fields if value is not None}

    self.logger.debug("POST /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/calendar_events/timetable".format(**path), data=data, params=params, no_data=True)
constant[
Set a course timetable.
Creates and updates "timetable" events for a course.
Can automaticaly generate a series of calendar events based on simple schedules
(e.g. "Monday and Wednesday at 2:00pm" )
Existing timetable events for the course and course sections
will be updated if they still are part of the timetable.
Otherwise, they will be deleted.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[course_id]] assign[=] name[course_id]
constant[An array of timetable objects for the course section specified by course_section_id.
If course_section_id is set to "all", events will be created for the entire course.]
if compare[name[timetables_course_section_id] is_not constant[None]] begin[:]
call[name[data]][constant[timetables[course_section_id]]] assign[=] name[timetables_course_section_id]
constant[A comma-separated list of abbreviated weekdays
(Mon-Monday, Tue-Tuesday, Wed-Wednesday, Thu-Thursday, Fri-Friday, Sat-Saturday, Sun-Sunday)]
if compare[name[timetables_course_section_id_weekdays] is_not constant[None]] begin[:]
call[name[data]][constant[timetables[course_section_id][weekdays]]] assign[=] name[timetables_course_section_id_weekdays]
constant[Time to start each event at (e.g. "9:00 am")]
if compare[name[timetables_course_section_id_start_time] is_not constant[None]] begin[:]
call[name[data]][constant[timetables[course_section_id][start_time]]] assign[=] name[timetables_course_section_id_start_time]
constant[Time to end each event at (e.g. "9:00 am")]
if compare[name[timetables_course_section_id_end_time] is_not constant[None]] begin[:]
call[name[data]][constant[timetables[course_section_id][end_time]]] assign[=] name[timetables_course_section_id_end_time]
constant[A location name to set for each event]
if compare[name[timetables_course_section_id_location_name] is_not constant[None]] begin[:]
call[name[data]][constant[timetables[course_section_id][location_name]]] assign[=] name[timetables_course_section_id_location_name]
call[name[self].logger.debug, parameter[call[constant[POST /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[POST], call[constant[/api/v1/courses/{course_id}/calendar_events/timetable].format, parameter[]]]]] | keyword[def] identifier[set_course_timetable] ( identifier[self] , identifier[course_id] , identifier[timetables_course_section_id] = keyword[None] , identifier[timetables_course_section_id_end_time] = keyword[None] , identifier[timetables_course_section_id_location_name] = keyword[None] , identifier[timetables_course_section_id_start_time] = keyword[None] , identifier[timetables_course_section_id_weekdays] = keyword[None] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[course_id]
literal[string]
keyword[if] identifier[timetables_course_section_id] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[timetables_course_section_id]
literal[string]
keyword[if] identifier[timetables_course_section_id_weekdays] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[timetables_course_section_id_weekdays]
literal[string]
keyword[if] identifier[timetables_course_section_id_start_time] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[timetables_course_section_id_start_time]
literal[string]
keyword[if] identifier[timetables_course_section_id_end_time] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[timetables_course_section_id_end_time]
literal[string]
keyword[if] identifier[timetables_course_section_id_location_name] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[timetables_course_section_id_location_name]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[no_data] = keyword[True] ) | def set_course_timetable(self, course_id, timetables_course_section_id=None, timetables_course_section_id_end_time=None, timetables_course_section_id_location_name=None, timetables_course_section_id_start_time=None, timetables_course_section_id_weekdays=None):
"""
Set a course timetable.
Creates and updates "timetable" events for a course.
Can automaticaly generate a series of calendar events based on simple schedules
(e.g. "Monday and Wednesday at 2:00pm" )
Existing timetable events for the course and course sections
will be updated if they still are part of the timetable.
Otherwise, they will be deleted.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - course_id
'ID'
path['course_id'] = course_id # OPTIONAL - timetables[course_section_id]
'An array of timetable objects for the course section specified by course_section_id.\n If course_section_id is set to "all", events will be created for the entire course.'
if timetables_course_section_id is not None:
data['timetables[course_section_id]'] = timetables_course_section_id # depends on [control=['if'], data=['timetables_course_section_id']] # OPTIONAL - timetables[course_section_id][weekdays]
'A comma-separated list of abbreviated weekdays\n (Mon-Monday, Tue-Tuesday, Wed-Wednesday, Thu-Thursday, Fri-Friday, Sat-Saturday, Sun-Sunday)'
if timetables_course_section_id_weekdays is not None:
data['timetables[course_section_id][weekdays]'] = timetables_course_section_id_weekdays # depends on [control=['if'], data=['timetables_course_section_id_weekdays']] # OPTIONAL - timetables[course_section_id][start_time]
'Time to start each event at (e.g. "9:00 am")'
if timetables_course_section_id_start_time is not None:
data['timetables[course_section_id][start_time]'] = timetables_course_section_id_start_time # depends on [control=['if'], data=['timetables_course_section_id_start_time']] # OPTIONAL - timetables[course_section_id][end_time]
'Time to end each event at (e.g. "9:00 am")'
if timetables_course_section_id_end_time is not None:
data['timetables[course_section_id][end_time]'] = timetables_course_section_id_end_time # depends on [control=['if'], data=['timetables_course_section_id_end_time']] # OPTIONAL - timetables[course_section_id][location_name]
'A location name to set for each event'
if timetables_course_section_id_location_name is not None:
data['timetables[course_section_id][location_name]'] = timetables_course_section_id_location_name # depends on [control=['if'], data=['timetables_course_section_id_location_name']]
self.logger.debug('POST /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('POST', '/api/v1/courses/{course_id}/calendar_events/timetable'.format(**path), data=data, params=params, no_data=True) |
def _parse_fields_string(self, field_names):
    """
    Convert raw string or list of names to actual column names:
    - names starting with '-' indicate to suppress that field
    - '*' means include all other field names
    - if no fields are specifically included, then all fields are used

    :param field_names: str or list
    :return: expanded list of field names
    """
    # Accept a whitespace-delimited string as shorthand for a list.
    if isinstance(field_names, basestring):
        field_names = field_names.split()
    # With no objects to inspect, '*' cannot be expanded; return as given.
    if not self.obs:
        return field_names

    # Split suppressed names ('-name', minus stripped) from requested ones.
    suppressed = []
    requested = []
    for raw_name in field_names:
        if raw_name.startswith('-'):
            suppressed.append(raw_name[1:])
        else:
            requested.append(raw_name)

    # No explicit inclusions means "everything".
    field_names = requested or ['*']

    # Expand the first '*' in place with every attribute of the first
    # object that is not already listed explicitly.
    if '*' in field_names:
        wildcard_at = field_names.index('*')
        extras = [attr for attr in _object_attrnames(self[0]) if attr not in field_names]
        field_names = field_names[:wildcard_at] + extras + field_names[wildcard_at + 1:]

    # Finally drop any names that were explicitly suppressed.
    return [nm for nm in field_names if nm not in suppressed]
constant[
Convert raw string or list of names to actual column names:
- names starting with '-' indicate to suppress that field
- '*' means include all other field names
- if no fields are specifically included, then all fields are used
:param field_names: str or list
:return: expanded list of field names
]
if call[name[isinstance], parameter[name[field_names], name[basestring]]] begin[:]
variable[field_names] assign[=] call[name[field_names].split, parameter[]]
if <ast.UnaryOp object at 0x7da1b25062c0> begin[:]
return[name[field_names]]
variable[suppress_names] assign[=] <ast.ListComp object at 0x7da1b2504670>
variable[field_names] assign[=] <ast.ListComp object at 0x7da1b2504c10>
if <ast.UnaryOp object at 0x7da1b2507df0> begin[:]
variable[field_names] assign[=] list[[<ast.Constant object at 0x7da1b25057e0>]]
if compare[constant[*] in name[field_names]] begin[:]
variable[star_fields] assign[=] <ast.ListComp object at 0x7da1b2506ec0>
variable[fn_iter] assign[=] call[name[iter], parameter[name[field_names]]]
variable[field_names] assign[=] binary_operation[binary_operation[call[name[list], parameter[call[name[takewhile], parameter[<ast.Lambda object at 0x7da20e956d70>, name[fn_iter]]]]] + name[star_fields]] + call[name[list], parameter[name[fn_iter]]]]
variable[field_names] assign[=] <ast.ListComp object at 0x7da20e957b80>
return[name[field_names]] | keyword[def] identifier[_parse_fields_string] ( identifier[self] , identifier[field_names] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[field_names] , identifier[basestring] ):
identifier[field_names] = identifier[field_names] . identifier[split] ()
keyword[if] keyword[not] identifier[self] . identifier[obs] :
keyword[return] identifier[field_names]
identifier[suppress_names] =[ identifier[nm] [ literal[int] :] keyword[for] identifier[nm] keyword[in] identifier[field_names] keyword[if] identifier[nm] . identifier[startswith] ( literal[string] )]
identifier[field_names] =[ identifier[nm] keyword[for] identifier[nm] keyword[in] identifier[field_names] keyword[if] keyword[not] identifier[nm] . identifier[startswith] ( literal[string] )]
keyword[if] keyword[not] identifier[field_names] :
identifier[field_names] =[ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[field_names] :
identifier[star_fields] =[ identifier[name] keyword[for] identifier[name] keyword[in] identifier[_object_attrnames] ( identifier[self] [ literal[int] ]) keyword[if] identifier[name] keyword[not] keyword[in] identifier[field_names] ]
identifier[fn_iter] = identifier[iter] ( identifier[field_names] )
identifier[field_names] = identifier[list] ( identifier[takewhile] ( keyword[lambda] identifier[x] : identifier[x] != literal[string] , identifier[fn_iter] ))+ identifier[star_fields] + identifier[list] ( identifier[fn_iter] )
identifier[field_names] =[ identifier[nm] keyword[for] identifier[nm] keyword[in] identifier[field_names] keyword[if] identifier[nm] keyword[not] keyword[in] identifier[suppress_names] ]
keyword[return] identifier[field_names] | def _parse_fields_string(self, field_names):
"""
Convert raw string or list of names to actual column names:
- names starting with '-' indicate to suppress that field
- '*' means include all other field names
- if no fields are specifically included, then all fields are used
:param field_names: str or list
:return: expanded list of field names
"""
if isinstance(field_names, basestring):
field_names = field_names.split() # depends on [control=['if'], data=[]]
if not self.obs:
return field_names # depends on [control=['if'], data=[]]
suppress_names = [nm[1:] for nm in field_names if nm.startswith('-')]
field_names = [nm for nm in field_names if not nm.startswith('-')]
if not field_names:
field_names = ['*'] # depends on [control=['if'], data=[]]
if '*' in field_names:
star_fields = [name for name in _object_attrnames(self[0]) if name not in field_names]
fn_iter = iter(field_names)
field_names = list(takewhile(lambda x: x != '*', fn_iter)) + star_fields + list(fn_iter) # depends on [control=['if'], data=['field_names']]
field_names = [nm for nm in field_names if nm not in suppress_names]
return field_names |
def get_dep_names_of_package(
        package,
        keep_version_pins=False,
        recursive=False,
        verbose=False,
        include_build_requirements=False
        ):
    """ Gets the dependencies from the package in the given folder,
        then attempts to deduce the actual package name resulting
        from each dependency line, stripping away everything else.
    """
    # Resolve the raw dependency lines for the package first:
    raw_deps = get_package_dependencies(
        package, recursive=recursive, verbose=verbose,
        include_build_requirements=include_build_requirements,
    )
    if verbose:
        print("get_dep_names_of_package_folder: " +
              "processing dependency list to names: " +
              str(raw_deps))

    # Reduce every raw dependency line to a bare package name,
    # optionally re-attaching an exact "==" version pin when present:
    dependency_names = set()
    for raw_dep in raw_deps:
        pin_suffix = ""
        if keep_version_pins and "(==" in raw_dep and raw_dep.endswith(")"):
            # Dependency of the format: 'pkg (==1.0)'
            pin_suffix = "==" + raw_dep.rpartition("==")[2][:-1]
        elif keep_version_pins and "==" in raw_dep and not raw_dep.endswith(")"):
            # Dependency of the format: 'pkg==1.0'
            pin_suffix = "==" + raw_dep.rpartition("==")[2]
        # Map to the true (and e.g. case-corrected) dependency name:
        dependency_names.add(get_package_name(raw_dep) + pin_suffix)
    return dependency_names
constant[ Gets the dependencies from the package in the given folder,
then attempts to deduce the actual package name resulting
from each dependency line, stripping away everything else.
]
variable[dependencies] assign[=] call[name[get_package_dependencies], parameter[name[package]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[get_dep_names_of_package_folder: ] + constant[processing dependency list to names: ]] + call[name[str], parameter[name[dependencies]]]]]]
variable[dependency_names] assign[=] call[name[set], parameter[]]
for taget[name[dep]] in starred[name[dependencies]] begin[:]
variable[pin_to_append] assign[=] constant[]
if <ast.BoolOp object at 0x7da1b21e1390> begin[:]
variable[pin_to_append] assign[=] binary_operation[constant[==] + call[call[call[name[dep].rpartition, parameter[constant[==]]]][constant[2]]][<ast.Slice object at 0x7da1b21e0c70>]]
variable[dep_name] assign[=] binary_operation[call[name[get_package_name], parameter[name[dep]]] + name[pin_to_append]]
call[name[dependency_names].add, parameter[name[dep_name]]]
return[name[dependency_names]] | keyword[def] identifier[get_dep_names_of_package] (
identifier[package] ,
identifier[keep_version_pins] = keyword[False] ,
identifier[recursive] = keyword[False] ,
identifier[verbose] = keyword[False] ,
identifier[include_build_requirements] = keyword[False]
):
literal[string]
identifier[dependencies] = identifier[get_package_dependencies] (
identifier[package] , identifier[recursive] = identifier[recursive] , identifier[verbose] = identifier[verbose] ,
identifier[include_build_requirements] = identifier[include_build_requirements] ,
)
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] +
literal[string] +
identifier[str] ( identifier[dependencies] ))
identifier[dependency_names] = identifier[set] ()
keyword[for] identifier[dep] keyword[in] identifier[dependencies] :
identifier[pin_to_append] = literal[string]
keyword[if] identifier[keep_version_pins] keyword[and] literal[string] keyword[in] identifier[dep] keyword[and] identifier[dep] . identifier[endswith] ( literal[string] ):
identifier[pin_to_append] = literal[string] + identifier[dep] . identifier[rpartition] ( literal[string] )[ literal[int] ][:- literal[int] ]
keyword[elif] identifier[keep_version_pins] keyword[and] literal[string] keyword[in] identifier[dep] keyword[and] keyword[not] identifier[dep] . identifier[endswith] ( literal[string] ):
identifier[pin_to_append] = literal[string] + identifier[dep] . identifier[rpartition] ( literal[string] )[ literal[int] ]
identifier[dep_name] = identifier[get_package_name] ( identifier[dep] )+ identifier[pin_to_append]
identifier[dependency_names] . identifier[add] ( identifier[dep_name] )
keyword[return] identifier[dependency_names] | def get_dep_names_of_package(package, keep_version_pins=False, recursive=False, verbose=False, include_build_requirements=False):
""" Gets the dependencies from the package in the given folder,
then attempts to deduce the actual package name resulting
from each dependency line, stripping away everything else.
"""
# First, obtain the dependencies:
dependencies = get_package_dependencies(package, recursive=recursive, verbose=verbose, include_build_requirements=include_build_requirements)
if verbose:
print('get_dep_names_of_package_folder: ' + 'processing dependency list to names: ' + str(dependencies)) # depends on [control=['if'], data=[]]
# Transform dependencies to their stripped down names:
# (they can still have version pins/restrictions, conditionals, ...)
dependency_names = set()
for dep in dependencies:
# If we are supposed to keep exact version pins, extract first:
pin_to_append = ''
if keep_version_pins and '(==' in dep and dep.endswith(')'):
# This is a dependency of the format: 'pkg (==1.0)'
pin_to_append = '==' + dep.rpartition('==')[2][:-1] # depends on [control=['if'], data=[]]
elif keep_version_pins and '==' in dep and (not dep.endswith(')')):
# This is a dependency of the format: 'pkg==1.0'
pin_to_append = '==' + dep.rpartition('==')[2] # depends on [control=['if'], data=[]]
# Now get true (and e.g. case-corrected) dependency name:
dep_name = get_package_name(dep) + pin_to_append
dependency_names.add(dep_name) # depends on [control=['for'], data=['dep']]
return dependency_names |
def _upstart_disable(name):
    '''
    Disable an Upstart service.

    Writes a ``manual`` stanza to ``/etc/init/<name>.override`` so Upstart
    skips the job, then returns the disabled state as reported by
    ``_upstart_is_disabled``.
    '''
    # Short-circuit if already disabled; cache the result instead of
    # calling _upstart_is_disabled twice as the old code did.
    disabled = _upstart_is_disabled(name)
    if disabled:
        return disabled
    override = '/etc/init/{0}.override'.format(name)
    # Append rather than truncate so any existing override content survives.
    with salt.utils.files.fopen(override, 'a') as ofile:
        ofile.write(salt.utils.stringutils.to_str('manual\n'))
    # Re-check so the caller gets the post-write state.
    return _upstart_is_disabled(name)
constant[
Disable an Upstart service.
]
if call[name[_upstart_is_disabled], parameter[name[name]]] begin[:]
return[call[name[_upstart_is_disabled], parameter[name[name]]]]
variable[override] assign[=] call[constant[/etc/init/{0}.override].format, parameter[name[name]]]
with call[name[salt].utils.files.fopen, parameter[name[override], constant[a]]] begin[:]
call[name[ofile].write, parameter[call[name[salt].utils.stringutils.to_str, parameter[constant[manual
]]]]]
return[call[name[_upstart_is_disabled], parameter[name[name]]]] | keyword[def] identifier[_upstart_disable] ( identifier[name] ):
literal[string]
keyword[if] identifier[_upstart_is_disabled] ( identifier[name] ):
keyword[return] identifier[_upstart_is_disabled] ( identifier[name] )
identifier[override] = literal[string] . identifier[format] ( identifier[name] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[override] , literal[string] ) keyword[as] identifier[ofile] :
identifier[ofile] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( literal[string] ))
keyword[return] identifier[_upstart_is_disabled] ( identifier[name] ) | def _upstart_disable(name):
"""
Disable an Upstart service.
"""
if _upstart_is_disabled(name):
return _upstart_is_disabled(name) # depends on [control=['if'], data=[]]
override = '/etc/init/{0}.override'.format(name)
with salt.utils.files.fopen(override, 'a') as ofile:
ofile.write(salt.utils.stringutils.to_str('manual\n')) # depends on [control=['with'], data=['ofile']]
return _upstart_is_disabled(name) |
def t_OPTION_AND_VALUE(self, t):
    # NOTE: PLY uses the string literal below as this token's regular
    # expression -- it is part of the lexer's behaviour, not documentation,
    # and must stay byte-identical.  It matches: an option name (no
    # whitespace/'='/'#'), a separator run of spaces/tabs/'=', then the
    # value up to end-of-line or a '#'.
    r'[^ \n\r\t=#]+[ \t=]+[^\r\n#]+' # TODO(etingof) escape hash
    # A trailing backslash means the value continues on the next physical
    # line: remember where this logical line started and switch the lexer
    # into the 'multiline' state; no token is produced yet.
    if t.value.endswith('\\'):
        t.lexer.multiline_newline_seen = False
        t.lexer.code_start = t.lexer.lexpos - len(t.value)
        t.lexer.begin('multiline')
        return
    # Count newlines inside the matched text so lexer.lineno stays accurate
    # when the token is finally emitted.
    lineno = len(re.findall(r'\r\n|\n|\r', t.value))
    option, value = self._parse_option_value(t.value)
    process, option, value = self._pre_parse_value(option, value)
    if not process:
        # The pre-parse hook asked us to drop this option entirely.
        return
    # '<<ANCHOR' introduces a heredoc value: stash the anchor and option
    # name, then collect subsequent lines in the 'heredoc' lexer state.
    if value.startswith('<<'):
        t.lexer.heredoc_anchor = value[2:].strip()
        t.lexer.heredoc_option = option
        t.lexer.code_start = t.lexer.lexpos
        t.lexer.begin('heredoc')
        return
    # Emit the token as an (option, value) pair.
    t.value = option, value
    t.lexer.lineno += lineno
    return t
constant[[^ \n\r\t=#]+[ \t=]+[^\r\n#]+]
if call[name[t].value.endswith, parameter[constant[\]]] begin[:]
name[t].lexer.multiline_newline_seen assign[=] constant[False]
name[t].lexer.code_start assign[=] binary_operation[name[t].lexer.lexpos - call[name[len], parameter[name[t].value]]]
call[name[t].lexer.begin, parameter[constant[multiline]]]
return[None]
variable[lineno] assign[=] call[name[len], parameter[call[name[re].findall, parameter[constant[\r\n|\n|\r], name[t].value]]]]
<ast.Tuple object at 0x7da20c6c6b90> assign[=] call[name[self]._parse_option_value, parameter[name[t].value]]
<ast.Tuple object at 0x7da20c6c5c60> assign[=] call[name[self]._pre_parse_value, parameter[name[option], name[value]]]
if <ast.UnaryOp object at 0x7da20c6c72e0> begin[:]
return[None]
if call[name[value].startswith, parameter[constant[<<]]] begin[:]
name[t].lexer.heredoc_anchor assign[=] call[call[name[value]][<ast.Slice object at 0x7da20c6c5f30>].strip, parameter[]]
name[t].lexer.heredoc_option assign[=] name[option]
name[t].lexer.code_start assign[=] name[t].lexer.lexpos
call[name[t].lexer.begin, parameter[constant[heredoc]]]
return[None]
name[t].value assign[=] tuple[[<ast.Name object at 0x7da20c7cb310>, <ast.Name object at 0x7da20c7c8c70>]]
<ast.AugAssign object at 0x7da20c7ca170>
return[name[t]] | keyword[def] identifier[t_OPTION_AND_VALUE] ( identifier[self] , identifier[t] ):
literal[string]
keyword[if] identifier[t] . identifier[value] . identifier[endswith] ( literal[string] ):
identifier[t] . identifier[lexer] . identifier[multiline_newline_seen] = keyword[False]
identifier[t] . identifier[lexer] . identifier[code_start] = identifier[t] . identifier[lexer] . identifier[lexpos] - identifier[len] ( identifier[t] . identifier[value] )
identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] )
keyword[return]
identifier[lineno] = identifier[len] ( identifier[re] . identifier[findall] ( literal[string] , identifier[t] . identifier[value] ))
identifier[option] , identifier[value] = identifier[self] . identifier[_parse_option_value] ( identifier[t] . identifier[value] )
identifier[process] , identifier[option] , identifier[value] = identifier[self] . identifier[_pre_parse_value] ( identifier[option] , identifier[value] )
keyword[if] keyword[not] identifier[process] :
keyword[return]
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[t] . identifier[lexer] . identifier[heredoc_anchor] = identifier[value] [ literal[int] :]. identifier[strip] ()
identifier[t] . identifier[lexer] . identifier[heredoc_option] = identifier[option]
identifier[t] . identifier[lexer] . identifier[code_start] = identifier[t] . identifier[lexer] . identifier[lexpos]
identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] )
keyword[return]
identifier[t] . identifier[value] = identifier[option] , identifier[value]
identifier[t] . identifier[lexer] . identifier[lineno] += identifier[lineno]
keyword[return] identifier[t] | def t_OPTION_AND_VALUE(self, t):
"""[^ \\n\\r\\t=#]+[ \\t=]+[^\\r\\n#]+""" # TODO(etingof) escape hash
if t.value.endswith('\\'):
t.lexer.multiline_newline_seen = False
t.lexer.code_start = t.lexer.lexpos - len(t.value)
t.lexer.begin('multiline')
return # depends on [control=['if'], data=[]]
lineno = len(re.findall('\\r\\n|\\n|\\r', t.value))
(option, value) = self._parse_option_value(t.value)
(process, option, value) = self._pre_parse_value(option, value)
if not process:
return # depends on [control=['if'], data=[]]
if value.startswith('<<'):
t.lexer.heredoc_anchor = value[2:].strip()
t.lexer.heredoc_option = option
t.lexer.code_start = t.lexer.lexpos
t.lexer.begin('heredoc')
return # depends on [control=['if'], data=[]]
t.value = (option, value)
t.lexer.lineno += lineno
return t |
def gen_random_mobile():
    """
    Generate a random Chinese mobile phone number.

    A carrier prefix is drawn at random from the known number segments
    and padded with random digits until the number is 11 characters long.

    :return:
        * str: an 11-digit mobile number, e.g. ``16706146773``
    """
    carrier_prefixes = ["13",
                        "1400", "1410", "1440", "145", "146", "147", "148",
                        "15",
                        "162", "165", "166", "167",
                        "170", "171", "172", "173", "175", "176", "177", "178", "1740",
                        "18",
                        "191", "198", "199"]
    prefix = random.choice(carrier_prefixes)
    # Pad with random digits so the total length is always 11.
    remaining = 11 - len(prefix)
    suffix = "".join(random.choice("0123456789") for _ in range(remaining))
    return prefix + suffix
constant[
随机生成一个手机号
:return:
* str: (string) 手机号
举例如下::
print('--- gen_random_mobile demo ---')
print(gen_random_mobile())
print(gen_random_mobile())
print('---')
执行结果::
--- gen_random_mobile demo ---
16706146773
14402633925
---
]
variable[prefix_list] assign[=] list[[<ast.Constant object at 0x7da1b074fbb0>, <ast.Constant object at 0x7da1b074ce80>, <ast.Constant object at 0x7da1b074c280>, <ast.Constant object at 0x7da1b074d690>, <ast.Constant object at 0x7da1b074cfa0>, <ast.Constant object at 0x7da1b074c640>, <ast.Constant object at 0x7da1b074dbd0>, <ast.Constant object at 0x7da1b074df90>, <ast.Constant object at 0x7da1b074e080>, <ast.Constant object at 0x7da1b074fca0>, <ast.Constant object at 0x7da1b074e710>, <ast.Constant object at 0x7da1b074ce50>, <ast.Constant object at 0x7da20cabe410>, <ast.Constant object at 0x7da20cabc640>, <ast.Constant object at 0x7da20cabcd60>, <ast.Constant object at 0x7da20cabdff0>, <ast.Constant object at 0x7da20cabce80>, <ast.Constant object at 0x7da20cabe560>, <ast.Constant object at 0x7da20cabd330>, <ast.Constant object at 0x7da20cabce20>, <ast.Constant object at 0x7da20cabc610>, <ast.Constant object at 0x7da20cabe080>, <ast.Constant object at 0x7da20cabda50>, <ast.Constant object at 0x7da20cabf700>, <ast.Constant object at 0x7da20cabc400>, <ast.Constant object at 0x7da20cabf370>]]
variable[prefix_str] assign[=] call[name[random].choice, parameter[name[prefix_list]]]
return[binary_operation[name[prefix_str] + call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da2041d8310>]]]] | keyword[def] identifier[gen_random_mobile] ():
literal[string]
identifier[prefix_list] =[ literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ,
literal[string] , literal[string] , literal[string] ]
identifier[prefix_str] = identifier[random] . identifier[choice] ( identifier[prefix_list] )
keyword[return] identifier[prefix_str] + literal[string] . identifier[join] ( identifier[random] . identifier[choice] ( literal[string] ) keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] - identifier[len] ( identifier[prefix_str] ))) | def gen_random_mobile():
"""
随机生成一个手机号
:return:
* str: (string) 手机号
举例如下::
print('--- gen_random_mobile demo ---')
print(gen_random_mobile())
print(gen_random_mobile())
print('---')
执行结果::
--- gen_random_mobile demo ---
16706146773
14402633925
---
"""
prefix_list = ['13', '1400', '1410', '1440', '145', '146', '147', '148', '15', '162', '165', '166', '167', '170', '171', '172', '173', '175', '176', '177', '178', '1740', '18', '191', '198', '199']
prefix_str = random.choice(prefix_list)
return prefix_str + ''.join((random.choice('0123456789') for _ in range(11 - len(prefix_str)))) |
def run_query(ont, aset, args):
    """
    Basic querying by positive/negative class lists.

    Prints every matching subject together with its label; when
    ``args.plot`` is set, additionally renders a plotly heatmap of the
    subject/association matrix.

    :param ont: ontology handle (not used directly here)
    :param aset: association set supporting query()/label()/query_associations()
    :param args: parsed CLI arguments (query, negative, plot, ...)
    """
    subjects = aset.query(args.query, args.negative)
    for s in subjects:
        print("{} {}".format(s, str(aset.label(s))))
    if args.plot:
        # Plotly is imported lazily so plain (non-plot) queries do not
        # require it to be installed.
        import plotly.plotly as py
        import plotly.graph_objs as go
        tups = aset.query_associations(subjects=subjects)
        z, xaxis, yaxis = tuple_to_matrix(tups)
        # (Removed dead local 'spacechar'; the separator is passed inline.)
        xaxis = mk_axis(xaxis, aset, args, spacechar=" ")
        yaxis = mk_axis(yaxis, aset, args, spacechar=" ")
        logging.info("PLOTTING: {} x {} = {}".format(xaxis, yaxis, z))
        trace = go.Heatmap(z=z,
                           x=xaxis,
                           y=yaxis)
        data = [trace]
        py.plot(data, filename='labelled-heatmap')
constant[
Basic querying by positive/negative class lists
]
variable[subjects] assign[=] call[name[aset].query, parameter[name[args].query, name[args].negative]]
for taget[name[s]] in starred[name[subjects]] begin[:]
call[name[print], parameter[call[constant[{} {}].format, parameter[name[s], call[name[str], parameter[call[name[aset].label, parameter[name[s]]]]]]]]]
if name[args].plot begin[:]
import module[plotly.plotly] as alias[py]
import module[plotly.graph_objs] as alias[go]
variable[tups] assign[=] call[name[aset].query_associations, parameter[]]
<ast.Tuple object at 0x7da1b08c8670> assign[=] call[name[tuple_to_matrix], parameter[name[tups]]]
variable[spacechar] assign[=] constant[ ]
variable[xaxis] assign[=] call[name[mk_axis], parameter[name[xaxis], name[aset], name[args]]]
variable[yaxis] assign[=] call[name[mk_axis], parameter[name[yaxis], name[aset], name[args]]]
call[name[logging].info, parameter[call[constant[PLOTTING: {} x {} = {}].format, parameter[name[xaxis], name[yaxis], name[z]]]]]
variable[trace] assign[=] call[name[go].Heatmap, parameter[]]
variable[data] assign[=] list[[<ast.Name object at 0x7da1b08c95a0>]]
call[name[py].plot, parameter[name[data]]] | keyword[def] identifier[run_query] ( identifier[ont] , identifier[aset] , identifier[args] ):
literal[string]
identifier[subjects] = identifier[aset] . identifier[query] ( identifier[args] . identifier[query] , identifier[args] . identifier[negative] )
keyword[for] identifier[s] keyword[in] identifier[subjects] :
identifier[print] ( literal[string] . identifier[format] ( identifier[s] , identifier[str] ( identifier[aset] . identifier[label] ( identifier[s] ))))
keyword[if] identifier[args] . identifier[plot] :
keyword[import] identifier[plotly] . identifier[plotly] keyword[as] identifier[py]
keyword[import] identifier[plotly] . identifier[graph_objs] keyword[as] identifier[go]
identifier[tups] = identifier[aset] . identifier[query_associations] ( identifier[subjects] = identifier[subjects] )
identifier[z] , identifier[xaxis] , identifier[yaxis] = identifier[tuple_to_matrix] ( identifier[tups] )
identifier[spacechar] = literal[string]
identifier[xaxis] = identifier[mk_axis] ( identifier[xaxis] , identifier[aset] , identifier[args] , identifier[spacechar] = literal[string] )
identifier[yaxis] = identifier[mk_axis] ( identifier[yaxis] , identifier[aset] , identifier[args] , identifier[spacechar] = literal[string] )
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[xaxis] , identifier[yaxis] , identifier[z] ))
identifier[trace] = identifier[go] . identifier[Heatmap] ( identifier[z] = identifier[z] ,
identifier[x] = identifier[xaxis] ,
identifier[y] = identifier[yaxis] )
identifier[data] =[ identifier[trace] ]
identifier[py] . identifier[plot] ( identifier[data] , identifier[filename] = literal[string] ) | def run_query(ont, aset, args):
"""
Basic querying by positive/negative class lists
"""
subjects = aset.query(args.query, args.negative)
for s in subjects:
print('{} {}'.format(s, str(aset.label(s)))) # depends on [control=['for'], data=['s']]
if args.plot:
import plotly.plotly as py
import plotly.graph_objs as go
tups = aset.query_associations(subjects=subjects)
(z, xaxis, yaxis) = tuple_to_matrix(tups)
spacechar = ' '
xaxis = mk_axis(xaxis, aset, args, spacechar=' ')
yaxis = mk_axis(yaxis, aset, args, spacechar=' ')
logging.info('PLOTTING: {} x {} = {}'.format(xaxis, yaxis, z))
trace = go.Heatmap(z=z, x=xaxis, y=yaxis)
data = [trace]
py.plot(data, filename='labelled-heatmap') # depends on [control=['if'], data=[]] |
def add_v3(vec1, m):
    """Return a new Vec3 holding the component-wise sum of ``vec1`` and ``m``.

    When ``m`` is a plain number it is added to each of x, y and z;
    otherwise ``m`` is treated as another Vec3 and its x, y and z are
    added to ``vec1``'s components.
    """
    if type(m) not in NUMERIC_TYPES:
        # Vector argument: add component by component.
        return Vec3(vec1.x + m.x, vec1.y + m.y, vec1.z + m.z)
    # Scalar argument: broadcast across all three components.
    return Vec3(vec1.x + m, vec1.y + m, vec1.z + m)
constant[Return a new Vec3 containing the sum of our x, y, z, and arg.
If argument is a float or vec, addt it to our x, y, and z.
Otherwise, treat it as a Vec3 and add arg.x, arg.y, and arg.z from
our own x, y, and z.
]
if compare[call[name[type], parameter[name[m]]] in name[NUMERIC_TYPES]] begin[:]
return[call[name[Vec3], parameter[binary_operation[name[vec1].x + name[m]], binary_operation[name[vec1].y + name[m]], binary_operation[name[vec1].z + name[m]]]]] | keyword[def] identifier[add_v3] ( identifier[vec1] , identifier[m] ):
literal[string]
keyword[if] identifier[type] ( identifier[m] ) keyword[in] identifier[NUMERIC_TYPES] :
keyword[return] identifier[Vec3] ( identifier[vec1] . identifier[x] + identifier[m] , identifier[vec1] . identifier[y] + identifier[m] , identifier[vec1] . identifier[z] + identifier[m] )
keyword[else] :
keyword[return] identifier[Vec3] ( identifier[vec1] . identifier[x] + identifier[m] . identifier[x] , identifier[vec1] . identifier[y] + identifier[m] . identifier[y] , identifier[vec1] . identifier[z] + identifier[m] . identifier[z] ) | def add_v3(vec1, m):
"""Return a new Vec3 containing the sum of our x, y, z, and arg.
If argument is a float or vec, addt it to our x, y, and z.
Otherwise, treat it as a Vec3 and add arg.x, arg.y, and arg.z from
our own x, y, and z.
"""
if type(m) in NUMERIC_TYPES:
return Vec3(vec1.x + m, vec1.y + m, vec1.z + m) # depends on [control=['if'], data=[]]
else:
return Vec3(vec1.x + m.x, vec1.y + m.y, vec1.z + m.z) |
def plot_kde(
    values,
    values2=None,
    cumulative=False,
    rug=False,
    label=None,
    bw=4.5,
    quantiles=None,
    rotated=False,
    contour=True,
    fill_last=True,
    textsize=None,
    plot_kwargs=None,
    fill_kwargs=None,
    rug_kwargs=None,
    contour_kwargs=None,
    contourf_kwargs=None,
    pcolormesh_kwargs=None,
    ax=None,
    legend=True,
):
    """1D or 2D KDE plot taking into account boundary conditions.
    Parameters
    ----------
    values : array-like
        Values to plot
    values2 : array-like, optional
        Values to plot. If present, a 2D KDE will be estimated
    cumulative : bool
        If true plot the estimated cumulative distribution function. Defaults to False.
        Ignored for 2D KDE
    rug : bool
        If True adds a rugplot. Defaults to False. Ignored for 2D KDE
    label : string
        Text to include as part of the legend
    bw : float
        Bandwidth scaling factor for 1D KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's
        rule of thumb (the default rule used by SciPy).
    quantiles : list
        Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
        Defaults to None.
    rotated : bool
        Whether to rotate the 1D KDE plot 90 degrees.
    contour : bool
        If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
    fill_last : bool
        If True fill the last contour of the 2D KDE plot. Defaults to True.
    textsize: float
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
        on figsize.
    plot_kwargs : dict
        Keywords passed to the pdf line of a 1D KDE.
    fill_kwargs : dict
        Keywords passed to the fill under the line (use fill_kwargs={'alpha': 0} to disable fill).
        Ignored for 2D KDE
    rug_kwargs : dict
        Keywords passed to the rug plot. Ignored if rug=False or for 2D KDE
        Use `space` keyword (float) to control the position of the rugplot. The larger this number
        the lower the rugplot.
    contour_kwargs : dict
        Keywords passed to ax.contour. Ignored for 1D KDE.
    contourf_kwargs : dict
        Keywords passed to ax.contourf. Ignored for 1D KDE.
    pcolormesh_kwargs : dict
        Keywords passed to ax.pcolormesh. Ignored for 1D KDE.
    ax : matplotlib axes
    legend : bool
        Add legend to the figure. By default True.
    Returns
    -------
    ax : matplotlib axes
    Examples
    --------
    Plot default KDE
    .. plot::
        :context: close-figs
        >>> import arviz as az
        >>> non_centered = az.load_arviz_data('non_centered_eight')
        >>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
        >>> az.plot_kde(mu_posterior)
    Plot KDE with rugplot
    .. plot::
        :context: close-figs
        >>> az.plot_kde(mu_posterior, rug=True)
    Plot a cumulative distribution
    .. plot::
        :context: close-figs
        >>> az.plot_kde(mu_posterior, cumulative=True)
    Rotate plot 90 degrees
    .. plot::
        :context: close-figs
        >>> az.plot_kde(mu_posterior, rotated=True)
    Plot 2d contour KDE
    .. plot::
        :context: close-figs
        >>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
        >>> az.plot_kde(mu_posterior, values2=tau_posterior)
    Remove fill for last contour in 2d KDE
    .. plot::
        :context: close-figs
        >>> az.plot_kde(mu_posterior, values2=tau_posterior, fill_last=False)
    Plot 2d smooth KDE
    .. plot::
        :context: close-figs
        >>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
    """
    # Draw on the current axes when the caller did not supply one.
    if ax is None:
        ax = plt.gca()
    figsize = ax.get_figure().get_size_inches()
    # Scale label/line/marker sizes to the figure dimensions.
    figsize, *_, xt_labelsize, linewidth, markersize = _scale_fig_size(figsize, textsize, 1, 1)
    # Container objects must go through the higher-level plotting functions.
    if isinstance(values, xr.Dataset):
        raise ValueError(
            "Xarray dataset object detected.Use plot_posterior, plot_density, plot_joint"
            "or plot_pair instead of plot_kde"
        )
    if isinstance(values, InferenceData):
        raise ValueError(" Inference Data object detected. Use plot_posterior instead of plot_kde")
    if values2 is None:
        # ---- 1D KDE branch ----
        if plot_kwargs is None:
            plot_kwargs = {}
        plot_kwargs.setdefault("color", "C0")
        default_color = plot_kwargs.get("color")
        if fill_kwargs is None:
            fill_kwargs = {}
        fill_kwargs.setdefault("color", default_color)
        if rug_kwargs is None:
            rug_kwargs = {}
        rug_kwargs.setdefault("marker", "_" if rotated else "|")
        rug_kwargs.setdefault("linestyle", "None")
        rug_kwargs.setdefault("color", default_color)
        rug_kwargs.setdefault("space", 0.2)
        plot_kwargs.setdefault("linewidth", linewidth)
        rug_kwargs.setdefault("markersize", 2 * markersize)
        # Evaluate the (optionally cumulative) KDE on a regular grid.
        density, lower, upper = _fast_kde(values, cumulative, bw)
        # 'space' controls how far below the curve the rug markers sit.
        rug_space = max(density) * rug_kwargs.pop("space")
        x = np.linspace(lower, upper, len(density))
        if cumulative:
            density_q = density
        else:
            # Normalized cumulative density, used for quantile cut points.
            density_q = density.cumsum() / density.sum()
        fill_func = ax.fill_between
        fill_x, fill_y = x, density
        if rotated:
            # Rotated plot: density runs along the x axis instead.
            x, density = density, x
            fill_func = ax.fill_betweenx
        ax.tick_params(labelsize=xt_labelsize)
        if rotated:
            ax.set_xlim(0, auto=True)
            rug_x, rug_y = np.zeros_like(values) - rug_space, values
        else:
            ax.set_ylim(0, auto=True)
            rug_x, rug_y = values, np.zeros_like(values) - rug_space
        if rug:
            ax.plot(rug_x, rug_y, **rug_kwargs)
        if quantiles is not None:
            fill_kwargs.setdefault("alpha", 0.75)
            # Grid indices at which each requested quantile is reached.
            idx = [np.sum(density_q < quant) for quant in quantiles]
            # Fill everywhere except at the quantile boundary points,
            # visually segmenting the KDE into quantile regions.
            fill_func(
                fill_x,
                fill_y,
                where=np.isin(fill_x, fill_x[idx], invert=True, assume_unique=True),
                **fill_kwargs
            )
        else:
            fill_kwargs.setdefault("alpha", 0)
            ax.plot(x, density, label=label, **plot_kwargs)
            fill_func(fill_x, fill_y, **fill_kwargs)
        if legend and label:
            legend_element = [Patch(edgecolor=default_color, label=label)]
            ax.legend(handles=legend_element)
    else:
        # ---- 2D KDE branch ----
        if contour_kwargs is None:
            contour_kwargs = {}
        contour_kwargs.setdefault("colors", "0.5")
        if contourf_kwargs is None:
            contourf_kwargs = {}
        if pcolormesh_kwargs is None:
            pcolormesh_kwargs = {}
        # Contour plots get by with a coarser evaluation grid.
        gridsize = (128, 128) if contour else (256, 256)
        density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
        # A complex step makes np.mgrid produce that many points (linspace-style).
        g_s = complex(gridsize[0])
        x_x, y_y = np.mgrid[xmin:xmax:g_s, ymin:ymax:g_s]
        ax.grid(False)
        ax.set_xlim(xmin, xmax)
        ax.set_ylim(ymin, ymax)
        if contour:
            qcfs = ax.contourf(x_x, y_y, density, antialiased=True, **contourf_kwargs)
            qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
            if not fill_last:
                # Make the first (lowest-density) contour level transparent.
                qcfs.collections[0].set_alpha(0)
                qcs.collections[0].set_alpha(0)
        else:
            ax.pcolormesh(x_x, y_y, density, **pcolormesh_kwargs)
    return ax
constant[1D or 2D KDE plot taking into account boundary conditions.
Parameters
----------
values : array-like
Values to plot
values2 : array-like, optional
Values to plot. If present, a 2D KDE will be estimated
cumulative : bool
If true plot the estimated cumulative distribution function. Defaults to False.
Ignored for 2D KDE
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE
label : string
Text to include as part of the legend
bw : float
Bandwidth scaling factor for 1D KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's
rule of thumb (the default rule used by SciPy).
quantiles : list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to None.
rotated : bool
Whether to rotate the 1D KDE plot 90 degrees.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
plot_kwargs : dict
Keywords passed to the pdf line of a 1D KDE.
fill_kwargs : dict
Keywords passed to the fill under the line (use fill_kwargs={'alpha': 0} to disable fill).
Ignored for 2D KDE
rug_kwargs : dict
Keywords passed to the rug plot. Ignored if rug=False or for 2D KDE
Use `space` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot.
contour_kwargs : dict
Keywords passed to ax.contour. Ignored for 1D KDE.
contourf_kwargs : dict
Keywords passed to ax.contourf. Ignored for 1D KDE.
pcolormesh_kwargs : dict
Keywords passed to ax.pcolormesh. Ignored for 1D KDE.
ax : matplotlib axes
legend : bool
Add legend to the figure. By default True.
Returns
-------
ax : matplotlib axes
Examples
--------
Plot default KDE
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
>>> az.plot_kde(mu_posterior)
Plot KDE with rugplot
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rug=True)
Plot a cumulative distribution
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, cumulative=True)
Rotate plot 90 degrees
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rotated=True)
Plot 2d contour KDE
.. plot::
:context: close-figs
>>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
>>> az.plot_kde(mu_posterior, values2=tau_posterior)
Remove fill for last contour in 2d KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, fill_last=False)
Plot 2d smooth KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
]
if compare[name[ax] is constant[None]] begin[:]
variable[ax] assign[=] call[name[plt].gca, parameter[]]
variable[figsize] assign[=] call[call[name[ax].get_figure, parameter[]].get_size_inches, parameter[]]
<ast.Tuple object at 0x7da1b1c7f5b0> assign[=] call[name[_scale_fig_size], parameter[name[figsize], name[textsize], constant[1], constant[1]]]
if call[name[isinstance], parameter[name[values], name[xr].Dataset]] begin[:]
<ast.Raise object at 0x7da1b1c7e4d0>
if call[name[isinstance], parameter[name[values], name[InferenceData]]] begin[:]
<ast.Raise object at 0x7da1b1c7edd0>
if compare[name[values2] is constant[None]] begin[:]
if compare[name[plot_kwargs] is constant[None]] begin[:]
variable[plot_kwargs] assign[=] dictionary[[], []]
call[name[plot_kwargs].setdefault, parameter[constant[color], constant[C0]]]
variable[default_color] assign[=] call[name[plot_kwargs].get, parameter[constant[color]]]
if compare[name[fill_kwargs] is constant[None]] begin[:]
variable[fill_kwargs] assign[=] dictionary[[], []]
call[name[fill_kwargs].setdefault, parameter[constant[color], name[default_color]]]
if compare[name[rug_kwargs] is constant[None]] begin[:]
variable[rug_kwargs] assign[=] dictionary[[], []]
call[name[rug_kwargs].setdefault, parameter[constant[marker], <ast.IfExp object at 0x7da1b1c7da20>]]
call[name[rug_kwargs].setdefault, parameter[constant[linestyle], constant[None]]]
call[name[rug_kwargs].setdefault, parameter[constant[color], name[default_color]]]
call[name[rug_kwargs].setdefault, parameter[constant[space], constant[0.2]]]
call[name[plot_kwargs].setdefault, parameter[constant[linewidth], name[linewidth]]]
call[name[rug_kwargs].setdefault, parameter[constant[markersize], binary_operation[constant[2] * name[markersize]]]]
<ast.Tuple object at 0x7da1b1c7dde0> assign[=] call[name[_fast_kde], parameter[name[values], name[cumulative], name[bw]]]
variable[rug_space] assign[=] binary_operation[call[name[max], parameter[name[density]]] * call[name[rug_kwargs].pop, parameter[constant[space]]]]
variable[x] assign[=] call[name[np].linspace, parameter[name[lower], name[upper], call[name[len], parameter[name[density]]]]]
if name[cumulative] begin[:]
variable[density_q] assign[=] name[density]
variable[fill_func] assign[=] name[ax].fill_between
<ast.Tuple object at 0x7da1b1c7e860> assign[=] tuple[[<ast.Name object at 0x7da1b1c7e710>, <ast.Name object at 0x7da1b1c7c100>]]
if name[rotated] begin[:]
<ast.Tuple object at 0x7da1b1c7f310> assign[=] tuple[[<ast.Name object at 0x7da1b1c7cac0>, <ast.Name object at 0x7da1b1c7d450>]]
variable[fill_func] assign[=] name[ax].fill_betweenx
call[name[ax].tick_params, parameter[]]
if name[rotated] begin[:]
call[name[ax].set_xlim, parameter[constant[0]]]
<ast.Tuple object at 0x7da1b1c7ef50> assign[=] tuple[[<ast.BinOp object at 0x7da1b1c7fe50>, <ast.Name object at 0x7da1b1c7dc90>]]
if name[rug] begin[:]
call[name[ax].plot, parameter[name[rug_x], name[rug_y]]]
if compare[name[quantiles] is_not constant[None]] begin[:]
call[name[fill_kwargs].setdefault, parameter[constant[alpha], constant[0.75]]]
variable[idx] assign[=] <ast.ListComp object at 0x7da1b26ae5f0>
call[name[fill_func], parameter[name[fill_x], name[fill_y]]]
if <ast.BoolOp object at 0x7da1b1bd7280> begin[:]
variable[legend_element] assign[=] list[[<ast.Call object at 0x7da1b1bd74f0>]]
call[name[ax].legend, parameter[]]
return[name[ax]] | keyword[def] identifier[plot_kde] (
identifier[values] ,
identifier[values2] = keyword[None] ,
identifier[cumulative] = keyword[False] ,
identifier[rug] = keyword[False] ,
identifier[label] = keyword[None] ,
identifier[bw] = literal[int] ,
identifier[quantiles] = keyword[None] ,
identifier[rotated] = keyword[False] ,
identifier[contour] = keyword[True] ,
identifier[fill_last] = keyword[True] ,
identifier[textsize] = keyword[None] ,
identifier[plot_kwargs] = keyword[None] ,
identifier[fill_kwargs] = keyword[None] ,
identifier[rug_kwargs] = keyword[None] ,
identifier[contour_kwargs] = keyword[None] ,
identifier[contourf_kwargs] = keyword[None] ,
identifier[pcolormesh_kwargs] = keyword[None] ,
identifier[ax] = keyword[None] ,
identifier[legend] = keyword[True] ,
):
literal[string]
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[figsize] = identifier[ax] . identifier[get_figure] (). identifier[get_size_inches] ()
identifier[figsize] ,* identifier[_] , identifier[xt_labelsize] , identifier[linewidth] , identifier[markersize] = identifier[_scale_fig_size] ( identifier[figsize] , identifier[textsize] , literal[int] , literal[int] )
keyword[if] identifier[isinstance] ( identifier[values] , identifier[xr] . identifier[Dataset] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] identifier[isinstance] ( identifier[values] , identifier[InferenceData] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[values2] keyword[is] keyword[None] :
keyword[if] identifier[plot_kwargs] keyword[is] keyword[None] :
identifier[plot_kwargs] ={}
identifier[plot_kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[default_color] = identifier[plot_kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[fill_kwargs] keyword[is] keyword[None] :
identifier[fill_kwargs] ={}
identifier[fill_kwargs] . identifier[setdefault] ( literal[string] , identifier[default_color] )
keyword[if] identifier[rug_kwargs] keyword[is] keyword[None] :
identifier[rug_kwargs] ={}
identifier[rug_kwargs] . identifier[setdefault] ( literal[string] , literal[string] keyword[if] identifier[rotated] keyword[else] literal[string] )
identifier[rug_kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[rug_kwargs] . identifier[setdefault] ( literal[string] , identifier[default_color] )
identifier[rug_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[plot_kwargs] . identifier[setdefault] ( literal[string] , identifier[linewidth] )
identifier[rug_kwargs] . identifier[setdefault] ( literal[string] , literal[int] * identifier[markersize] )
identifier[density] , identifier[lower] , identifier[upper] = identifier[_fast_kde] ( identifier[values] , identifier[cumulative] , identifier[bw] )
identifier[rug_space] = identifier[max] ( identifier[density] )* identifier[rug_kwargs] . identifier[pop] ( literal[string] )
identifier[x] = identifier[np] . identifier[linspace] ( identifier[lower] , identifier[upper] , identifier[len] ( identifier[density] ))
keyword[if] identifier[cumulative] :
identifier[density_q] = identifier[density]
keyword[else] :
identifier[density_q] = identifier[density] . identifier[cumsum] ()/ identifier[density] . identifier[sum] ()
identifier[fill_func] = identifier[ax] . identifier[fill_between]
identifier[fill_x] , identifier[fill_y] = identifier[x] , identifier[density]
keyword[if] identifier[rotated] :
identifier[x] , identifier[density] = identifier[density] , identifier[x]
identifier[fill_func] = identifier[ax] . identifier[fill_betweenx]
identifier[ax] . identifier[tick_params] ( identifier[labelsize] = identifier[xt_labelsize] )
keyword[if] identifier[rotated] :
identifier[ax] . identifier[set_xlim] ( literal[int] , identifier[auto] = keyword[True] )
identifier[rug_x] , identifier[rug_y] = identifier[np] . identifier[zeros_like] ( identifier[values] )- identifier[rug_space] , identifier[values]
keyword[else] :
identifier[ax] . identifier[set_ylim] ( literal[int] , identifier[auto] = keyword[True] )
identifier[rug_x] , identifier[rug_y] = identifier[values] , identifier[np] . identifier[zeros_like] ( identifier[values] )- identifier[rug_space]
keyword[if] identifier[rug] :
identifier[ax] . identifier[plot] ( identifier[rug_x] , identifier[rug_y] ,** identifier[rug_kwargs] )
keyword[if] identifier[quantiles] keyword[is] keyword[not] keyword[None] :
identifier[fill_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[idx] =[ identifier[np] . identifier[sum] ( identifier[density_q] < identifier[quant] ) keyword[for] identifier[quant] keyword[in] identifier[quantiles] ]
identifier[fill_func] (
identifier[fill_x] ,
identifier[fill_y] ,
identifier[where] = identifier[np] . identifier[isin] ( identifier[fill_x] , identifier[fill_x] [ identifier[idx] ], identifier[invert] = keyword[True] , identifier[assume_unique] = keyword[True] ),
** identifier[fill_kwargs]
)
keyword[else] :
identifier[fill_kwargs] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[ax] . identifier[plot] ( identifier[x] , identifier[density] , identifier[label] = identifier[label] ,** identifier[plot_kwargs] )
identifier[fill_func] ( identifier[fill_x] , identifier[fill_y] ,** identifier[fill_kwargs] )
keyword[if] identifier[legend] keyword[and] identifier[label] :
identifier[legend_element] =[ identifier[Patch] ( identifier[edgecolor] = identifier[default_color] , identifier[label] = identifier[label] )]
identifier[ax] . identifier[legend] ( identifier[handles] = identifier[legend_element] )
keyword[else] :
keyword[if] identifier[contour_kwargs] keyword[is] keyword[None] :
identifier[contour_kwargs] ={}
identifier[contour_kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
keyword[if] identifier[contourf_kwargs] keyword[is] keyword[None] :
identifier[contourf_kwargs] ={}
keyword[if] identifier[pcolormesh_kwargs] keyword[is] keyword[None] :
identifier[pcolormesh_kwargs] ={}
identifier[gridsize] =( literal[int] , literal[int] ) keyword[if] identifier[contour] keyword[else] ( literal[int] , literal[int] )
identifier[density] , identifier[xmin] , identifier[xmax] , identifier[ymin] , identifier[ymax] = identifier[_fast_kde_2d] ( identifier[values] , identifier[values2] , identifier[gridsize] = identifier[gridsize] )
identifier[g_s] = identifier[complex] ( identifier[gridsize] [ literal[int] ])
identifier[x_x] , identifier[y_y] = identifier[np] . identifier[mgrid] [ identifier[xmin] : identifier[xmax] : identifier[g_s] , identifier[ymin] : identifier[ymax] : identifier[g_s] ]
identifier[ax] . identifier[grid] ( keyword[False] )
identifier[ax] . identifier[set_xlim] ( identifier[xmin] , identifier[xmax] )
identifier[ax] . identifier[set_ylim] ( identifier[ymin] , identifier[ymax] )
keyword[if] identifier[contour] :
identifier[qcfs] = identifier[ax] . identifier[contourf] ( identifier[x_x] , identifier[y_y] , identifier[density] , identifier[antialiased] = keyword[True] ,** identifier[contourf_kwargs] )
identifier[qcs] = identifier[ax] . identifier[contour] ( identifier[x_x] , identifier[y_y] , identifier[density] ,** identifier[contour_kwargs] )
keyword[if] keyword[not] identifier[fill_last] :
identifier[qcfs] . identifier[collections] [ literal[int] ]. identifier[set_alpha] ( literal[int] )
identifier[qcs] . identifier[collections] [ literal[int] ]. identifier[set_alpha] ( literal[int] )
keyword[else] :
identifier[ax] . identifier[pcolormesh] ( identifier[x_x] , identifier[y_y] , identifier[density] ,** identifier[pcolormesh_kwargs] )
keyword[return] identifier[ax] | def plot_kde(values, values2=None, cumulative=False, rug=False, label=None, bw=4.5, quantiles=None, rotated=False, contour=True, fill_last=True, textsize=None, plot_kwargs=None, fill_kwargs=None, rug_kwargs=None, contour_kwargs=None, contourf_kwargs=None, pcolormesh_kwargs=None, ax=None, legend=True):
"""1D or 2D KDE plot taking into account boundary conditions.
Parameters
----------
values : array-like
Values to plot
values2 : array-like, optional
Values to plot. If present, a 2D KDE will be estimated
cumulative : bool
If true plot the estimated cumulative distribution function. Defaults to False.
Ignored for 2D KDE
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE
label : string
Text to include as part of the legend
bw : float
Bandwidth scaling factor for 1D KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's
rule of thumb (the default rule used by SciPy).
quantiles : list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to None.
rotated : bool
Whether to rotate the 1D KDE plot 90 degrees.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
plot_kwargs : dict
Keywords passed to the pdf line of a 1D KDE.
fill_kwargs : dict
Keywords passed to the fill under the line (use fill_kwargs={'alpha': 0} to disable fill).
Ignored for 2D KDE
rug_kwargs : dict
Keywords passed to the rug plot. Ignored if rug=False or for 2D KDE
Use `space` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot.
contour_kwargs : dict
Keywords passed to ax.contour. Ignored for 1D KDE.
contourf_kwargs : dict
Keywords passed to ax.contourf. Ignored for 1D KDE.
pcolormesh_kwargs : dict
Keywords passed to ax.pcolormesh. Ignored for 1D KDE.
ax : matplotlib axes
legend : bool
Add legend to the figure. By default True.
Returns
-------
ax : matplotlib axes
Examples
--------
Plot default KDE
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
>>> az.plot_kde(mu_posterior)
Plot KDE with rugplot
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rug=True)
Plot a cumulative distribution
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, cumulative=True)
Rotate plot 90 degrees
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rotated=True)
Plot 2d contour KDE
.. plot::
:context: close-figs
>>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
>>> az.plot_kde(mu_posterior, values2=tau_posterior)
Remove fill for last contour in 2d KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, fill_last=False)
Plot 2d smooth KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
"""
if ax is None:
ax = plt.gca() # depends on [control=['if'], data=['ax']]
figsize = ax.get_figure().get_size_inches()
(figsize, *_, xt_labelsize, linewidth, markersize) = _scale_fig_size(figsize, textsize, 1, 1)
if isinstance(values, xr.Dataset):
raise ValueError('Xarray dataset object detected.Use plot_posterior, plot_density, plot_jointor plot_pair instead of plot_kde') # depends on [control=['if'], data=[]]
if isinstance(values, InferenceData):
raise ValueError(' Inference Data object detected. Use plot_posterior instead of plot_kde') # depends on [control=['if'], data=[]]
if values2 is None:
if plot_kwargs is None:
plot_kwargs = {} # depends on [control=['if'], data=['plot_kwargs']]
plot_kwargs.setdefault('color', 'C0')
default_color = plot_kwargs.get('color')
if fill_kwargs is None:
fill_kwargs = {} # depends on [control=['if'], data=['fill_kwargs']]
fill_kwargs.setdefault('color', default_color)
if rug_kwargs is None:
rug_kwargs = {} # depends on [control=['if'], data=['rug_kwargs']]
rug_kwargs.setdefault('marker', '_' if rotated else '|')
rug_kwargs.setdefault('linestyle', 'None')
rug_kwargs.setdefault('color', default_color)
rug_kwargs.setdefault('space', 0.2)
plot_kwargs.setdefault('linewidth', linewidth)
rug_kwargs.setdefault('markersize', 2 * markersize)
(density, lower, upper) = _fast_kde(values, cumulative, bw)
rug_space = max(density) * rug_kwargs.pop('space')
x = np.linspace(lower, upper, len(density))
if cumulative:
density_q = density # depends on [control=['if'], data=[]]
else:
density_q = density.cumsum() / density.sum()
fill_func = ax.fill_between
(fill_x, fill_y) = (x, density)
if rotated:
(x, density) = (density, x)
fill_func = ax.fill_betweenx # depends on [control=['if'], data=[]]
ax.tick_params(labelsize=xt_labelsize)
if rotated:
ax.set_xlim(0, auto=True)
(rug_x, rug_y) = (np.zeros_like(values) - rug_space, values) # depends on [control=['if'], data=[]]
else:
ax.set_ylim(0, auto=True)
(rug_x, rug_y) = (values, np.zeros_like(values) - rug_space)
if rug:
ax.plot(rug_x, rug_y, **rug_kwargs) # depends on [control=['if'], data=[]]
if quantiles is not None:
fill_kwargs.setdefault('alpha', 0.75)
idx = [np.sum(density_q < quant) for quant in quantiles]
fill_func(fill_x, fill_y, where=np.isin(fill_x, fill_x[idx], invert=True, assume_unique=True), **fill_kwargs) # depends on [control=['if'], data=['quantiles']]
else:
fill_kwargs.setdefault('alpha', 0)
ax.plot(x, density, label=label, **plot_kwargs)
fill_func(fill_x, fill_y, **fill_kwargs)
if legend and label:
legend_element = [Patch(edgecolor=default_color, label=label)]
ax.legend(handles=legend_element) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if contour_kwargs is None:
contour_kwargs = {} # depends on [control=['if'], data=['contour_kwargs']]
contour_kwargs.setdefault('colors', '0.5')
if contourf_kwargs is None:
contourf_kwargs = {} # depends on [control=['if'], data=['contourf_kwargs']]
if pcolormesh_kwargs is None:
pcolormesh_kwargs = {} # depends on [control=['if'], data=['pcolormesh_kwargs']]
gridsize = (128, 128) if contour else (256, 256)
(density, xmin, xmax, ymin, ymax) = _fast_kde_2d(values, values2, gridsize=gridsize)
g_s = complex(gridsize[0])
(x_x, y_y) = np.mgrid[xmin:xmax:g_s, ymin:ymax:g_s]
ax.grid(False)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
if contour:
qcfs = ax.contourf(x_x, y_y, density, antialiased=True, **contourf_kwargs)
qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
if not fill_last:
qcfs.collections[0].set_alpha(0)
qcs.collections[0].set_alpha(0) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
ax.pcolormesh(x_x, y_y, density, **pcolormesh_kwargs)
return ax |
def set_resource_value(self, device_id, resource_path, resource_value,
fix_path=True, timeout=None):
"""Set resource value for given resource path, on device.
Will block and wait for response to come through. Usage:
.. code-block:: python
try:
v = api.set_resource_value(device, path, value)
print("Success, new value:", v)
except AsyncError, e:
print("Error", e)
:param str device_id: The name/id of the device (Required)
:param str resource_path: The resource path to update (Required)
:param str resource_value: The new value to set for given path
:param fix_path: Unused
:param timeout: Timeout in seconds
:raises: AsyncError
:returns: The value of the new resource
:rtype: str
"""
self.ensure_notifications_thread()
return self.set_resource_value_async(
device_id, resource_path, resource_value
).wait(timeout) | def function[set_resource_value, parameter[self, device_id, resource_path, resource_value, fix_path, timeout]]:
constant[Set resource value for given resource path, on device.
Will block and wait for response to come through. Usage:
.. code-block:: python
try:
v = api.set_resource_value(device, path, value)
print("Success, new value:", v)
except AsyncError, e:
print("Error", e)
:param str device_id: The name/id of the device (Required)
:param str resource_path: The resource path to update (Required)
:param str resource_value: The new value to set for given path
:param fix_path: Unused
:param timeout: Timeout in seconds
:raises: AsyncError
:returns: The value of the new resource
:rtype: str
]
call[name[self].ensure_notifications_thread, parameter[]]
return[call[call[name[self].set_resource_value_async, parameter[name[device_id], name[resource_path], name[resource_value]]].wait, parameter[name[timeout]]]] | keyword[def] identifier[set_resource_value] ( identifier[self] , identifier[device_id] , identifier[resource_path] , identifier[resource_value] ,
identifier[fix_path] = keyword[True] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[self] . identifier[ensure_notifications_thread] ()
keyword[return] identifier[self] . identifier[set_resource_value_async] (
identifier[device_id] , identifier[resource_path] , identifier[resource_value]
). identifier[wait] ( identifier[timeout] ) | def set_resource_value(self, device_id, resource_path, resource_value, fix_path=True, timeout=None):
"""Set resource value for given resource path, on device.
Will block and wait for response to come through. Usage:
.. code-block:: python
try:
v = api.set_resource_value(device, path, value)
print("Success, new value:", v)
except AsyncError, e:
print("Error", e)
:param str device_id: The name/id of the device (Required)
:param str resource_path: The resource path to update (Required)
:param str resource_value: The new value to set for given path
:param fix_path: Unused
:param timeout: Timeout in seconds
:raises: AsyncError
:returns: The value of the new resource
:rtype: str
"""
self.ensure_notifications_thread()
return self.set_resource_value_async(device_id, resource_path, resource_value).wait(timeout) |
def flushOutBoxes(self) -> None:
"""
Clear the outBoxes and transmit batched messages to remotes.
"""
removedRemotes = []
for rid, msgs in self.outBoxes.items():
try:
dest = self.remotes[rid].name
except KeyError:
removedRemotes.append(rid)
continue
if msgs:
if self._should_batch(msgs):
logger.trace(
"{} batching {} msgs to {} into fewer transmissions".
format(self, len(msgs), dest))
logger.trace(" messages: {}".format(msgs))
batches = split_messages_on_batches(list(msgs),
self._make_batch,
self._test_batch_len,
)
msgs.clear()
if batches:
for batch, size in batches:
logger.trace("{} sending payload to {}: {}".format(
self, dest, batch))
self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, size)
# Setting timeout to never expire
self.transmit(
batch,
rid,
timeout=self.messageTimeout,
serialized=True)
else:
logger.error("{} cannot create batch(es) for {}".format(self, dest))
else:
while msgs:
msg = msgs.popleft()
logger.trace(
"{} sending msg {} to {}".format(self, msg, dest))
self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, 1)
# Setting timeout to never expire
self.transmit(msg, rid, timeout=self.messageTimeout,
serialized=True)
for rid in removedRemotes:
logger.warning("{}{} has removed rid {}"
.format(CONNECTION_PREFIX, self,
z85_to_friendly(rid)),
extra={"cli": False})
msgs = self.outBoxes[rid]
if msgs:
self.discard(msgs,
"{}rid {} no longer available"
.format(CONNECTION_PREFIX,
z85_to_friendly(rid)),
logMethod=logger.debug)
del self.outBoxes[rid] | def function[flushOutBoxes, parameter[self]]:
constant[
Clear the outBoxes and transmit batched messages to remotes.
]
variable[removedRemotes] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18dc98370>, <ast.Name object at 0x7da18dc98250>]]] in starred[call[name[self].outBoxes.items, parameter[]]] begin[:]
<ast.Try object at 0x7da18dc9a500>
if name[msgs] begin[:]
if call[name[self]._should_batch, parameter[name[msgs]]] begin[:]
call[name[logger].trace, parameter[call[constant[{} batching {} msgs to {} into fewer transmissions].format, parameter[name[self], call[name[len], parameter[name[msgs]]], name[dest]]]]]
call[name[logger].trace, parameter[call[constant[ messages: {}].format, parameter[name[msgs]]]]]
variable[batches] assign[=] call[name[split_messages_on_batches], parameter[call[name[list], parameter[name[msgs]]], name[self]._make_batch, name[self]._test_batch_len]]
call[name[msgs].clear, parameter[]]
if name[batches] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18dc99d80>, <ast.Name object at 0x7da18dc98d90>]]] in starred[name[batches]] begin[:]
call[name[logger].trace, parameter[call[constant[{} sending payload to {}: {}].format, parameter[name[self], name[dest], name[batch]]]]]
call[name[self].metrics.add_event, parameter[name[MetricsName].TRANSPORT_BATCH_SIZE, name[size]]]
call[name[self].transmit, parameter[name[batch], name[rid]]]
for taget[name[rid]] in starred[name[removedRemotes]] begin[:]
call[name[logger].warning, parameter[call[constant[{}{} has removed rid {}].format, parameter[name[CONNECTION_PREFIX], name[self], call[name[z85_to_friendly], parameter[name[rid]]]]]]]
variable[msgs] assign[=] call[name[self].outBoxes][name[rid]]
if name[msgs] begin[:]
call[name[self].discard, parameter[name[msgs], call[constant[{}rid {} no longer available].format, parameter[name[CONNECTION_PREFIX], call[name[z85_to_friendly], parameter[name[rid]]]]]]]
<ast.Delete object at 0x7da1b170c2e0> | keyword[def] identifier[flushOutBoxes] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[removedRemotes] =[]
keyword[for] identifier[rid] , identifier[msgs] keyword[in] identifier[self] . identifier[outBoxes] . identifier[items] ():
keyword[try] :
identifier[dest] = identifier[self] . identifier[remotes] [ identifier[rid] ]. identifier[name]
keyword[except] identifier[KeyError] :
identifier[removedRemotes] . identifier[append] ( identifier[rid] )
keyword[continue]
keyword[if] identifier[msgs] :
keyword[if] identifier[self] . identifier[_should_batch] ( identifier[msgs] ):
identifier[logger] . identifier[trace] (
literal[string] .
identifier[format] ( identifier[self] , identifier[len] ( identifier[msgs] ), identifier[dest] ))
identifier[logger] . identifier[trace] ( literal[string] . identifier[format] ( identifier[msgs] ))
identifier[batches] = identifier[split_messages_on_batches] ( identifier[list] ( identifier[msgs] ),
identifier[self] . identifier[_make_batch] ,
identifier[self] . identifier[_test_batch_len] ,
)
identifier[msgs] . identifier[clear] ()
keyword[if] identifier[batches] :
keyword[for] identifier[batch] , identifier[size] keyword[in] identifier[batches] :
identifier[logger] . identifier[trace] ( literal[string] . identifier[format] (
identifier[self] , identifier[dest] , identifier[batch] ))
identifier[self] . identifier[metrics] . identifier[add_event] ( identifier[MetricsName] . identifier[TRANSPORT_BATCH_SIZE] , identifier[size] )
identifier[self] . identifier[transmit] (
identifier[batch] ,
identifier[rid] ,
identifier[timeout] = identifier[self] . identifier[messageTimeout] ,
identifier[serialized] = keyword[True] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[self] , identifier[dest] ))
keyword[else] :
keyword[while] identifier[msgs] :
identifier[msg] = identifier[msgs] . identifier[popleft] ()
identifier[logger] . identifier[trace] (
literal[string] . identifier[format] ( identifier[self] , identifier[msg] , identifier[dest] ))
identifier[self] . identifier[metrics] . identifier[add_event] ( identifier[MetricsName] . identifier[TRANSPORT_BATCH_SIZE] , literal[int] )
identifier[self] . identifier[transmit] ( identifier[msg] , identifier[rid] , identifier[timeout] = identifier[self] . identifier[messageTimeout] ,
identifier[serialized] = keyword[True] )
keyword[for] identifier[rid] keyword[in] identifier[removedRemotes] :
identifier[logger] . identifier[warning] ( literal[string]
. identifier[format] ( identifier[CONNECTION_PREFIX] , identifier[self] ,
identifier[z85_to_friendly] ( identifier[rid] )),
identifier[extra] ={ literal[string] : keyword[False] })
identifier[msgs] = identifier[self] . identifier[outBoxes] [ identifier[rid] ]
keyword[if] identifier[msgs] :
identifier[self] . identifier[discard] ( identifier[msgs] ,
literal[string]
. identifier[format] ( identifier[CONNECTION_PREFIX] ,
identifier[z85_to_friendly] ( identifier[rid] )),
identifier[logMethod] = identifier[logger] . identifier[debug] )
keyword[del] identifier[self] . identifier[outBoxes] [ identifier[rid] ] | def flushOutBoxes(self) -> None:
"""
Clear the outBoxes and transmit batched messages to remotes.
"""
removedRemotes = []
for (rid, msgs) in self.outBoxes.items():
try:
dest = self.remotes[rid].name # depends on [control=['try'], data=[]]
except KeyError:
removedRemotes.append(rid)
continue # depends on [control=['except'], data=[]]
if msgs:
if self._should_batch(msgs):
logger.trace('{} batching {} msgs to {} into fewer transmissions'.format(self, len(msgs), dest))
logger.trace(' messages: {}'.format(msgs))
batches = split_messages_on_batches(list(msgs), self._make_batch, self._test_batch_len)
msgs.clear()
if batches:
for (batch, size) in batches:
logger.trace('{} sending payload to {}: {}'.format(self, dest, batch))
self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, size)
# Setting timeout to never expire
self.transmit(batch, rid, timeout=self.messageTimeout, serialized=True) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
logger.error('{} cannot create batch(es) for {}'.format(self, dest)) # depends on [control=['if'], data=[]]
else:
while msgs:
msg = msgs.popleft()
logger.trace('{} sending msg {} to {}'.format(self, msg, dest))
self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, 1)
# Setting timeout to never expire
self.transmit(msg, rid, timeout=self.messageTimeout, serialized=True) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for rid in removedRemotes:
logger.warning('{}{} has removed rid {}'.format(CONNECTION_PREFIX, self, z85_to_friendly(rid)), extra={'cli': False})
msgs = self.outBoxes[rid]
if msgs:
self.discard(msgs, '{}rid {} no longer available'.format(CONNECTION_PREFIX, z85_to_friendly(rid)), logMethod=logger.debug) # depends on [control=['if'], data=[]]
del self.outBoxes[rid] # depends on [control=['for'], data=['rid']] |
def __initialize_node(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)):
"""
Initializes the node.
:param attributes_flags: Attributes flags.
:type attributes_flags: int
"""
attributes = dir(self.__component)
for attribute in attributes:
if attribute == "name":
continue
if not "_Profile__{0}".format(attribute) in attributes:
continue
value = getattr(self.__component, attribute)
value = ", ".join(value) if type(value) in (tuple, list) else value
roles = {Qt.DisplayRole: value,
Qt.EditRole: value}
self[attribute] = umbra.ui.nodes.GraphModelAttribute(attribute, value, roles, attributes_flags)
self.update_tool_tip() | def function[__initialize_node, parameter[self, attributes_flags]]:
constant[
Initializes the node.
:param attributes_flags: Attributes flags.
:type attributes_flags: int
]
variable[attributes] assign[=] call[name[dir], parameter[name[self].__component]]
for taget[name[attribute]] in starred[name[attributes]] begin[:]
if compare[name[attribute] equal[==] constant[name]] begin[:]
continue
if <ast.UnaryOp object at 0x7da1b09b84f0> begin[:]
continue
variable[value] assign[=] call[name[getattr], parameter[name[self].__component, name[attribute]]]
variable[value] assign[=] <ast.IfExp object at 0x7da1b09ba410>
variable[roles] assign[=] dictionary[[<ast.Attribute object at 0x7da1b09bba60>, <ast.Attribute object at 0x7da1b09b8400>], [<ast.Name object at 0x7da1b09ba710>, <ast.Name object at 0x7da1b09bbaf0>]]
call[name[self]][name[attribute]] assign[=] call[name[umbra].ui.nodes.GraphModelAttribute, parameter[name[attribute], name[value], name[roles], name[attributes_flags]]]
call[name[self].update_tool_tip, parameter[]] | keyword[def] identifier[__initialize_node] ( identifier[self] , identifier[attributes_flags] = identifier[int] ( identifier[Qt] . identifier[ItemIsSelectable] | identifier[Qt] . identifier[ItemIsEnabled] )):
literal[string]
identifier[attributes] = identifier[dir] ( identifier[self] . identifier[__component] )
keyword[for] identifier[attribute] keyword[in] identifier[attributes] :
keyword[if] identifier[attribute] == literal[string] :
keyword[continue]
keyword[if] keyword[not] literal[string] . identifier[format] ( identifier[attribute] ) keyword[in] identifier[attributes] :
keyword[continue]
identifier[value] = identifier[getattr] ( identifier[self] . identifier[__component] , identifier[attribute] )
identifier[value] = literal[string] . identifier[join] ( identifier[value] ) keyword[if] identifier[type] ( identifier[value] ) keyword[in] ( identifier[tuple] , identifier[list] ) keyword[else] identifier[value]
identifier[roles] ={ identifier[Qt] . identifier[DisplayRole] : identifier[value] ,
identifier[Qt] . identifier[EditRole] : identifier[value] }
identifier[self] [ identifier[attribute] ]= identifier[umbra] . identifier[ui] . identifier[nodes] . identifier[GraphModelAttribute] ( identifier[attribute] , identifier[value] , identifier[roles] , identifier[attributes_flags] )
identifier[self] . identifier[update_tool_tip] () | def __initialize_node(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)):
"""
Initializes the node.
:param attributes_flags: Attributes flags.
:type attributes_flags: int
"""
attributes = dir(self.__component)
for attribute in attributes:
if attribute == 'name':
continue # depends on [control=['if'], data=[]]
if not '_Profile__{0}'.format(attribute) in attributes:
continue # depends on [control=['if'], data=[]]
value = getattr(self.__component, attribute)
value = ', '.join(value) if type(value) in (tuple, list) else value
roles = {Qt.DisplayRole: value, Qt.EditRole: value}
self[attribute] = umbra.ui.nodes.GraphModelAttribute(attribute, value, roles, attributes_flags) # depends on [control=['for'], data=['attribute']]
self.update_tool_tip() |
def get_alert(self, id, **kwargs):  # noqa: E501
    """Fetch a single alert by its id.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the deserialized result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert, or the request thread when the call
        is made asynchronously.
    """
    # Ask the low-level call for just the payload, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand the thread straight back to the caller.
        return self.get_alert_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous path: block and return the deserialized response.
    response = self.get_alert_with_http_info(id, **kwargs)  # noqa: E501
    return response
constant[Get a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_alert(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].get_alert_with_http_info, parameter[name[id]]]] | keyword[def] identifier[get_alert] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_alert_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[get_alert_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def get_alert(self, id, **kwargs): # noqa: E501
'Get a specific alert # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_alert(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: (required)\n :return: ResponseContainerAlert\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_alert_with_http_info(id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.get_alert_with_http_info(id, **kwargs) # noqa: E501
return data |
def _get_pkg_install_time(pkg, arch=None):
'''
Return package install time, based on the /var/lib/dpkg/info/<package>.list
:return:
'''
iso_time = iso_time_t = None
loc_root = '/var/lib/dpkg/info'
if pkg is not None:
locations = []
if arch is not None and arch != 'all':
locations.append(os.path.join(loc_root, '{0}:{1}.list'.format(pkg, arch)))
locations.append(os.path.join(loc_root, '{0}.list'.format(pkg)))
for location in locations:
try:
iso_time_t = int(os.path.getmtime(location))
iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z'
break
except OSError:
pass
if iso_time is None:
log.debug('Unable to get package installation time for package "%s".', pkg)
return iso_time, iso_time_t | def function[_get_pkg_install_time, parameter[pkg, arch]]:
constant[
Return package install time, based on the /var/lib/dpkg/info/<package>.list
:return:
]
variable[iso_time] assign[=] constant[None]
variable[loc_root] assign[=] constant[/var/lib/dpkg/info]
if compare[name[pkg] is_not constant[None]] begin[:]
variable[locations] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b215e740> begin[:]
call[name[locations].append, parameter[call[name[os].path.join, parameter[name[loc_root], call[constant[{0}:{1}.list].format, parameter[name[pkg], name[arch]]]]]]]
call[name[locations].append, parameter[call[name[os].path.join, parameter[name[loc_root], call[constant[{0}.list].format, parameter[name[pkg]]]]]]]
for taget[name[location]] in starred[name[locations]] begin[:]
<ast.Try object at 0x7da1b215db40>
if compare[name[iso_time] is constant[None]] begin[:]
call[name[log].debug, parameter[constant[Unable to get package installation time for package "%s".], name[pkg]]]
return[tuple[[<ast.Name object at 0x7da1b215de70>, <ast.Name object at 0x7da1b215d030>]]] | keyword[def] identifier[_get_pkg_install_time] ( identifier[pkg] , identifier[arch] = keyword[None] ):
literal[string]
identifier[iso_time] = identifier[iso_time_t] = keyword[None]
identifier[loc_root] = literal[string]
keyword[if] identifier[pkg] keyword[is] keyword[not] keyword[None] :
identifier[locations] =[]
keyword[if] identifier[arch] keyword[is] keyword[not] keyword[None] keyword[and] identifier[arch] != literal[string] :
identifier[locations] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[loc_root] , literal[string] . identifier[format] ( identifier[pkg] , identifier[arch] )))
identifier[locations] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[loc_root] , literal[string] . identifier[format] ( identifier[pkg] )))
keyword[for] identifier[location] keyword[in] identifier[locations] :
keyword[try] :
identifier[iso_time_t] = identifier[int] ( identifier[os] . identifier[path] . identifier[getmtime] ( identifier[location] ))
identifier[iso_time] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[iso_time_t] ). identifier[isoformat] ()+ literal[string]
keyword[break]
keyword[except] identifier[OSError] :
keyword[pass]
keyword[if] identifier[iso_time] keyword[is] keyword[None] :
identifier[log] . identifier[debug] ( literal[string] , identifier[pkg] )
keyword[return] identifier[iso_time] , identifier[iso_time_t] | def _get_pkg_install_time(pkg, arch=None):
"""
Return package install time, based on the /var/lib/dpkg/info/<package>.list
:return:
"""
iso_time = iso_time_t = None
loc_root = '/var/lib/dpkg/info'
if pkg is not None:
locations = []
if arch is not None and arch != 'all':
locations.append(os.path.join(loc_root, '{0}:{1}.list'.format(pkg, arch))) # depends on [control=['if'], data=[]]
locations.append(os.path.join(loc_root, '{0}.list'.format(pkg)))
for location in locations:
try:
iso_time_t = int(os.path.getmtime(location))
iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z'
break # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['location']]
if iso_time is None:
log.debug('Unable to get package installation time for package "%s".', pkg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pkg']]
return (iso_time, iso_time_t) |
async def handle_frame(self, frame):
    """Handle an incoming API frame.

    Marks the command successful and returns True when *frame* is the
    awaited FrameSetUTCConfirmation; returns False (leaving state
    untouched) for any other frame.
    """
    matched = isinstance(frame, FrameSetUTCConfirmation)
    if matched:
        self.success = True
    return matched
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[frame] , identifier[FrameSetUTCConfirmation] ):
keyword[return] keyword[False]
identifier[self] . identifier[success] = keyword[True]
keyword[return] keyword[True] | async def handle_frame(self, frame):
"""Handle incoming API frame, return True if this was the expected frame."""
if not isinstance(frame, FrameSetUTCConfirmation):
return False # depends on [control=['if'], data=[]]
self.success = True
return True |
def assign_shifts_view(request, semester):
    """
    View all members' preferences. This view also includes forms to create an
    entire semester's worth of weekly workshifts.

    :param request: The incoming HTTP request.
    :param semester: The Semester whose workshifts are being assigned.
    :return: The rendered ``assign_shifts.html`` page, or a redirect back to
        this view after a successful form submission.
    """
    page_name = "Assign Shifts"
    auto_assign_shifts_form = None
    random_assign_instances_form = None
    clear_assign_form = None
    # The bulk-assignment forms are only offered once the semester has at
    # least one workshift pool to assign into.
    if WorkshiftPool.objects.filter(semester=semester).count():
        auto_assign_shifts_form = AutoAssignShiftForm(
            data=request.POST if AutoAssignShiftForm.name in request.POST else None,
            semester=semester,
        )
        random_assign_instances_form = RandomAssignInstancesForm(
            data=request.POST if RandomAssignInstancesForm.name in request.POST else None,
            semester=semester,
        )
        clear_assign_form = ClearAssignmentsForm(
            data=request.POST if ClearAssignmentsForm.name in request.POST else None,
            semester=semester,
        )
    forms = [auto_assign_shifts_form, random_assign_instances_form,
             clear_assign_form]
    # Each bulk form redirects back to this view on success (POST-redirect-GET).
    if auto_assign_shifts_form and auto_assign_shifts_form.is_valid():
        unassigned_profiles = auto_assign_shifts_form.save()
        message = "Assigned workshifters to regular workshifts."
        if unassigned_profiles:
            message += " The following workshifters were not given " \
                       "complete assignments: "
            message += ", ".join(i.user.get_full_name() for i in unassigned_profiles)
        messages.add_message(request, messages.INFO, message)
        return HttpResponseRedirect(wurl(
            "workshift:assign_shifts",
            sem_url=semester.sem_url,
        ))
    if random_assign_instances_form and random_assign_instances_form.is_valid():
        unassigned_profiles, unassigned_shifts = \
            random_assign_instances_form.save()
        message = "Assigned workshifters randomly to instances within {}." \
            .format(random_assign_instances_form.cleaned_data["pool"])
        if unassigned_profiles:
            # Fixed typo ("workshifers") and missing leading space in the
            # user-facing message.
            message += " The following workshifters were not given " \
                       "complete assignments: "
            message += ", ".join(i.user.get_full_name() for i in unassigned_profiles)
        messages.add_message(request, messages.INFO, message)
        return HttpResponseRedirect(wurl(
            "workshift:assign_shifts",
            sem_url=semester.sem_url,
        ))
    if clear_assign_form and clear_assign_form.is_valid():
        clear_assign_form.save()
        messages.add_message(
            request,
            messages.INFO,
            "Cleared all workshifters from their regular workshift "
            "assignments",
        )
        return HttpResponseRedirect(wurl(
            "workshift:assign_shifts",
            sem_url=semester.sem_url,
        ))
    # Individual per-shift assignment forms; shifts marked NO_ASSIGN are
    # never assigned through this page.
    shifts = RegularWorkshift.objects.filter(
        pool__semester=semester,
        active=True,
    ).exclude(
        workshift_type__assignment=WorkshiftType.NO_ASSIGN,
    )
    assign_forms = []
    for shift in shifts:
        form = AssignShiftForm(
            data=request.POST if "individual_assign" in request.POST else None,
            prefix="shift-{}".format(shift.pk),
            instance=shift,
            semester=semester,
        )
        assign_forms.append(form)
    # Only save if every per-shift form validates, so a partial submission
    # never half-applies.
    if assign_forms and all(i.is_valid() for i in assign_forms):
        for form in assign_forms:
            form.save()
        messages.add_message(
            request,
            messages.INFO,
            "Workshift assignments saved.",
        )
        return HttpResponseRedirect(wurl(
            "workshift:assign_shifts",
            sem_url=semester.sem_url,
        ))
    workshifters = WorkshiftProfile.objects.filter(semester=semester)
    pools = WorkshiftPool.objects.filter(semester=semester).order_by(
        "-is_primary", "title",
    )
    # Collect, per workshifter, the hours still owed in each pool.  Keep the
    # owing workshifters in a parallel list so the (workshifter, hours) pairs
    # stay aligned -- the previous code zipped the filtered hour rows against
    # the FULL workshifters queryset, pairing hours with the wrong people
    # whenever any member was already fully assigned.
    unassigned_workshifters = []
    pool_hours = []
    for workshifter in workshifters:
        hours_owed = []
        for pool in pools:
            hours = workshifter.pool_hours.get(pool=pool)
            hours_owed.append(hours.hours - hours.assigned_hours)
        if any(i > 0 for i in hours_owed):
            unassigned_workshifters.append(workshifter)
            pool_hours.append(hours_owed)
    # Column totals: one entry per pool, summed over owing workshifters.
    total_pool_hours = [
        sum(hours[i] for hours in pool_hours)
        for i in range(len(pool_hours[0]) if len(pool_hours) > 0 else 0)
    ]
    return render_to_response("assign_shifts.html", {
        "page_name": page_name,
        "forms": forms,
        "assign_forms": assign_forms,
        "unassigned_profiles": zip(unassigned_workshifters, pool_hours),
        "pools": pools,
        "total_pool_hours": total_pool_hours,
    }, context_instance=RequestContext(request))
constant[
View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts.
]
variable[page_name] assign[=] constant[Assign Shifts]
variable[auto_assign_shifts_form] assign[=] constant[None]
variable[random_assign_instances_form] assign[=] constant[None]
variable[clear_assign_form] assign[=] constant[None]
if call[call[name[WorkshiftPool].objects.filter, parameter[]].count, parameter[]] begin[:]
variable[auto_assign_shifts_form] assign[=] call[name[AutoAssignShiftForm], parameter[]]
variable[random_assign_instances_form] assign[=] call[name[RandomAssignInstancesForm], parameter[]]
variable[clear_assign_form] assign[=] call[name[ClearAssignmentsForm], parameter[]]
variable[forms] assign[=] list[[<ast.Name object at 0x7da1b13270d0>, <ast.Name object at 0x7da1b13270a0>, <ast.Name object at 0x7da1b1327070>]]
if <ast.BoolOp object at 0x7da1b1327010> begin[:]
variable[unassigned_profiles] assign[=] call[name[auto_assign_shifts_form].save, parameter[]]
variable[message] assign[=] constant[Assigned workshifters to regular workshifts.]
if name[unassigned_profiles] begin[:]
<ast.AugAssign object at 0x7da1b1326d10>
<ast.AugAssign object at 0x7da1b1326c80>
call[name[messages].add_message, parameter[name[request], name[messages].INFO, name[message]]]
return[call[name[HttpResponseRedirect], parameter[call[name[wurl], parameter[constant[workshift:assign_shifts]]]]]]
if <ast.BoolOp object at 0x7da1b1326620> begin[:]
<ast.Tuple object at 0x7da1b13264d0> assign[=] call[name[random_assign_instances_form].save, parameter[]]
variable[message] assign[=] call[constant[Assigned workshifters randomly to instances within {}.].format, parameter[call[name[random_assign_instances_form].cleaned_data][constant[pool]]]]
if name[unassigned_profiles] begin[:]
<ast.AugAssign object at 0x7da1b13261a0>
<ast.AugAssign object at 0x7da1b1326110>
call[name[messages].add_message, parameter[name[request], name[messages].INFO, name[message]]]
return[call[name[HttpResponseRedirect], parameter[call[name[wurl], parameter[constant[workshift:assign_shifts]]]]]]
if <ast.BoolOp object at 0x7da1b1325ab0> begin[:]
call[name[clear_assign_form].save, parameter[]]
call[name[messages].add_message, parameter[name[request], name[messages].INFO, constant[Cleared all workshifters from their regular workshift assignments]]]
return[call[name[HttpResponseRedirect], parameter[call[name[wurl], parameter[constant[workshift:assign_shifts]]]]]]
variable[shifts] assign[=] call[call[name[RegularWorkshift].objects.filter, parameter[]].exclude, parameter[]]
variable[assign_forms] assign[=] list[[]]
for taget[name[shift]] in starred[name[shifts]] begin[:]
variable[form] assign[=] call[name[AssignShiftForm], parameter[]]
call[name[assign_forms].append, parameter[name[form]]]
if <ast.BoolOp object at 0x7da1b1497910> begin[:]
for taget[name[form]] in starred[name[assign_forms]] begin[:]
call[name[form].save, parameter[]]
call[name[messages].add_message, parameter[name[request], name[messages].INFO, constant[Workshift assignments saved.]]]
return[call[name[HttpResponseRedirect], parameter[call[name[wurl], parameter[constant[workshift:assign_shifts]]]]]]
variable[workshifters] assign[=] call[name[WorkshiftProfile].objects.filter, parameter[]]
variable[pools] assign[=] call[call[name[WorkshiftPool].objects.filter, parameter[]].order_by, parameter[constant[-is_primary], constant[title]]]
variable[pool_hours] assign[=] list[[]]
for taget[name[workshifter]] in starred[name[workshifters]] begin[:]
variable[hours_owed] assign[=] list[[]]
for taget[name[pool]] in starred[name[pools]] begin[:]
variable[hours] assign[=] call[name[workshifter].pool_hours.get, parameter[]]
call[name[hours_owed].append, parameter[binary_operation[name[hours].hours - name[hours].assigned_hours]]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b14967d0>]] begin[:]
call[name[pool_hours].append, parameter[name[hours_owed]]]
variable[total_pool_hours] assign[=] <ast.ListComp object at 0x7da1b14964d0>
return[call[name[render_to_response], parameter[constant[assign_shifts.html], dictionary[[<ast.Constant object at 0x7da1b1495e70>, <ast.Constant object at 0x7da1b1495e40>, <ast.Constant object at 0x7da1b1495e10>, <ast.Constant object at 0x7da1b1495de0>, <ast.Constant object at 0x7da1b1495db0>, <ast.Constant object at 0x7da1b1495d80>], [<ast.Name object at 0x7da1b1495d20>, <ast.Name object at 0x7da1b1495cf0>, <ast.Name object at 0x7da1b1495cc0>, <ast.Call object at 0x7da1b1495c90>, <ast.Name object at 0x7da1b1495bd0>, <ast.Name object at 0x7da1b1495ba0>]]]]] | keyword[def] identifier[assign_shifts_view] ( identifier[request] , identifier[semester] ):
literal[string]
identifier[page_name] = literal[string]
identifier[auto_assign_shifts_form] = keyword[None]
identifier[random_assign_instances_form] = keyword[None]
identifier[clear_assign_form] = keyword[None]
keyword[if] identifier[WorkshiftPool] . identifier[objects] . identifier[filter] ( identifier[semester] = identifier[semester] ). identifier[count] ():
identifier[auto_assign_shifts_form] = identifier[AutoAssignShiftForm] (
identifier[data] = identifier[request] . identifier[POST] keyword[if] identifier[AutoAssignShiftForm] . identifier[name] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[semester] = identifier[semester] ,
)
identifier[random_assign_instances_form] = identifier[RandomAssignInstancesForm] (
identifier[data] = identifier[request] . identifier[POST] keyword[if] identifier[RandomAssignInstancesForm] . identifier[name] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[semester] = identifier[semester] ,
)
identifier[clear_assign_form] = identifier[ClearAssignmentsForm] (
identifier[data] = identifier[request] . identifier[POST] keyword[if] identifier[ClearAssignmentsForm] . identifier[name] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[semester] = identifier[semester] ,
)
identifier[forms] =[ identifier[auto_assign_shifts_form] , identifier[random_assign_instances_form] ,
identifier[clear_assign_form] ]
keyword[if] identifier[auto_assign_shifts_form] keyword[and] identifier[auto_assign_shifts_form] . identifier[is_valid] ():
identifier[unassigned_profiles] = identifier[auto_assign_shifts_form] . identifier[save] ()
identifier[message] = literal[string]
keyword[if] identifier[unassigned_profiles] :
identifier[message] += literal[string] literal[string]
identifier[message] += literal[string] . identifier[join] ( identifier[i] . identifier[user] . identifier[get_full_name] () keyword[for] identifier[i] keyword[in] identifier[unassigned_profiles] )
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[INFO] , identifier[message] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[wurl] (
literal[string] ,
identifier[sem_url] = identifier[semester] . identifier[sem_url] ,
))
keyword[if] identifier[random_assign_instances_form] keyword[and] identifier[random_assign_instances_form] . identifier[is_valid] ():
identifier[unassigned_profiles] , identifier[unassigned_shifts] = identifier[random_assign_instances_form] . identifier[save] ()
identifier[message] = literal[string] . identifier[format] ( identifier[random_assign_instances_form] . identifier[cleaned_data] [ literal[string] ])
keyword[if] identifier[unassigned_profiles] :
identifier[message] += literal[string] literal[string]
identifier[message] += literal[string] . identifier[join] ( identifier[i] . identifier[user] . identifier[get_full_name] () keyword[for] identifier[i] keyword[in] identifier[unassigned_profiles] )
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[INFO] , identifier[message] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[wurl] (
literal[string] ,
identifier[sem_url] = identifier[semester] . identifier[sem_url] ,
))
keyword[if] identifier[clear_assign_form] keyword[and] identifier[clear_assign_form] . identifier[is_valid] ():
identifier[clear_assign_form] . identifier[save] ()
identifier[messages] . identifier[add_message] (
identifier[request] ,
identifier[messages] . identifier[INFO] ,
literal[string]
literal[string] ,
)
keyword[return] identifier[HttpResponseRedirect] ( identifier[wurl] (
literal[string] ,
identifier[sem_url] = identifier[semester] . identifier[sem_url] ,
))
identifier[shifts] = identifier[RegularWorkshift] . identifier[objects] . identifier[filter] (
identifier[pool__semester] = identifier[semester] ,
identifier[active] = keyword[True] ,
). identifier[exclude] (
identifier[workshift_type__assignment] = identifier[WorkshiftType] . identifier[NO_ASSIGN] ,
)
identifier[assign_forms] =[]
keyword[for] identifier[shift] keyword[in] identifier[shifts] :
identifier[form] = identifier[AssignShiftForm] (
identifier[data] = identifier[request] . identifier[POST] keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[else] keyword[None] ,
identifier[prefix] = literal[string] . identifier[format] ( identifier[shift] . identifier[pk] ),
identifier[instance] = identifier[shift] ,
identifier[semester] = identifier[semester] ,
)
identifier[assign_forms] . identifier[append] ( identifier[form] )
keyword[if] identifier[assign_forms] keyword[and] identifier[all] ( identifier[i] . identifier[is_valid] () keyword[for] identifier[i] keyword[in] identifier[assign_forms] ):
keyword[for] identifier[form] keyword[in] identifier[assign_forms] :
identifier[form] . identifier[save] ()
identifier[messages] . identifier[add_message] (
identifier[request] ,
identifier[messages] . identifier[INFO] ,
literal[string] ,
)
keyword[return] identifier[HttpResponseRedirect] ( identifier[wurl] (
literal[string] ,
identifier[sem_url] = identifier[semester] . identifier[sem_url] ,
))
identifier[workshifters] = identifier[WorkshiftProfile] . identifier[objects] . identifier[filter] ( identifier[semester] = identifier[semester] )
identifier[pools] = identifier[WorkshiftPool] . identifier[objects] . identifier[filter] ( identifier[semester] = identifier[semester] ). identifier[order_by] (
literal[string] , literal[string] ,
)
identifier[pool_hours] =[]
keyword[for] identifier[workshifter] keyword[in] identifier[workshifters] :
identifier[hours_owed] =[]
keyword[for] identifier[pool] keyword[in] identifier[pools] :
identifier[hours] = identifier[workshifter] . identifier[pool_hours] . identifier[get] ( identifier[pool] = identifier[pool] )
identifier[hours_owed] . identifier[append] ( identifier[hours] . identifier[hours] - identifier[hours] . identifier[assigned_hours] )
keyword[if] identifier[any] ( identifier[i] > literal[int] keyword[for] identifier[i] keyword[in] identifier[hours_owed] ):
identifier[pool_hours] . identifier[append] ( identifier[hours_owed] )
identifier[total_pool_hours] =[
identifier[sum] ( identifier[hours] [ identifier[i] ] keyword[for] identifier[hours] keyword[in] identifier[pool_hours] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[pool_hours] [ literal[int] ]) keyword[if] identifier[len] ( identifier[pool_hours] )> literal[int] keyword[else] literal[int] )
]
keyword[return] identifier[render_to_response] ( literal[string] ,{
literal[string] : identifier[page_name] ,
literal[string] : identifier[forms] ,
literal[string] : identifier[assign_forms] ,
literal[string] : identifier[zip] ( identifier[workshifters] , identifier[pool_hours] ),
literal[string] : identifier[pools] ,
literal[string] : identifier[total_pool_hours] ,
}, identifier[context_instance] = identifier[RequestContext] ( identifier[request] )) | def assign_shifts_view(request, semester):
"""
View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts.
"""
page_name = 'Assign Shifts'
auto_assign_shifts_form = None
random_assign_instances_form = None
clear_assign_form = None
if WorkshiftPool.objects.filter(semester=semester).count():
auto_assign_shifts_form = AutoAssignShiftForm(data=request.POST if AutoAssignShiftForm.name in request.POST else None, semester=semester)
random_assign_instances_form = RandomAssignInstancesForm(data=request.POST if RandomAssignInstancesForm.name in request.POST else None, semester=semester)
clear_assign_form = ClearAssignmentsForm(data=request.POST if ClearAssignmentsForm.name in request.POST else None, semester=semester) # depends on [control=['if'], data=[]]
forms = [auto_assign_shifts_form, random_assign_instances_form, clear_assign_form]
if auto_assign_shifts_form and auto_assign_shifts_form.is_valid():
unassigned_profiles = auto_assign_shifts_form.save()
message = 'Assigned workshifters to regular workshifts.'
if unassigned_profiles:
message += ' The following workshifters were not given complete assignments: '
message += ', '.join((i.user.get_full_name() for i in unassigned_profiles)) # depends on [control=['if'], data=[]]
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(wurl('workshift:assign_shifts', sem_url=semester.sem_url)) # depends on [control=['if'], data=[]]
if random_assign_instances_form and random_assign_instances_form.is_valid():
(unassigned_profiles, unassigned_shifts) = random_assign_instances_form.save()
message = 'Assigned workshifters randomly to instances within {}.'.format(random_assign_instances_form.cleaned_data['pool'])
if unassigned_profiles:
message += 'The following workshifers were not given complete assignments: '
message += ', '.join((i.user.get_full_name() for i in unassigned_profiles)) # depends on [control=['if'], data=[]]
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(wurl('workshift:assign_shifts', sem_url=semester.sem_url)) # depends on [control=['if'], data=[]]
if clear_assign_form and clear_assign_form.is_valid():
clear_assign_form.save()
messages.add_message(request, messages.INFO, 'Cleared all workshifters from their regular workshift assignments')
return HttpResponseRedirect(wurl('workshift:assign_shifts', sem_url=semester.sem_url)) # depends on [control=['if'], data=[]]
shifts = RegularWorkshift.objects.filter(pool__semester=semester, active=True).exclude(workshift_type__assignment=WorkshiftType.NO_ASSIGN)
assign_forms = []
for shift in shifts:
form = AssignShiftForm(data=request.POST if 'individual_assign' in request.POST else None, prefix='shift-{}'.format(shift.pk), instance=shift, semester=semester)
assign_forms.append(form) # depends on [control=['for'], data=['shift']]
if assign_forms and all((i.is_valid() for i in assign_forms)):
for form in assign_forms:
form.save() # depends on [control=['for'], data=['form']]
messages.add_message(request, messages.INFO, 'Workshift assignments saved.')
return HttpResponseRedirect(wurl('workshift:assign_shifts', sem_url=semester.sem_url)) # depends on [control=['if'], data=[]]
workshifters = WorkshiftProfile.objects.filter(semester=semester)
pools = WorkshiftPool.objects.filter(semester=semester).order_by('-is_primary', 'title')
pool_hours = []
for workshifter in workshifters:
hours_owed = []
for pool in pools:
hours = workshifter.pool_hours.get(pool=pool)
hours_owed.append(hours.hours - hours.assigned_hours) # depends on [control=['for'], data=['pool']]
if any((i > 0 for i in hours_owed)):
pool_hours.append(hours_owed) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['workshifter']]
total_pool_hours = [sum((hours[i] for hours in pool_hours)) for i in range(len(pool_hours[0]) if len(pool_hours) > 0 else 0)]
return render_to_response('assign_shifts.html', {'page_name': page_name, 'forms': forms, 'assign_forms': assign_forms, 'unassigned_profiles': zip(workshifters, pool_hours), 'pools': pools, 'total_pool_hours': total_pool_hours}, context_instance=RequestContext(request)) |
def group(self):
    """
    | Comment: The id of a group

    Resolves the group referenced by ``group_id`` via the attached API
    client; yields None when either the client or the id is missing.
    """
    if not (self.api and self.group_id):
        return None
    return self.api._get_group(self.group_id)
constant[
| Comment: The id of a group
]
if <ast.BoolOp object at 0x7da20c7cb1f0> begin[:]
return[call[name[self].api._get_group, parameter[name[self].group_id]]] | keyword[def] identifier[group] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[api] keyword[and] identifier[self] . identifier[group_id] :
keyword[return] identifier[self] . identifier[api] . identifier[_get_group] ( identifier[self] . identifier[group_id] ) | def group(self):
"""
| Comment: The id of a group
"""
if self.api and self.group_id:
return self.api._get_group(self.group_id) # depends on [control=['if'], data=[]] |
def size(self):
    """Total number of coefficients in the ScalarCoefs structure.

    Example::

        >>> sz = c.size
        >>> N = c.nmax + 1
        >>> L = N + c.mmax * (2 * N - c.mmax - 1)
        >>> assert sz == L
    """
    n_plus_one = self.nmax + 1
    ncoef = n_plus_one + self.mmax * (2 * n_plus_one - self.mmax - 1)
    # Sanity check: the flat coefficient vector must agree with the count
    # implied by (nmax, mmax).
    assert ncoef == len(self._vec)
    return ncoef
constant[Total number of coefficients in the ScalarCoefs structure.
Example::
>>> sz = c.size
>>> N = c.nmax + 1
>>> L = N+ c.mmax * (2 * N - c.mmax - 1);
>>> assert sz == L
]
variable[N] assign[=] binary_operation[name[self].nmax + constant[1]]
variable[NC] assign[=] binary_operation[name[N] + binary_operation[name[self].mmax * binary_operation[binary_operation[binary_operation[constant[2] * name[N]] - name[self].mmax] - constant[1]]]]
assert[compare[name[NC] equal[==] call[name[len], parameter[name[self]._vec]]]]
return[name[NC]] | keyword[def] identifier[size] ( identifier[self] ):
literal[string]
identifier[N] = identifier[self] . identifier[nmax] + literal[int] ;
identifier[NC] = identifier[N] + identifier[self] . identifier[mmax] *( literal[int] * identifier[N] - identifier[self] . identifier[mmax] - literal[int] );
keyword[assert] identifier[NC] == identifier[len] ( identifier[self] . identifier[_vec] )
keyword[return] identifier[NC] | def size(self):
"""Total number of coefficients in the ScalarCoefs structure.
Example::
>>> sz = c.size
>>> N = c.nmax + 1
>>> L = N+ c.mmax * (2 * N - c.mmax - 1);
>>> assert sz == L
"""
N = self.nmax + 1
NC = N + self.mmax * (2 * N - self.mmax - 1)
assert NC == len(self._vec)
return NC |
def symbols(names, **args):
    """
    Transform strings into instances of :class:`Symbol` class.

    :func:`symbols` function returns a sequence of symbols with names taken
    from ``names`` argument, which can be a comma or whitespace delimited
    string, or a sequence of strings::

        >>> from symengine import symbols
        >>> x, y, z = symbols('x,y,z')
        >>> a, b, c = symbols('a b c')

    The type of output is dependent on the properties of input arguments::

        >>> symbols('x')
        x
        >>> symbols('x,')
        (x,)
        >>> symbols('x,y')
        (x, y)
        >>> symbols(('a', 'b', 'c'))
        (a, b, c)
        >>> symbols(['a', 'b', 'c'])
        [a, b, c]
        >>> symbols(set(['a', 'b', 'c']))
        set([a, b, c])

    If an iterable container is needed for a single symbol, set the ``seq``
    argument to ``True`` or terminate the symbol name with a comma::

        >>> symbols('x', seq=True)
        (x,)

    To reduce typing, range syntax is supported to create indexed symbols.
    Ranges are indicated by a colon and the type of range is determined by
    the character to the right of the colon. If the character is a digit
    then all contiguous digits to the left are taken as the nonnegative
    starting value (or 0 if there is no digit left of the colon) and all
    contiguous digits to the right are taken as 1 greater than the ending
    value::

        >>> symbols('x:10')
        (x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
        >>> symbols('x5:10')
        (x5, x6, x7, x8, x9)
        >>> symbols('x5(:2)')
        (x50, x51)
        >>> symbols('x5:10,y:5')
        (x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
        >>> symbols(('x5:10', 'y:5'))
        ((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))

    If the character to the right of the colon is a letter, then the single
    letter to the left (or 'a' if there is none) is taken as the start
    and all characters in the lexicographic range *through* the letter to
    the right are used as the range::

        >>> symbols('x:z')
        (x, y, z)
        >>> symbols('x:c')  # null range
        ()
        >>> symbols('x(:c)')
        (xa, xb, xc)
        >>> symbols(':c')
        (a, b, c)
        >>> symbols('a:d, x:z')
        (a, b, c, d, x, y, z)
        >>> symbols(('a:d', 'x:z'))
        ((a, b, c, d), (x, y, z))

    Multiple ranges are supported; contiguous numerical ranges should be
    separated by parentheses to disambiguate the ending number of one
    range from the starting number of the next::

        >>> symbols('x:2(1:3)')
        (x01, x02, x11, x12)
        >>> symbols(':3:2')  # parsing is from left to right
        (00, 01, 10, 11, 20, 21)

    Only one pair of parentheses surrounding ranges are removed, so to
    include parentheses around ranges, double them. And to include spaces,
    commas, or colons, escape them with a backslash::

        >>> symbols('x((a:b))')
        (x(a), x(b))
        >>> symbols('x(:1\,:2)')  # or 'x((:1)\,(:2))'
        (x(0,0), x(0,1))
    """
    result = []
    if isinstance(names, string_types):
        # Backslash-escaped characters (``\,``, ``\:``, ``\ ``) must survive
        # the comma/space/colon splitting below.  Replace each escape found
        # in ``names`` with a control character that does not already occur
        # in the string, and remember the (placeholder, literal) pairs so
        # ``literal`` can undo the substitution once splitting is done.
        marker = 0
        literals = ['\,', '\:', '\ ']
        for i in range(len(literals)):
            lit = literals.pop(0)
            if lit in names:
                while chr(marker) in names:
                    marker += 1
                lit_char = chr(marker)
                marker += 1
                names = names.replace(lit, lit_char)
                literals.append((lit_char, lit[1:]))
        def literal(s):
            # Restore the escaped characters hidden behind placeholders.
            if literals:
                for c, l in literals:
                    s = s.replace(c, l)
            return s
        names = names.strip()
        # A trailing comma forces a sequence result even for one symbol.
        as_seq = names.endswith(',')
        if as_seq:
            names = names[:-1].rstrip()
        if not names:
            raise ValueError('no symbols given')
        # split on commas
        names = [n.strip() for n in names.split(',')]
        if not all(n for n in names):
            raise ValueError('missing symbol between commas')
        # split on spaces (iterate backwards so in-place slice-replacement
        # does not disturb the indices still to be visited)
        for i in range(len(names) - 1, -1, -1):
            names[i: i + 1] = names[i].split()
        cls = args.pop('cls', Symbol)
        seq = args.pop('seq', as_seq)
        for name in names:
            if not name:
                raise ValueError('missing symbol')
            if ':' not in name:
                # Plain name, no range syntax: emit a single symbol.
                symbol = cls(literal(name), **args)
                result.append(symbol)
                continue
            # ``_range`` splits the name into literal pieces and range
            # specs (e.g. 'x5:10' -> ['x', '5:10', '']).
            split = _range.split(name)
            # remove 1 layer of bounding parentheses around ranges
            for i in range(len(split) - 1):
                if i and ':' in split[i] and split[i] != ':' and \
                        split[i - 1].endswith('(') and \
                        split[i + 1].startswith(')'):
                    split[i - 1] = split[i - 1][:-1]
                    split[i + 1] = split[i + 1][1:]
            # Expand each range spec into its list of values; literal
            # pieces become one-element lists so the cartesian product
            # below treats everything uniformly.
            for i, s in enumerate(split):
                if ':' in s:
                    # NOTE(review): s[-1] is a single character, so this is
                    # equivalent to s.endswith(':') -- an empty end range.
                    if s[-1].endswith(':'):
                        raise ValueError('missing end range')
                    a, b = s.split(':')
                    if b[-1] in string.digits:
                        # Numeric range: start defaults to 0, end is
                        # exclusive (range semantics).
                        a = 0 if not a else int(a)
                        b = int(b)
                        split[i] = [str(c) for c in range(a, b)]
                    else:
                        # Letter range: start defaults to 'a', end is
                        # inclusive (lexicographic through ``b``).
                        a = a or 'a'
                        split[i] = [string.ascii_letters[c] for c in range(
                            string.ascii_letters.index(a),
                            string.ascii_letters.index(b) + 1)]  # inclusive
                    if not split[i]:
                        # Empty (null) range: skip this name entirely via
                        # the for/else below.
                        break
                else:
                    split[i] = [s]
            else:
                # Only reached when no range was empty: a range spec always
                # yields a sequence result.
                seq = True
                if len(split) == 1:
                    names = split[0]
                else:
                    # Cartesian product of all pieces, joined into names.
                    names = [''.join(s) for s in cartes(*split)]
                if literals:
                    result.extend([cls(literal(s), **args) for s in names])
                else:
                    result.extend([cls(s, **args) for s in names])
        if not seq and len(result) <= 1:
            if not result:
                return ()
            return result[0]
        return tuple(result)
    else:
        # ``names`` is already a container: recurse on each element and
        # preserve the container type (tuple/list/set) in the result.
        for name in names:
            result.append(symbols(name, **args))
        return type(names)(result)
constant[
Transform strings into instances of :class:`Symbol` class.
:func:`symbols` function returns a sequence of symbols with names taken
from ``names`` argument, which can be a comma or whitespace delimited
string, or a sequence of strings::
>>> from symengine import symbols
>>> x, y, z = symbols('x,y,z')
>>> a, b, c = symbols('a b c')
The type of output is dependent on the properties of input arguments::
>>> symbols('x')
x
>>> symbols('x,')
(x,)
>>> symbols('x,y')
(x, y)
>>> symbols(('a', 'b', 'c'))
(a, b, c)
>>> symbols(['a', 'b', 'c'])
[a, b, c]
>>> symbols(set(['a', 'b', 'c']))
set([a, b, c])
If an iterable container is needed for a single symbol, set the ``seq``
argument to ``True`` or terminate the symbol name with a comma::
>>> symbols('x', seq=True)
(x,)
To reduce typing, range syntax is supported to create indexed symbols.
Ranges are indicated by a colon and the type of range is determined by
the character to the right of the colon. If the character is a digit
then all contiguous digits to the left are taken as the nonnegative
starting value (or 0 if there is no digit left of the colon) and all
contiguous digits to the right are taken as 1 greater than the ending
value::
>>> symbols('x:10')
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
>>> symbols('x5:10')
(x5, x6, x7, x8, x9)
>>> symbols('x5(:2)')
(x50, x51)
>>> symbols('x5:10,y:5')
(x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
>>> symbols(('x5:10', 'y:5'))
((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
If the character to the right of the colon is a letter, then the single
letter to the left (or 'a' if there is none) is taken as the start
and all characters in the lexicographic range *through* the letter to
the right are used as the range::
>>> symbols('x:z')
(x, y, z)
>>> symbols('x:c') # null range
()
>>> symbols('x(:c)')
(xa, xb, xc)
>>> symbols(':c')
(a, b, c)
>>> symbols('a:d, x:z')
(a, b, c, d, x, y, z)
>>> symbols(('a:d', 'x:z'))
((a, b, c, d), (x, y, z))
Multiple ranges are supported; contiguous numerical ranges should be
separated by parentheses to disambiguate the ending number of one
range from the starting number of the next::
>>> symbols('x:2(1:3)')
(x01, x02, x11, x12)
>>> symbols(':3:2') # parsing is from left to right
(00, 01, 10, 11, 20, 21)
Only one pair of parentheses surrounding ranges are removed, so to
include parentheses around ranges, double them. And to include spaces,
commas, or colons, escape them with a backslash::
>>> symbols('x((a:b))')
(x(a), x(b))
>>> symbols('x(:1\,:2)') # or 'x((:1)\,(:2))'
(x(0,0), x(0,1))
]
variable[result] assign[=] list[[]]
if call[name[isinstance], parameter[name[names], name[string_types]]] begin[:]
variable[marker] assign[=] constant[0]
variable[literals] assign[=] list[[<ast.Constant object at 0x7da1b111bbb0>, <ast.Constant object at 0x7da1b111bb80>, <ast.Constant object at 0x7da1b111bb50>]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[literals]]]]]] begin[:]
variable[lit] assign[=] call[name[literals].pop, parameter[constant[0]]]
if compare[name[lit] in name[names]] begin[:]
while compare[call[name[chr], parameter[name[marker]]] in name[names]] begin[:]
<ast.AugAssign object at 0x7da1b11d6aa0>
variable[lit_char] assign[=] call[name[chr], parameter[name[marker]]]
<ast.AugAssign object at 0x7da1b11d58d0>
variable[names] assign[=] call[name[names].replace, parameter[name[lit], name[lit_char]]]
call[name[literals].append, parameter[tuple[[<ast.Name object at 0x7da1b11d7d00>, <ast.Subscript object at 0x7da1b11d5fc0>]]]]
def function[literal, parameter[s]]:
if name[literals] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b11d52d0>, <ast.Name object at 0x7da1b11d67d0>]]] in starred[name[literals]] begin[:]
variable[s] assign[=] call[name[s].replace, parameter[name[c], name[l]]]
return[name[s]]
variable[names] assign[=] call[name[names].strip, parameter[]]
variable[as_seq] assign[=] call[name[names].endswith, parameter[constant[,]]]
if name[as_seq] begin[:]
variable[names] assign[=] call[call[name[names]][<ast.Slice object at 0x7da1b1118eb0>].rstrip, parameter[]]
if <ast.UnaryOp object at 0x7da1b1118f70> begin[:]
<ast.Raise object at 0x7da1b1118fd0>
variable[names] assign[=] <ast.ListComp object at 0x7da1b11190f0>
if <ast.UnaryOp object at 0x7da1b1119330> begin[:]
<ast.Raise object at 0x7da1b11194e0>
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[names]]] - constant[1]], <ast.UnaryOp object at 0x7da1b1118580>, <ast.UnaryOp object at 0x7da1b11185e0>]]] begin[:]
call[name[names]][<ast.Slice object at 0x7da1b11186d0>] assign[=] call[call[name[names]][name[i]].split, parameter[]]
variable[cls] assign[=] call[name[args].pop, parameter[constant[cls], name[Symbol]]]
variable[seq] assign[=] call[name[args].pop, parameter[constant[seq], name[as_seq]]]
for taget[name[name]] in starred[name[names]] begin[:]
if <ast.UnaryOp object at 0x7da1b111b850> begin[:]
<ast.Raise object at 0x7da1b111b7f0>
if compare[constant[:] <ast.NotIn object at 0x7da2590d7190> name[name]] begin[:]
variable[symbol] assign[=] call[name[cls], parameter[call[name[literal], parameter[name[name]]]]]
call[name[result].append, parameter[name[symbol]]]
continue
variable[split] assign[=] call[name[_range].split, parameter[name[name]]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[split]]] - constant[1]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1119d80> begin[:]
call[name[split]][binary_operation[name[i] - constant[1]]] assign[=] call[call[name[split]][binary_operation[name[i] - constant[1]]]][<ast.Slice object at 0x7da1b111b1f0>]
call[name[split]][binary_operation[name[i] + constant[1]]] assign[=] call[call[name[split]][binary_operation[name[i] + constant[1]]]][<ast.Slice object at 0x7da1b11183d0>]
for taget[tuple[[<ast.Name object at 0x7da1b1118310>, <ast.Name object at 0x7da1b11182e0>]]] in starred[call[name[enumerate], parameter[name[split]]]] begin[:]
if compare[constant[:] in name[s]] begin[:]
if call[call[name[s]][<ast.UnaryOp object at 0x7da1b11bded0>].endswith, parameter[constant[:]]] begin[:]
<ast.Raise object at 0x7da1b11bf8b0>
<ast.Tuple object at 0x7da1b11bf220> assign[=] call[name[s].split, parameter[constant[:]]]
if compare[call[name[b]][<ast.UnaryOp object at 0x7da1b11bc7c0>] in name[string].digits] begin[:]
variable[a] assign[=] <ast.IfExp object at 0x7da1b11bc460>
variable[b] assign[=] call[name[int], parameter[name[b]]]
call[name[split]][name[i]] assign[=] <ast.ListComp object at 0x7da1b11bf640>
if <ast.UnaryOp object at 0x7da1b11bc340> begin[:]
break
if <ast.BoolOp object at 0x7da1b11be020> begin[:]
if <ast.UnaryOp object at 0x7da1b11bd390> begin[:]
return[tuple[[]]]
return[call[name[result]][constant[0]]]
return[call[name[tuple], parameter[name[result]]]] | keyword[def] identifier[symbols] ( identifier[names] ,** identifier[args] ):
literal[string]
identifier[result] =[]
keyword[if] identifier[isinstance] ( identifier[names] , identifier[string_types] ):
identifier[marker] = literal[int]
identifier[literals] =[ literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[literals] )):
identifier[lit] = identifier[literals] . identifier[pop] ( literal[int] )
keyword[if] identifier[lit] keyword[in] identifier[names] :
keyword[while] identifier[chr] ( identifier[marker] ) keyword[in] identifier[names] :
identifier[marker] += literal[int]
identifier[lit_char] = identifier[chr] ( identifier[marker] )
identifier[marker] += literal[int]
identifier[names] = identifier[names] . identifier[replace] ( identifier[lit] , identifier[lit_char] )
identifier[literals] . identifier[append] (( identifier[lit_char] , identifier[lit] [ literal[int] :]))
keyword[def] identifier[literal] ( identifier[s] ):
keyword[if] identifier[literals] :
keyword[for] identifier[c] , identifier[l] keyword[in] identifier[literals] :
identifier[s] = identifier[s] . identifier[replace] ( identifier[c] , identifier[l] )
keyword[return] identifier[s]
identifier[names] = identifier[names] . identifier[strip] ()
identifier[as_seq] = identifier[names] . identifier[endswith] ( literal[string] )
keyword[if] identifier[as_seq] :
identifier[names] = identifier[names] [:- literal[int] ]. identifier[rstrip] ()
keyword[if] keyword[not] identifier[names] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[names] =[ identifier[n] . identifier[strip] () keyword[for] identifier[n] keyword[in] identifier[names] . identifier[split] ( literal[string] )]
keyword[if] keyword[not] identifier[all] ( identifier[n] keyword[for] identifier[n] keyword[in] identifier[names] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[names] )- literal[int] ,- literal[int] ,- literal[int] ):
identifier[names] [ identifier[i] : identifier[i] + literal[int] ]= identifier[names] [ identifier[i] ]. identifier[split] ()
identifier[cls] = identifier[args] . identifier[pop] ( literal[string] , identifier[Symbol] )
identifier[seq] = identifier[args] . identifier[pop] ( literal[string] , identifier[as_seq] )
keyword[for] identifier[name] keyword[in] identifier[names] :
keyword[if] keyword[not] identifier[name] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[name] :
identifier[symbol] = identifier[cls] ( identifier[literal] ( identifier[name] ),** identifier[args] )
identifier[result] . identifier[append] ( identifier[symbol] )
keyword[continue]
identifier[split] = identifier[_range] . identifier[split] ( identifier[name] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[split] )- literal[int] ):
keyword[if] identifier[i] keyword[and] literal[string] keyword[in] identifier[split] [ identifier[i] ] keyword[and] identifier[split] [ identifier[i] ]!= literal[string] keyword[and] identifier[split] [ identifier[i] - literal[int] ]. identifier[endswith] ( literal[string] ) keyword[and] identifier[split] [ identifier[i] + literal[int] ]. identifier[startswith] ( literal[string] ):
identifier[split] [ identifier[i] - literal[int] ]= identifier[split] [ identifier[i] - literal[int] ][:- literal[int] ]
identifier[split] [ identifier[i] + literal[int] ]= identifier[split] [ identifier[i] + literal[int] ][ literal[int] :]
keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[split] ):
keyword[if] literal[string] keyword[in] identifier[s] :
keyword[if] identifier[s] [- literal[int] ]. identifier[endswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[a] , identifier[b] = identifier[s] . identifier[split] ( literal[string] )
keyword[if] identifier[b] [- literal[int] ] keyword[in] identifier[string] . identifier[digits] :
identifier[a] = literal[int] keyword[if] keyword[not] identifier[a] keyword[else] identifier[int] ( identifier[a] )
identifier[b] = identifier[int] ( identifier[b] )
identifier[split] [ identifier[i] ]=[ identifier[str] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[range] ( identifier[a] , identifier[b] )]
keyword[else] :
identifier[a] = identifier[a] keyword[or] literal[string]
identifier[split] [ identifier[i] ]=[ identifier[string] . identifier[ascii_letters] [ identifier[c] ] keyword[for] identifier[c] keyword[in] identifier[range] (
identifier[string] . identifier[ascii_letters] . identifier[index] ( identifier[a] ),
identifier[string] . identifier[ascii_letters] . identifier[index] ( identifier[b] )+ literal[int] )]
keyword[if] keyword[not] identifier[split] [ identifier[i] ]:
keyword[break]
keyword[else] :
identifier[split] [ identifier[i] ]=[ identifier[s] ]
keyword[else] :
identifier[seq] = keyword[True]
keyword[if] identifier[len] ( identifier[split] )== literal[int] :
identifier[names] = identifier[split] [ literal[int] ]
keyword[else] :
identifier[names] =[ literal[string] . identifier[join] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[cartes] (* identifier[split] )]
keyword[if] identifier[literals] :
identifier[result] . identifier[extend] ([ identifier[cls] ( identifier[literal] ( identifier[s] ),** identifier[args] ) keyword[for] identifier[s] keyword[in] identifier[names] ])
keyword[else] :
identifier[result] . identifier[extend] ([ identifier[cls] ( identifier[s] ,** identifier[args] ) keyword[for] identifier[s] keyword[in] identifier[names] ])
keyword[if] keyword[not] identifier[seq] keyword[and] identifier[len] ( identifier[result] )<= literal[int] :
keyword[if] keyword[not] identifier[result] :
keyword[return] ()
keyword[return] identifier[result] [ literal[int] ]
keyword[return] identifier[tuple] ( identifier[result] )
keyword[else] :
keyword[for] identifier[name] keyword[in] identifier[names] :
identifier[result] . identifier[append] ( identifier[symbols] ( identifier[name] ,** identifier[args] ))
keyword[return] identifier[type] ( identifier[names] )( identifier[result] ) | def symbols(names, **args):
"""
Transform strings into instances of :class:`Symbol` class.
:func:`symbols` function returns a sequence of symbols with names taken
from ``names`` argument, which can be a comma or whitespace delimited
string, or a sequence of strings::
>>> from symengine import symbols
>>> x, y, z = symbols('x,y,z')
>>> a, b, c = symbols('a b c')
The type of output is dependent on the properties of input arguments::
>>> symbols('x')
x
>>> symbols('x,')
(x,)
>>> symbols('x,y')
(x, y)
>>> symbols(('a', 'b', 'c'))
(a, b, c)
>>> symbols(['a', 'b', 'c'])
[a, b, c]
>>> symbols(set(['a', 'b', 'c']))
set([a, b, c])
If an iterable container is needed for a single symbol, set the ``seq``
argument to ``True`` or terminate the symbol name with a comma::
>>> symbols('x', seq=True)
(x,)
To reduce typing, range syntax is supported to create indexed symbols.
Ranges are indicated by a colon and the type of range is determined by
the character to the right of the colon. If the character is a digit
then all contiguous digits to the left are taken as the nonnegative
starting value (or 0 if there is no digit left of the colon) and all
contiguous digits to the right are taken as 1 greater than the ending
value::
>>> symbols('x:10')
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
>>> symbols('x5:10')
(x5, x6, x7, x8, x9)
>>> symbols('x5(:2)')
(x50, x51)
>>> symbols('x5:10,y:5')
(x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
>>> symbols(('x5:10', 'y:5'))
((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
If the character to the right of the colon is a letter, then the single
letter to the left (or 'a' if there is none) is taken as the start
and all characters in the lexicographic range *through* the letter to
the right are used as the range::
>>> symbols('x:z')
(x, y, z)
>>> symbols('x:c') # null range
()
>>> symbols('x(:c)')
(xa, xb, xc)
>>> symbols(':c')
(a, b, c)
>>> symbols('a:d, x:z')
(a, b, c, d, x, y, z)
>>> symbols(('a:d', 'x:z'))
((a, b, c, d), (x, y, z))
Multiple ranges are supported; contiguous numerical ranges should be
separated by parentheses to disambiguate the ending number of one
range from the starting number of the next::
>>> symbols('x:2(1:3)')
(x01, x02, x11, x12)
>>> symbols(':3:2') # parsing is from left to right
(00, 01, 10, 11, 20, 21)
Only one pair of parentheses surrounding ranges are removed, so to
include parentheses around ranges, double them. And to include spaces,
commas, or colons, escape them with a backslash::
>>> symbols('x((a:b))')
(x(a), x(b))
>>> symbols('x(:1\\,:2)') # or 'x((:1)\\,(:2))'
(x(0,0), x(0,1))
"""
result = []
if isinstance(names, string_types):
marker = 0
literals = ['\\,', '\\:', '\\ ']
for i in range(len(literals)):
lit = literals.pop(0)
if lit in names:
while chr(marker) in names:
marker += 1 # depends on [control=['while'], data=[]]
lit_char = chr(marker)
marker += 1
names = names.replace(lit, lit_char)
literals.append((lit_char, lit[1:])) # depends on [control=['if'], data=['lit', 'names']] # depends on [control=['for'], data=[]]
def literal(s):
if literals:
for (c, l) in literals:
s = s.replace(c, l) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return s
names = names.strip()
as_seq = names.endswith(',')
if as_seq:
names = names[:-1].rstrip() # depends on [control=['if'], data=[]]
if not names:
raise ValueError('no symbols given') # depends on [control=['if'], data=[]]
# split on commas
names = [n.strip() for n in names.split(',')]
if not all((n for n in names)):
raise ValueError('missing symbol between commas') # depends on [control=['if'], data=[]]
# split on spaces
for i in range(len(names) - 1, -1, -1):
names[i:i + 1] = names[i].split() # depends on [control=['for'], data=['i']]
cls = args.pop('cls', Symbol)
seq = args.pop('seq', as_seq)
for name in names:
if not name:
raise ValueError('missing symbol') # depends on [control=['if'], data=[]]
if ':' not in name:
symbol = cls(literal(name), **args)
result.append(symbol)
continue # depends on [control=['if'], data=['name']]
split = _range.split(name)
# remove 1 layer of bounding parentheses around ranges
for i in range(len(split) - 1):
if i and ':' in split[i] and (split[i] != ':') and split[i - 1].endswith('(') and split[i + 1].startswith(')'):
split[i - 1] = split[i - 1][:-1]
split[i + 1] = split[i + 1][1:] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
for (i, s) in enumerate(split):
if ':' in s:
if s[-1].endswith(':'):
raise ValueError('missing end range') # depends on [control=['if'], data=[]]
(a, b) = s.split(':')
if b[-1] in string.digits:
a = 0 if not a else int(a)
b = int(b)
split[i] = [str(c) for c in range(a, b)] # depends on [control=['if'], data=[]]
else:
a = a or 'a'
split[i] = [string.ascii_letters[c] for c in range(string.ascii_letters.index(a), string.ascii_letters.index(b) + 1)] # inclusive
if not split[i]:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['s']]
else:
split[i] = [s] # depends on [control=['for'], data=[]]
else:
seq = True
if len(split) == 1:
names = split[0] # depends on [control=['if'], data=[]]
else:
names = [''.join(s) for s in cartes(*split)]
if literals:
result.extend([cls(literal(s), **args) for s in names]) # depends on [control=['if'], data=[]]
else:
result.extend([cls(s, **args) for s in names]) # depends on [control=['for'], data=['name']]
if not seq and len(result) <= 1:
if not result:
return () # depends on [control=['if'], data=[]]
return result[0] # depends on [control=['if'], data=[]]
return tuple(result) # depends on [control=['if'], data=[]]
else:
for name in names:
result.append(symbols(name, **args)) # depends on [control=['for'], data=['name']]
return type(names)(result) |
def add_file_to_zip(zip_file, filename, archname=None, compress_type=None):
    """
    Zip <filename> into <zip_file> as <archname>.
    :param str|unicode zip_file: The file name of the zip file
    :param str|unicode filename: The name of the file to add, including the path
    :param str|unicode archname: The new name, with directories, of the file, the same as filename if not given
    :param str|unicode compress_type: The compression type
    """
    # Append mode ('a') creates the archive if it does not exist yet.
    archive = zipfile.ZipFile(zip_file, 'a')
    try:
        archive.write(filename, arcname=archname, compress_type=compress_type)
    finally:
        archive.close()
constant[
Zip <filename> into <zip_file> as <archname>.
:param str|unicode zip_file: The file name of the zip file
:param str|unicode filename: The name of the file to add, including the path
:param str|unicode archname: The new name, with directories, of the file, the same as filename if not given
:param str|unicode compress_type: The compression type
]
with call[name[zipfile].ZipFile, parameter[name[zip_file], constant[a]]] begin[:]
call[name[zf].write, parameter[name[filename], name[archname], name[compress_type]]] | keyword[def] identifier[add_file_to_zip] ( identifier[zip_file] , identifier[filename] , identifier[archname] = keyword[None] , identifier[compress_type] = keyword[None] ):
literal[string]
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[zip_file] , literal[string] ) keyword[as] identifier[zf] :
identifier[zf] . identifier[write] ( identifier[filename] , identifier[archname] , identifier[compress_type] ) | def add_file_to_zip(zip_file, filename, archname=None, compress_type=None):
"""
Zip <filename> into <zip_file> as <archname>.
:param str|unicode zip_file: The file name of the zip file
:param str|unicode filename: The name of the file to add, including the path
:param str|unicode archname: The new name, with directories, of the file, the same as filename if not given
:param str|unicode compress_type: The compression type
"""
with zipfile.ZipFile(zip_file, 'a') as zf:
zf.write(filename, archname, compress_type) # depends on [control=['with'], data=['zf']] |
def _del_subscription(self, a_filter, session):
"""
Delete a session subscription on a given topic
:param a_filter:
:param session:
:return:
"""
deleted = 0
try:
subscriptions = self._subscriptions[a_filter]
for index, (sub_session, qos) in enumerate(subscriptions):
if sub_session.client_id == session.client_id:
self.logger.debug("Removing subscription on topic '%s' for client %s" %
(a_filter, format_client_message(session=session)))
subscriptions.pop(index)
deleted += 1
break
except KeyError:
# Unsubscribe topic not found in current subscribed topics
pass
finally:
return deleted | def function[_del_subscription, parameter[self, a_filter, session]]:
constant[
Delete a session subscription on a given topic
:param a_filter:
:param session:
:return:
]
variable[deleted] assign[=] constant[0]
<ast.Try object at 0x7da18fe90070> | keyword[def] identifier[_del_subscription] ( identifier[self] , identifier[a_filter] , identifier[session] ):
literal[string]
identifier[deleted] = literal[int]
keyword[try] :
identifier[subscriptions] = identifier[self] . identifier[_subscriptions] [ identifier[a_filter] ]
keyword[for] identifier[index] ,( identifier[sub_session] , identifier[qos] ) keyword[in] identifier[enumerate] ( identifier[subscriptions] ):
keyword[if] identifier[sub_session] . identifier[client_id] == identifier[session] . identifier[client_id] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %
( identifier[a_filter] , identifier[format_client_message] ( identifier[session] = identifier[session] )))
identifier[subscriptions] . identifier[pop] ( identifier[index] )
identifier[deleted] += literal[int]
keyword[break]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[finally] :
keyword[return] identifier[deleted] | def _del_subscription(self, a_filter, session):
"""
Delete a session subscription on a given topic
:param a_filter:
:param session:
:return:
"""
deleted = 0
try:
subscriptions = self._subscriptions[a_filter]
for (index, (sub_session, qos)) in enumerate(subscriptions):
if sub_session.client_id == session.client_id:
self.logger.debug("Removing subscription on topic '%s' for client %s" % (a_filter, format_client_message(session=session)))
subscriptions.pop(index)
deleted += 1
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
# Unsubscribe topic not found in current subscribed topics
pass # depends on [control=['except'], data=[]]
finally:
return deleted |
def MPTfile(file_or_path):
    """Opens .mpt files as numpy record arrays

    Checks for the correct headings, skips any comments and returns a
    numpy record array object and a list of comments.

    :param file_or_path: path to a .mpt file, or an already-open binary
        file object (must yield ``bytes`` lines)
    :return: tuple of (numpy record array, list of raw comment lines)
    :raises ValueError: if the file is not a valid EC-Lab ASCII file
    """
    if isinstance(file_or_path, str):
        mpt_file = open(file_or_path, 'rb')
        # We opened the file here, so we are responsible for closing it.
        opened_here = True
    else:
        mpt_file = file_or_path
        opened_here = False
    try:
        magic = next(mpt_file)
        if magic != b'EC-Lab ASCII FILE\r\n':
            raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
        nb_headers_match = re.match(rb'Nb header lines : (\d+)\s*$',
                                    next(mpt_file))
        if nb_headers_match is None:
            # Fail with a clear error instead of an AttributeError on None.
            raise ValueError("Missing 'Nb header lines' in EC-Lab file")
        nb_headers = int(nb_headers_match.group(1))
        if nb_headers < 3:
            raise ValueError("Too few header lines: %d" % nb_headers)
        ## The 'magic number' line, the 'Nb headers' line and the column headers
        ## make three lines. Every additional line is a comment line.
        comments = [next(mpt_file) for i in range(nb_headers - 3)]
        fieldnames = str3(next(mpt_file)).strip().split('\t')
        record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
        ## Must be able to parse files where commas are used for decimal points
        converter_dict = dict(((i, comma_converter)
                               for i in range(len(fieldnames))))
        mpt_array = np.loadtxt(mpt_file, dtype=record_type,
                               converters=converter_dict)
        return mpt_array, comments
    finally:
        if opened_here:
            # Only close files we opened; caller-supplied file objects are
            # left open for the caller to manage.
            mpt_file.close()
constant[Opens .mpt files as numpy record arrays
Checks for the correct headings, skips any comments and returns a
numpy record array object and a list of comments
]
if call[name[isinstance], parameter[name[file_or_path], name[str]]] begin[:]
variable[mpt_file] assign[=] call[name[open], parameter[name[file_or_path], constant[rb]]]
variable[magic] assign[=] call[name[next], parameter[name[mpt_file]]]
if compare[name[magic] not_equal[!=] constant[b'EC-Lab ASCII FILE\r\n']] begin[:]
<ast.Raise object at 0x7da2044c0a00>
variable[nb_headers_match] assign[=] call[name[re].match, parameter[constant[b'Nb header lines : (\\d+)\\s*$'], call[name[next], parameter[name[mpt_file]]]]]
variable[nb_headers] assign[=] call[name[int], parameter[call[name[nb_headers_match].group, parameter[constant[1]]]]]
if compare[name[nb_headers] less[<] constant[3]] begin[:]
<ast.Raise object at 0x7da2044c1960>
variable[comments] assign[=] <ast.ListComp object at 0x7da2044c2ef0>
variable[fieldnames] assign[=] call[call[call[name[str3], parameter[call[name[next], parameter[name[mpt_file]]]]].strip, parameter[]].split, parameter[constant[ ]]]
variable[record_type] assign[=] call[name[np].dtype, parameter[call[name[list], parameter[call[name[map], parameter[name[fieldname_to_dtype], name[fieldnames]]]]]]]
variable[converter_dict] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b191f9a0>]]
variable[mpt_array] assign[=] call[name[np].loadtxt, parameter[name[mpt_file]]]
return[tuple[[<ast.Name object at 0x7da1b191fb20>, <ast.Name object at 0x7da1b191c1c0>]]] | keyword[def] identifier[MPTfile] ( identifier[file_or_path] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[file_or_path] , identifier[str] ):
identifier[mpt_file] = identifier[open] ( identifier[file_or_path] , literal[string] )
keyword[else] :
identifier[mpt_file] = identifier[file_or_path]
identifier[magic] = identifier[next] ( identifier[mpt_file] )
keyword[if] identifier[magic] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[magic] )
identifier[nb_headers_match] = identifier[re] . identifier[match] ( literal[string] , identifier[next] ( identifier[mpt_file] ))
identifier[nb_headers] = identifier[int] ( identifier[nb_headers_match] . identifier[group] ( literal[int] ))
keyword[if] identifier[nb_headers] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[nb_headers] )
identifier[comments] =[ identifier[next] ( identifier[mpt_file] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nb_headers] - literal[int] )]
identifier[fieldnames] = identifier[str3] ( identifier[next] ( identifier[mpt_file] )). identifier[strip] (). identifier[split] ( literal[string] )
identifier[record_type] = identifier[np] . identifier[dtype] ( identifier[list] ( identifier[map] ( identifier[fieldname_to_dtype] , identifier[fieldnames] )))
identifier[converter_dict] = identifier[dict] ((( identifier[i] , identifier[comma_converter] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[fieldnames] ))))
identifier[mpt_array] = identifier[np] . identifier[loadtxt] ( identifier[mpt_file] , identifier[dtype] = identifier[record_type] ,
identifier[converters] = identifier[converter_dict] )
keyword[return] identifier[mpt_array] , identifier[comments] | def MPTfile(file_or_path):
"""Opens .mpt files as numpy record arrays
Checks for the correct headings, skips any comments and returns a
numpy record array object and a list of comments
"""
if isinstance(file_or_path, str):
mpt_file = open(file_or_path, 'rb') # depends on [control=['if'], data=[]]
else:
mpt_file = file_or_path
magic = next(mpt_file)
if magic != b'EC-Lab ASCII FILE\r\n':
raise ValueError("Bad first line for EC-Lab file: '%s'" % magic) # depends on [control=['if'], data=['magic']]
nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file))
nb_headers = int(nb_headers_match.group(1))
if nb_headers < 3:
raise ValueError('Too few header lines: %d' % nb_headers) # depends on [control=['if'], data=['nb_headers']]
## The 'magic number' line, the 'Nb headers' line and the column headers
## make three lines. Every additional line is a comment line.
comments = [next(mpt_file) for i in range(nb_headers - 3)]
fieldnames = str3(next(mpt_file)).strip().split('\t')
record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
## Must be able to parse files where commas are used for decimal points
converter_dict = dict(((i, comma_converter) for i in range(len(fieldnames))))
mpt_array = np.loadtxt(mpt_file, dtype=record_type, converters=converter_dict)
return (mpt_array, comments) |
def p_block(self, p):
"""block : LBRACE source_elements RBRACE"""
p[0] = self.asttypes.Block(p[2])
p[0].setpos(p) | def function[p_block, parameter[self, p]]:
constant[block : LBRACE source_elements RBRACE]
call[name[p]][constant[0]] assign[=] call[name[self].asttypes.Block, parameter[call[name[p]][constant[2]]]]
call[call[name[p]][constant[0]].setpos, parameter[name[p]]] | keyword[def] identifier[p_block] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[self] . identifier[asttypes] . identifier[Block] ( identifier[p] [ literal[int] ])
identifier[p] [ literal[int] ]. identifier[setpos] ( identifier[p] ) | def p_block(self, p):
"""block : LBRACE source_elements RBRACE"""
p[0] = self.asttypes.Block(p[2])
p[0].setpos(p) |
def getInstance(cls, *args):
    """
    Return a singleton instance of the class.

    The first call creates the underlying ``DriverManager`` with ``*args``;
    subsequent calls return the cached instance (``*args`` is then ignored).
    """
    if cls.__singleton:
        return cls.__singleton
    cls.__singleton = DriverManager(*args)
    return cls.__singleton
constant[
Returns a singleton instance of the class
]
if <ast.UnaryOp object at 0x7da1b15be1a0> begin[:]
name[cls].__singleton assign[=] call[name[DriverManager], parameter[<ast.Starred object at 0x7da1b15be410>]]
return[name[cls].__singleton] | keyword[def] identifier[getInstance] ( identifier[cls] ,* identifier[args] ):
literal[string]
keyword[if] keyword[not] identifier[cls] . identifier[__singleton] :
identifier[cls] . identifier[__singleton] = identifier[DriverManager] (* identifier[args] )
keyword[return] identifier[cls] . identifier[__singleton] | def getInstance(cls, *args):
"""
Returns a singleton instance of the class
"""
if not cls.__singleton:
cls.__singleton = DriverManager(*args) # depends on [control=['if'], data=[]]
return cls.__singleton |
def read_header(filename, return_idxs=False):
    """ Read blimpy header and return a Python dictionary of key:value pairs

    Args:
        filename (str): name of file to open

    Optional args:
        return_idxs (bool): Default False. If true, returns the file offset
                            indexes for values instead of the values.

    Returns:
        dict: header keyword -> value (or keyword -> file offset when
        ``return_idxs`` is True).

    Raises:
        RuntimeError: if the file does not start with a ``HEADER_START``
            keyword, i.e. it is not a valid blimpy file.
    """
    with open(filename, 'rb') as fh:
        header_dict = {}
        header_idxs = {}

        # Check this is a blimpy file. Use an explicit check rather than
        # `assert`, which is silently stripped when Python runs with -O.
        keyword, value, idx = read_next_header_keyword(fh)
        if keyword != b'HEADER_START':
            raise RuntimeError("Not a valid blimpy file.")

        # Read keyword/value pairs until the header terminator.
        while True:
            keyword, value, idx = read_next_header_keyword(fh)
            if keyword == b'HEADER_END':
                break
            header_dict[keyword] = value
            header_idxs[keyword] = idx

    if return_idxs:
        return header_idxs
    return header_dict
constant[ Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
returns
]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
variable[header_dict] assign[=] dictionary[[], []]
variable[header_idxs] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da2041d9ff0> assign[=] call[name[read_next_header_keyword], parameter[name[fh]]]
<ast.Try object at 0x7da2041da050>
while constant[True] begin[:]
<ast.Tuple object at 0x7da2041d93f0> assign[=] call[name[read_next_header_keyword], parameter[name[fh]]]
if compare[name[keyword] equal[==] constant[b'HEADER_END']] begin[:]
break
if name[return_idxs] begin[:]
return[name[header_idxs]] | keyword[def] identifier[read_header] ( identifier[filename] , identifier[return_idxs] = keyword[False] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fh] :
identifier[header_dict] ={}
identifier[header_idxs] ={}
identifier[keyword] , identifier[value] , identifier[idx] = identifier[read_next_header_keyword] ( identifier[fh] )
keyword[try] :
keyword[assert] identifier[keyword] == literal[string]
keyword[except] identifier[AssertionError] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[while] keyword[True] :
identifier[keyword] , identifier[value] , identifier[idx] = identifier[read_next_header_keyword] ( identifier[fh] )
keyword[if] identifier[keyword] == literal[string] :
keyword[break]
keyword[else] :
identifier[header_dict] [ identifier[keyword] ]= identifier[value]
identifier[header_idxs] [ identifier[keyword] ]= identifier[idx]
keyword[if] identifier[return_idxs] :
keyword[return] identifier[header_idxs]
keyword[else] :
keyword[return] identifier[header_dict] | def read_header(filename, return_idxs=False):
""" Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
returns
"""
with open(filename, 'rb') as fh:
header_dict = {}
header_idxs = {}
# Check this is a blimpy file
(keyword, value, idx) = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START' # depends on [control=['try'], data=[]]
except AssertionError:
raise RuntimeError('Not a valid blimpy file.') # depends on [control=['except'], data=[]]
while True:
(keyword, value, idx) = read_next_header_keyword(fh)
if keyword == b'HEADER_END':
break # depends on [control=['if'], data=[]]
else:
header_dict[keyword] = value
header_idxs[keyword] = idx # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['fh']]
if return_idxs:
return header_idxs # depends on [control=['if'], data=[]]
else:
return header_dict |
def create_blocking_connection(host):
    """
    Return properly created blocking connection.

    Args:
        host (str): Host as it is defined in :func:`.get_amqp_settings`.

    Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
    """
    # Look up the virtual host configured for this host name.
    vhost = settings.get_amqp_settings()[host.lower()]["vhost"]
    connection_parameters = amqpdaemon.getConParams(vhost)
    return pika.BlockingConnection(connection_parameters)
constant[
Return properly created blocking connection.
Args:
host (str): Host as it is defined in :func:`.get_amqp_settings`.
Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
]
return[call[name[pika].BlockingConnection, parameter[call[name[amqpdaemon].getConParams, parameter[call[call[call[name[settings].get_amqp_settings, parameter[]]][call[name[host].lower, parameter[]]]][constant[vhost]]]]]]] | keyword[def] identifier[create_blocking_connection] ( identifier[host] ):
literal[string]
keyword[return] identifier[pika] . identifier[BlockingConnection] (
identifier[amqpdaemon] . identifier[getConParams] (
identifier[settings] . identifier[get_amqp_settings] ()[ identifier[host] . identifier[lower] ()][ literal[string] ]
)
) | def create_blocking_connection(host):
"""
Return properly created blocking connection.
Args:
host (str): Host as it is defined in :func:`.get_amqp_settings`.
Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
"""
return pika.BlockingConnection(amqpdaemon.getConParams(settings.get_amqp_settings()[host.lower()]['vhost'])) |
def save_to_local(self, callback_etat=print):
    """
    Save the current in-memory base to a local file.

    This is a backup, not a convenient way to update datas.

    :param callback_etat: state callback, taking str,int,int as args
                          (message, current step, total number of steps)
    :raises StructureError: if the save path (``self.LOCAL_DB_PATH``)
        cannot be found.
    """
    callback_etat("Aquisition...", 0, 3)
    d = self.dumps()
    s = json.dumps(d, indent=4, cls=formats.JsonEncoder)
    callback_etat("Chiffrement...", 1, 3)
    # protege_data returns encrypted bytes, hence the binary write below.
    s = security.protege_data(s, True)
    callback_etat("Enregistrement...", 2, 3)
    try:
        with open(self.LOCAL_DB_PATH, 'wb') as f:
            f.write(s)
    except FileNotFoundError as exc:
        logging.exception(self.__class__.__name__)
        # Chain the original error so the traceback keeps the root cause.
        raise StructureError("Chemin de sauvegarde introuvable !") from exc
constant[
Saved current in memory base to local file.
It's a backup, not a convenient way to update datas
:param callback_etat: state callback, taking str,int,int as args
]
call[name[callback_etat], parameter[constant[Aquisition...], constant[0], constant[3]]]
variable[d] assign[=] call[name[self].dumps, parameter[]]
variable[s] assign[=] call[name[json].dumps, parameter[name[d]]]
call[name[callback_etat], parameter[constant[Chiffrement...], constant[1], constant[3]]]
variable[s] assign[=] call[name[security].protege_data, parameter[name[s], constant[True]]]
call[name[callback_etat], parameter[constant[Enregistrement...], constant[2], constant[3]]]
<ast.Try object at 0x7da1b1191510> | keyword[def] identifier[save_to_local] ( identifier[self] , identifier[callback_etat] = identifier[print] ):
literal[string]
identifier[callback_etat] ( literal[string] , literal[int] , literal[int] )
identifier[d] = identifier[self] . identifier[dumps] ()
identifier[s] = identifier[json] . identifier[dumps] ( identifier[d] , identifier[indent] = literal[int] , identifier[cls] = identifier[formats] . identifier[JsonEncoder] )
identifier[callback_etat] ( literal[string] , literal[int] , literal[int] )
identifier[s] = identifier[security] . identifier[protege_data] ( identifier[s] , keyword[True] )
identifier[callback_etat] ( literal[string] , literal[int] , literal[int] )
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[LOCAL_DB_PATH] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[s] )
keyword[except] ( identifier[FileNotFoundError] ):
identifier[logging] . identifier[exception] ( identifier[self] . identifier[__class__] . identifier[__name__] )
keyword[raise] identifier[StructureError] ( literal[string] ) | def save_to_local(self, callback_etat=print):
"""
Saved current in memory base to local file.
It's a backup, not a convenient way to update datas
:param callback_etat: state callback, taking str,int,int as args
"""
callback_etat('Aquisition...', 0, 3)
d = self.dumps()
s = json.dumps(d, indent=4, cls=formats.JsonEncoder)
callback_etat('Chiffrement...', 1, 3)
s = security.protege_data(s, True)
callback_etat('Enregistrement...', 2, 3)
try:
with open(self.LOCAL_DB_PATH, 'wb') as f:
f.write(s) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except FileNotFoundError:
logging.exception(self.__class__.__name__)
raise StructureError('Chemin de sauvegarde introuvable !') # depends on [control=['except'], data=[]] |
def contains_none_of(self, elements):
    """
    Ensures :attr:`subject` contains none of *elements*, which must be an iterable.
    """
    subject = self._subject
    # Assert element-by-element so the first offending element is reported.
    for candidate in elements:
        self._run(unittest_case.assertNotIn, (candidate, subject))
    return ChainInspector(subject)
constant[
Ensures :attr:`subject` contains none of *elements*, which must be an iterable.
]
for taget[name[element]] in starred[name[elements]] begin[:]
call[name[self]._run, parameter[name[unittest_case].assertNotIn, tuple[[<ast.Name object at 0x7da18bcca5f0>, <ast.Attribute object at 0x7da18bccb820>]]]]
return[call[name[ChainInspector], parameter[name[self]._subject]]] | keyword[def] identifier[contains_none_of] ( identifier[self] , identifier[elements] ):
literal[string]
keyword[for] identifier[element] keyword[in] identifier[elements] :
identifier[self] . identifier[_run] ( identifier[unittest_case] . identifier[assertNotIn] ,( identifier[element] , identifier[self] . identifier[_subject] ))
keyword[return] identifier[ChainInspector] ( identifier[self] . identifier[_subject] ) | def contains_none_of(self, elements):
"""
Ensures :attr:`subject` contains none of *elements*, which must be an iterable.
"""
for element in elements:
self._run(unittest_case.assertNotIn, (element, self._subject)) # depends on [control=['for'], data=['element']]
return ChainInspector(self._subject) |
def validate_args(fname, args, max_fname_arg_count, compat_args):
    """
    Checks whether the length of the `*args` argument passed into a function
    has at most `len(compat_args)` arguments and whether or not all of these
    elements in `args` are set to their default values.

    Parameters
    ----------
    fname : str
        The name of the function being passed the `*args` parameter
    args : tuple
        The `*args` parameter passed into a function
    max_fname_arg_count : int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
        appropriate error messages. Must be non-negative.
    compat_args : OrderedDict
        A ordered dictionary of keys and their associated default values.
        In order to accommodate buggy behaviour in some versions of `numpy`,
        where a signature displayed keyword arguments but then passed those
        arguments **positionally** internally when calling downstream
        implementations, an ordered dictionary ensures that the original
        order of the keyword arguments is enforced. Note that if there is
        only one key, a generic dict can be passed in as well.

    Raises
    ------
    TypeError if `args` contains more values than there are `compat_args`
    ValueError if `args` contains values that do not correspond to those
    of the default values specified in `compat_args`
    """
    _check_arg_length(fname, args, max_fname_arg_count, compat_args)

    # We do this so that we can provide a more informative
    # error message about the parameters that we are not
    # supporting in the pandas implementation of 'fname'.
    # Zipping stops at the shorter sequence, so only the positionally
    # supplied arguments are mapped onto the compat keyword names.
    kwargs = dict(zip(compat_args, args))
    _check_for_default_values(fname, kwargs, compat_args)
constant[
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
A ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
]
call[name[_check_arg_length], parameter[name[fname], name[args], name[max_fname_arg_count], name[compat_args]]]
variable[kwargs] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[compat_args], name[args]]]]]
call[name[_check_for_default_values], parameter[name[fname], name[kwargs], name[compat_args]]] | keyword[def] identifier[validate_args] ( identifier[fname] , identifier[args] , identifier[max_fname_arg_count] , identifier[compat_args] ):
literal[string]
identifier[_check_arg_length] ( identifier[fname] , identifier[args] , identifier[max_fname_arg_count] , identifier[compat_args] )
identifier[kwargs] = identifier[dict] ( identifier[zip] ( identifier[compat_args] , identifier[args] ))
identifier[_check_for_default_values] ( identifier[fname] , identifier[kwargs] , identifier[compat_args] ) | def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
A ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args) |
def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':
    """Perform HTTP DELETE request."""
    request_coro = self._request(hdrs.METH_DELETE, url, **kwargs)
    return _RequestContextManager(request_coro)
constant[Perform HTTP DELETE request.]
return[call[name[_RequestContextManager], parameter[call[name[self]._request, parameter[name[hdrs].METH_DELETE, name[url]]]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[url] : identifier[StrOrURL] ,** identifier[kwargs] : identifier[Any] )-> literal[string] :
literal[string]
keyword[return] identifier[_RequestContextManager] (
identifier[self] . identifier[_request] ( identifier[hdrs] . identifier[METH_DELETE] , identifier[url] ,
** identifier[kwargs] )) | def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP DELETE request."""
return _RequestContextManager(self._request(hdrs.METH_DELETE, url, **kwargs)) |
def mac(raw):
    """
    Convert a raw string to a standardised MAC Address EUI Format.

    :param raw: the raw string containing the value of the MAC Address
    :return: a string with the MAC Address in EUI format

    Example:

    .. code-block:: python

        >>> mac('0123.4567.89ab')
        u'01:23:45:67:89:AB'

    Some vendors like Cisco return MAC addresses like a9:c5:2e:7b:6: which is not entirely valid
    (with respect to EUI48 or EUI64 standards). Therefore we need to stuff with trailing zeros

    Example
    >>> mac('a9:c5:2e:7b:6:')
    u'A9:C5:2E:7B:60:00'

    If Cisco or other obscure vendors use their own standards, will throw an error and we can fix
    later, however, still works with weird formats like:

    >>> mac('123.4567.89ab')
    u'01:23:45:67:89:AB'
    >>> mac('23.4567.89ab')
    u'00:23:45:67:89:AB'
    """
    if raw.endswith(':'):
        # A trailing colon means the vendor truncated trailing zeros; pad
        # the flat hex string back to the full 12 digits before parsing.
        hex_digits = raw.replace(':', '')
        raw = hex_digits + '0' * (12 - len(hex_digits))
    return py23_compat.text_type(EUI(raw, dialect=_MACFormat))
constant[
Converts a raw string to a standardised MAC Address EUI Format.
:param raw: the raw string containing the value of the MAC Address
:return: a string with the MAC Address in EUI format
Example:
.. code-block:: python
>>> mac('0123.4567.89ab')
u'01:23:45:67:89:AB'
Some vendors like Cisco return MAC addresses like a9:c5:2e:7b:6: which is not entirely valid
(with respect to EUI48 or EUI64 standards). Therefore we need to stuff with trailing zeros
Example
>>> mac('a9:c5:2e:7b:6:')
u'A9:C5:2E:7B:60:00'
If Cisco or other obscure vendors use their own standards, will throw an error and we can fix
later, however, still works with weird formats like:
>>> mac('123.4567.89ab')
u'01:23:45:67:89:AB'
>>> mac('23.4567.89ab')
u'00:23:45:67:89:AB'
]
if call[name[raw].endswith, parameter[constant[:]]] begin[:]
variable[flat_raw] assign[=] call[name[raw].replace, parameter[constant[:], constant[]]]
variable[raw] assign[=] call[constant[{flat_raw}{zeros_stuffed}].format, parameter[]]
return[call[name[py23_compat].text_type, parameter[call[name[EUI], parameter[name[raw]]]]]] | keyword[def] identifier[mac] ( identifier[raw] ):
literal[string]
keyword[if] identifier[raw] . identifier[endswith] ( literal[string] ):
identifier[flat_raw] = identifier[raw] . identifier[replace] ( literal[string] , literal[string] )
identifier[raw] = literal[string] . identifier[format] (
identifier[flat_raw] = identifier[flat_raw] ,
identifier[zeros_stuffed] = literal[string] *( literal[int] - identifier[len] ( identifier[flat_raw] ))
)
keyword[return] identifier[py23_compat] . identifier[text_type] ( identifier[EUI] ( identifier[raw] , identifier[dialect] = identifier[_MACFormat] )) | def mac(raw):
"""
Converts a raw string to a standardised MAC Address EUI Format.
:param raw: the raw string containing the value of the MAC Address
:return: a string with the MAC Address in EUI format
Example:
.. code-block:: python
>>> mac('0123.4567.89ab')
u'01:23:45:67:89:AB'
Some vendors like Cisco return MAC addresses like a9:c5:2e:7b:6: which is not entirely valid
(with respect to EUI48 or EUI64 standards). Therefore we need to stuff with trailing zeros
Example
>>> mac('a9:c5:2e:7b:6:')
u'A9:C5:2E:7B:60:00'
If Cisco or other obscure vendors use their own standards, will throw an error and we can fix
later, however, still works with weird formats like:
>>> mac('123.4567.89ab')
u'01:23:45:67:89:AB'
>>> mac('23.4567.89ab')
u'00:23:45:67:89:AB'
"""
if raw.endswith(':'):
flat_raw = raw.replace(':', '')
raw = '{flat_raw}{zeros_stuffed}'.format(flat_raw=flat_raw, zeros_stuffed='0' * (12 - len(flat_raw))) # depends on [control=['if'], data=[]]
return py23_compat.text_type(EUI(raw, dialect=_MACFormat)) |
def mod_categorical_expval(p):
    """
    Expected value of categorical distribution with parent p of length k-1.

    An implicit k'th category is assumed to exist with associated
    probability 1-sum(p).
    """
    # Extend p with the implicit k'th probability, then take sum(i * p_i).
    full_probs = extend_dirichlet(p)
    return np.sum([index * prob for index, prob in enumerate(full_probs)])
constant[
Expected value of categorical distribution with parent p of length k-1.
An implicit k'th category is assumed to exist with associated
probability 1-sum(p).
]
variable[p] assign[=] call[name[extend_dirichlet], parameter[name[p]]]
return[call[name[np].sum, parameter[<ast.ListComp object at 0x7da18bcc8a30>]]] | keyword[def] identifier[mod_categorical_expval] ( identifier[p] ):
literal[string]
identifier[p] = identifier[extend_dirichlet] ( identifier[p] )
keyword[return] identifier[np] . identifier[sum] ([ identifier[p] * identifier[i] keyword[for] identifier[i] , identifier[p] keyword[in] identifier[enumerate] ( identifier[p] )]) | def mod_categorical_expval(p):
"""
Expected value of categorical distribution with parent p of length k-1.
An implicit k'th category is assumed to exist with associated
probability 1-sum(p).
"""
p = extend_dirichlet(p)
return np.sum([p * i for (i, p) in enumerate(p)]) |
def _decimal_to_xsd_format(value):
    """
    Converts a decimal.Decimal value to its XSD decimal type value.

    Result is a string containing the XSD decimal type's lexical value
    representation. The conversion is done without any precision loss.

    Note that Python's native decimal.Decimal string representation will
    not do here as the lexical representation desired here does not allow
    representing decimal values using float-like `<mantissa>E<exponent>'
    format, e.g. 12E+30 or 0.10006E-12.

    :param value: decimal value to convert (canonicalized internally).
    :returns: str -- lexical XSD ``decimal`` representation of ``value``.
    """
    value = XDecimal._decimal_canonical(value)
    negative, digits, exponent = value.as_tuple()
    # The following implementation assumes the following tuple decimal
    # encoding (part of the canonical decimal value encoding):
    #  - digits must contain at least one element
    #  - no leading integral 0 digits except a single one in 0 (if a non-0
    #    decimal value has leading integral 0 digits they must be encoded
    #    in its 'exponent' value and not included explicitly in its
    #    'digits' tuple)
    assert digits
    assert digits[0] != 0 or len(digits) == 1
    result = []
    if negative:
        result.append("-")
    # No fractional digits.
    if exponent >= 0:
        result.extend(str(x) for x in digits)
        result.extend("0" * exponent)
        return "".join(result)
    digit_count = len(digits)
    # Decimal point offset from the given digit start.
    point_offset = digit_count + exponent
    # Trim trailing fractional 0 digits.
    fractional_digit_count = min(digit_count, -exponent)
    while fractional_digit_count and digits[digit_count - 1] == 0:
        digit_count -= 1
        fractional_digit_count -= 1
    # No trailing fractional 0 digits and a decimal point coming not after
    # the given digits, meaning there is no need to add additional trailing
    # integral 0 digits.
    if point_offset <= 0:
        # No integral digits.
        result.append("0")
        if digit_count > 0:
            # Fractional part: pad with -point_offset leading zeros, then
            # emit the remaining significant digits.
            result.append(".")
            result.append("0" * -point_offset)
            result.extend(str(x) for x in digits[:digit_count])
    else:
        # Have integral and possibly some fractional digits.
        result.extend(str(x) for x in digits[:point_offset])
        if point_offset < digit_count:
            result.append(".")
            result.extend(str(x) for x in digits[point_offset:digit_count])
    return "".join(result)
constant[
Converts a decimal.Decimal value to its XSD decimal type value.
Result is a string containing the XSD decimal type's lexical value
representation. The conversion is done without any precision loss.
Note that Python's native decimal.Decimal string representation will
not do here as the lexical representation desired here does not allow
representing decimal values using float-like `<mantissa>E<exponent>'
format, e.g. 12E+30 or 0.10006E-12.
]
variable[value] assign[=] call[name[XDecimal]._decimal_canonical, parameter[name[value]]]
<ast.Tuple object at 0x7da1b061a0e0> assign[=] call[name[value].as_tuple, parameter[]]
assert[name[digits]]
assert[<ast.BoolOp object at 0x7da1b061b820>]
variable[result] assign[=] list[[]]
if name[negative] begin[:]
call[name[result].append, parameter[constant[-]]]
if compare[name[exponent] greater_or_equal[>=] constant[0]] begin[:]
call[name[result].extend, parameter[<ast.GeneratorExp object at 0x7da1b0619180>]]
call[name[result].extend, parameter[binary_operation[constant[0] * name[exponent]]]]
return[call[constant[].join, parameter[name[result]]]]
variable[digit_count] assign[=] call[name[len], parameter[name[digits]]]
variable[point_offset] assign[=] binary_operation[name[digit_count] + name[exponent]]
variable[fractional_digit_count] assign[=] call[name[min], parameter[name[digit_count], <ast.UnaryOp object at 0x7da1b0618910>]]
while <ast.BoolOp object at 0x7da1b0618b20> begin[:]
<ast.AugAssign object at 0x7da1b06195d0>
<ast.AugAssign object at 0x7da1b0619de0>
if compare[name[point_offset] less_or_equal[<=] constant[0]] begin[:]
call[name[result].append, parameter[constant[0]]]
if compare[name[digit_count] greater[>] constant[0]] begin[:]
call[name[result].append, parameter[constant[.]]]
call[name[result].append, parameter[binary_operation[constant[0] * <ast.UnaryOp object at 0x7da1b0619c30>]]]
call[name[result].extend, parameter[<ast.GeneratorExp object at 0x7da1b06199c0>]]
return[call[constant[].join, parameter[name[result]]]] | keyword[def] identifier[_decimal_to_xsd_format] ( identifier[value] ):
literal[string]
identifier[value] = identifier[XDecimal] . identifier[_decimal_canonical] ( identifier[value] )
identifier[negative] , identifier[digits] , identifier[exponent] = identifier[value] . identifier[as_tuple] ()
keyword[assert] identifier[digits]
keyword[assert] identifier[digits] [ literal[int] ]!= literal[int] keyword[or] identifier[len] ( identifier[digits] )== literal[int]
identifier[result] =[]
keyword[if] identifier[negative] :
identifier[result] . identifier[append] ( literal[string] )
keyword[if] identifier[exponent] >= literal[int] :
identifier[result] . identifier[extend] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[digits] )
identifier[result] . identifier[extend] ( literal[string] * identifier[exponent] )
keyword[return] literal[string] . identifier[join] ( identifier[result] )
identifier[digit_count] = identifier[len] ( identifier[digits] )
identifier[point_offset] = identifier[digit_count] + identifier[exponent]
identifier[fractional_digit_count] = identifier[min] ( identifier[digit_count] ,- identifier[exponent] )
keyword[while] identifier[fractional_digit_count] keyword[and] identifier[digits] [ identifier[digit_count] - literal[int] ]== literal[int] :
identifier[digit_count] -= literal[int]
identifier[fractional_digit_count] -= literal[int]
keyword[if] identifier[point_offset] <= literal[int] :
identifier[result] . identifier[append] ( literal[string] )
keyword[if] identifier[digit_count] > literal[int] :
identifier[result] . identifier[append] ( literal[string] )
identifier[result] . identifier[append] ( literal[string] *- identifier[point_offset] )
identifier[result] . identifier[extend] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[digits] [: identifier[digit_count] ])
keyword[else] :
identifier[result] . identifier[extend] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[digits] [: identifier[point_offset] ])
keyword[if] identifier[point_offset] < identifier[digit_count] :
identifier[result] . identifier[append] ( literal[string] )
identifier[result] . identifier[extend] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[digits] [ identifier[point_offset] : identifier[digit_count] ])
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def _decimal_to_xsd_format(value):
"""
Converts a decimal.Decimal value to its XSD decimal type value.
Result is a string containing the XSD decimal type's lexical value
representation. The conversion is done without any precision loss.
Note that Python's native decimal.Decimal string representation will
not do here as the lexical representation desired here does not allow
representing decimal values using float-like `<mantissa>E<exponent>'
format, e.g. 12E+30 or 0.10006E-12.
"""
value = XDecimal._decimal_canonical(value)
(negative, digits, exponent) = value.as_tuple()
# The following implementation assumes the following tuple decimal
# encoding (part of the canonical decimal value encoding):
# - digits must contain at least one element
# - no leading integral 0 digits except a single one in 0 (if a non-0
# decimal value has leading integral 0 digits they must be encoded
# in its 'exponent' value and not included explicitly in its
# 'digits' tuple)
assert digits
assert digits[0] != 0 or len(digits) == 1
result = []
if negative:
result.append('-') # depends on [control=['if'], data=[]]
# No fractional digits.
if exponent >= 0:
result.extend((str(x) for x in digits))
result.extend('0' * exponent)
return ''.join(result) # depends on [control=['if'], data=['exponent']]
digit_count = len(digits)
# Decimal point offset from the given digit start.
point_offset = digit_count + exponent
# Trim trailing fractional 0 digits.
fractional_digit_count = min(digit_count, -exponent)
while fractional_digit_count and digits[digit_count - 1] == 0:
digit_count -= 1
fractional_digit_count -= 1 # depends on [control=['while'], data=[]]
# No trailing fractional 0 digits and a decimal point coming not after
# the given digits, meaning there is no need to add additional trailing
# integral 0 digits.
if point_offset <= 0:
# No integral digits.
result.append('0')
if digit_count > 0:
result.append('.')
result.append('0' * -point_offset)
result.extend((str(x) for x in digits[:digit_count])) # depends on [control=['if'], data=['digit_count']] # depends on [control=['if'], data=['point_offset']]
else:
# Have integral and possibly some fractional digits.
result.extend((str(x) for x in digits[:point_offset]))
if point_offset < digit_count:
result.append('.')
result.extend((str(x) for x in digits[point_offset:digit_count])) # depends on [control=['if'], data=['point_offset', 'digit_count']]
return ''.join(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.