code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def post(self, request, *args, **kwargs):
""" Post a service request (requires authentication) """
service_code = request.data['service_code']
if service_code not in SERVICES.keys():
return Response({ 'detail': _('Service not found') }, status=404)
serializers = {
'node': NodeRequestSerializer,
'vote': VoteRequestSerializer,
'comment': CommentRequestSerializer,
'rate': RatingRequestSerializer,
}
# init right serializer
kwargs['service_code'] = service_code
kwargs['serializer'] = serializers[service_code]
user=self.get_custom_data()
request.UPDATED = request.data.copy()
request.UPDATED['user'] = user['user']
if service_code == 'node':
for checkPOSTdata in ('layer','name','lat','long'):
# Check if mandatory parameters key exists
if checkPOSTdata not in request.data.keys():
return Response({ 'detail': _('Mandatory parameter not found') }, status=400)
else:
# Check if mandatory parameters values have been inserted
if not request.data[checkPOSTdata] :
return Response({ 'detail': _('Mandatory parameter not found') }, status=400)
# Get layer id
layer = Layer.objects.get(slug=request.UPDATED['layer'])
request.UPDATED['layer'] = layer.id
# Transform coords in wkt geometry
lat = float(request.UPDATED['lat'])
long = float(request.UPDATED['long'])
point = Point((long, lat))
request.UPDATED['geometry'] = point.wkt
request.UPDATED['slug'] = slugify(request.UPDATED['name'])
return self.create(request, *args, **kwargs)
|
def function[post, parameter[self, request]]:
constant[ Post a service request (requires authentication) ]
variable[service_code] assign[=] call[name[request].data][constant[service_code]]
if compare[name[service_code] <ast.NotIn object at 0x7da2590d7190> call[name[SERVICES].keys, parameter[]]] begin[:]
return[call[name[Response], parameter[dictionary[[<ast.Constant object at 0x7da18f09d330>], [<ast.Call object at 0x7da18f09df90>]]]]]
variable[serializers] assign[=] dictionary[[<ast.Constant object at 0x7da18f09fd60>, <ast.Constant object at 0x7da18f09c490>, <ast.Constant object at 0x7da18f09e500>, <ast.Constant object at 0x7da18f09f130>], [<ast.Name object at 0x7da18f09e770>, <ast.Name object at 0x7da18f09d1b0>, <ast.Name object at 0x7da18f09d030>, <ast.Name object at 0x7da18f09e7a0>]]
call[name[kwargs]][constant[service_code]] assign[=] name[service_code]
call[name[kwargs]][constant[serializer]] assign[=] call[name[serializers]][name[service_code]]
variable[user] assign[=] call[name[self].get_custom_data, parameter[]]
name[request].UPDATED assign[=] call[name[request].data.copy, parameter[]]
call[name[request].UPDATED][constant[user]] assign[=] call[name[user]][constant[user]]
if compare[name[service_code] equal[==] constant[node]] begin[:]
for taget[name[checkPOSTdata]] in starred[tuple[[<ast.Constant object at 0x7da18f09f0a0>, <ast.Constant object at 0x7da18f09cd30>, <ast.Constant object at 0x7da18f09da50>, <ast.Constant object at 0x7da18f09de10>]]] begin[:]
if compare[name[checkPOSTdata] <ast.NotIn object at 0x7da2590d7190> call[name[request].data.keys, parameter[]]] begin[:]
return[call[name[Response], parameter[dictionary[[<ast.Constant object at 0x7da18f09c970>], [<ast.Call object at 0x7da18f09e1d0>]]]]]
variable[layer] assign[=] call[name[Layer].objects.get, parameter[]]
call[name[request].UPDATED][constant[layer]] assign[=] name[layer].id
variable[lat] assign[=] call[name[float], parameter[call[name[request].UPDATED][constant[lat]]]]
variable[long] assign[=] call[name[float], parameter[call[name[request].UPDATED][constant[long]]]]
variable[point] assign[=] call[name[Point], parameter[tuple[[<ast.Name object at 0x7da18f09ed10>, <ast.Name object at 0x7da18f09c610>]]]]
call[name[request].UPDATED][constant[geometry]] assign[=] name[point].wkt
call[name[request].UPDATED][constant[slug]] assign[=] call[name[slugify], parameter[call[name[request].UPDATED][constant[name]]]]
return[call[name[self].create, parameter[name[request], <ast.Starred object at 0x7da18f09c880>]]]
|
keyword[def] identifier[post] ( identifier[self] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[service_code] = identifier[request] . identifier[data] [ literal[string] ]
keyword[if] identifier[service_code] keyword[not] keyword[in] identifier[SERVICES] . identifier[keys] ():
keyword[return] identifier[Response] ({ literal[string] : identifier[_] ( literal[string] )}, identifier[status] = literal[int] )
identifier[serializers] ={
literal[string] : identifier[NodeRequestSerializer] ,
literal[string] : identifier[VoteRequestSerializer] ,
literal[string] : identifier[CommentRequestSerializer] ,
literal[string] : identifier[RatingRequestSerializer] ,
}
identifier[kwargs] [ literal[string] ]= identifier[service_code]
identifier[kwargs] [ literal[string] ]= identifier[serializers] [ identifier[service_code] ]
identifier[user] = identifier[self] . identifier[get_custom_data] ()
identifier[request] . identifier[UPDATED] = identifier[request] . identifier[data] . identifier[copy] ()
identifier[request] . identifier[UPDATED] [ literal[string] ]= identifier[user] [ literal[string] ]
keyword[if] identifier[service_code] == literal[string] :
keyword[for] identifier[checkPOSTdata] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[checkPOSTdata] keyword[not] keyword[in] identifier[request] . identifier[data] . identifier[keys] ():
keyword[return] identifier[Response] ({ literal[string] : identifier[_] ( literal[string] )}, identifier[status] = literal[int] )
keyword[else] :
keyword[if] keyword[not] identifier[request] . identifier[data] [ identifier[checkPOSTdata] ]:
keyword[return] identifier[Response] ({ literal[string] : identifier[_] ( literal[string] )}, identifier[status] = literal[int] )
identifier[layer] = identifier[Layer] . identifier[objects] . identifier[get] ( identifier[slug] = identifier[request] . identifier[UPDATED] [ literal[string] ])
identifier[request] . identifier[UPDATED] [ literal[string] ]= identifier[layer] . identifier[id]
identifier[lat] = identifier[float] ( identifier[request] . identifier[UPDATED] [ literal[string] ])
identifier[long] = identifier[float] ( identifier[request] . identifier[UPDATED] [ literal[string] ])
identifier[point] = identifier[Point] (( identifier[long] , identifier[lat] ))
identifier[request] . identifier[UPDATED] [ literal[string] ]= identifier[point] . identifier[wkt]
identifier[request] . identifier[UPDATED] [ literal[string] ]= identifier[slugify] ( identifier[request] . identifier[UPDATED] [ literal[string] ])
keyword[return] identifier[self] . identifier[create] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
|
def post(self, request, *args, **kwargs):
""" Post a service request (requires authentication) """
service_code = request.data['service_code']
if service_code not in SERVICES.keys():
return Response({'detail': _('Service not found')}, status=404) # depends on [control=['if'], data=[]]
serializers = {'node': NodeRequestSerializer, 'vote': VoteRequestSerializer, 'comment': CommentRequestSerializer, 'rate': RatingRequestSerializer}
# init right serializer
kwargs['service_code'] = service_code
kwargs['serializer'] = serializers[service_code]
user = self.get_custom_data()
request.UPDATED = request.data.copy()
request.UPDATED['user'] = user['user']
if service_code == 'node':
for checkPOSTdata in ('layer', 'name', 'lat', 'long'):
# Check if mandatory parameters key exists
if checkPOSTdata not in request.data.keys():
return Response({'detail': _('Mandatory parameter not found')}, status=400) # depends on [control=['if'], data=[]]
# Check if mandatory parameters values have been inserted
elif not request.data[checkPOSTdata]:
return Response({'detail': _('Mandatory parameter not found')}, status=400) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['checkPOSTdata']]
# Get layer id
layer = Layer.objects.get(slug=request.UPDATED['layer'])
request.UPDATED['layer'] = layer.id
# Transform coords in wkt geometry
lat = float(request.UPDATED['lat'])
long = float(request.UPDATED['long'])
point = Point((long, lat))
request.UPDATED['geometry'] = point.wkt
request.UPDATED['slug'] = slugify(request.UPDATED['name']) # depends on [control=['if'], data=[]]
return self.create(request, *args, **kwargs)
|
def read(self, fieldname):
"""
Read the next field from the stream. `fieldname` is used
to verify that your application logic matches the message
field order, and if `fieldname` does not match the next
field in the message definition, :class:`SendlibError`
is raised.
Returns a Python object of the correct type, depending on
the type present in the stream. If the type is ``data``,
returns a :class:`Data` file-like object.
"""
if self._pos == -1:
self._pos = 0
if PREFIX['message'] != self.stream.read(1):
raise SendlibError('Invalid message format')
if PREFIX['str'] != self.stream.read(1):
raise SendlibError('Invalid message format')
name = self._read_str()
if PREFIX['int'] != self.stream.read(1):
raise SendlibError('Invalid message format')
version = self._read_int()
if name != self.message.name or version != self.message.version:
raise SendlibError(
'Reader for %s cannot read message of type (%s, %d)'
% (self.message, name, version))
if self._data is not None:
if self._data.bytes_remaining() == 0:
self._data = None
else:
raise Exception('cannot read field, cursor still on data')
if self._peek is None:
self._peek = self.stream.read(1)
typename = self._check(fieldname)
reader = getattr(self, '_read_' + typename)
value = reader()
self._pos += 1
self._peek = None
return value
|
def function[read, parameter[self, fieldname]]:
constant[
Read the next field from the stream. `fieldname` is used
to verify that your application logic matches the message
field order, and if `fieldname` does not match the next
field in the message definition, :class:`SendlibError`
is raised.
Returns a Python object of the correct type, depending on
the type present in the stream. If the type is ``data``,
returns a :class:`Data` file-like object.
]
if compare[name[self]._pos equal[==] <ast.UnaryOp object at 0x7da204565db0>] begin[:]
name[self]._pos assign[=] constant[0]
if compare[call[name[PREFIX]][constant[message]] not_equal[!=] call[name[self].stream.read, parameter[constant[1]]]] begin[:]
<ast.Raise object at 0x7da204565c90>
if compare[call[name[PREFIX]][constant[str]] not_equal[!=] call[name[self].stream.read, parameter[constant[1]]]] begin[:]
<ast.Raise object at 0x7da204566650>
variable[name] assign[=] call[name[self]._read_str, parameter[]]
if compare[call[name[PREFIX]][constant[int]] not_equal[!=] call[name[self].stream.read, parameter[constant[1]]]] begin[:]
<ast.Raise object at 0x7da204345e40>
variable[version] assign[=] call[name[self]._read_int, parameter[]]
if <ast.BoolOp object at 0x7da204344220> begin[:]
<ast.Raise object at 0x7da20c76dab0>
if compare[name[self]._data is_not constant[None]] begin[:]
if compare[call[name[self]._data.bytes_remaining, parameter[]] equal[==] constant[0]] begin[:]
name[self]._data assign[=] constant[None]
if compare[name[self]._peek is constant[None]] begin[:]
name[self]._peek assign[=] call[name[self].stream.read, parameter[constant[1]]]
variable[typename] assign[=] call[name[self]._check, parameter[name[fieldname]]]
variable[reader] assign[=] call[name[getattr], parameter[name[self], binary_operation[constant[_read_] + name[typename]]]]
variable[value] assign[=] call[name[reader], parameter[]]
<ast.AugAssign object at 0x7da20c76f520>
name[self]._peek assign[=] constant[None]
return[name[value]]
|
keyword[def] identifier[read] ( identifier[self] , identifier[fieldname] ):
literal[string]
keyword[if] identifier[self] . identifier[_pos] ==- literal[int] :
identifier[self] . identifier[_pos] = literal[int]
keyword[if] identifier[PREFIX] [ literal[string] ]!= identifier[self] . identifier[stream] . identifier[read] ( literal[int] ):
keyword[raise] identifier[SendlibError] ( literal[string] )
keyword[if] identifier[PREFIX] [ literal[string] ]!= identifier[self] . identifier[stream] . identifier[read] ( literal[int] ):
keyword[raise] identifier[SendlibError] ( literal[string] )
identifier[name] = identifier[self] . identifier[_read_str] ()
keyword[if] identifier[PREFIX] [ literal[string] ]!= identifier[self] . identifier[stream] . identifier[read] ( literal[int] ):
keyword[raise] identifier[SendlibError] ( literal[string] )
identifier[version] = identifier[self] . identifier[_read_int] ()
keyword[if] identifier[name] != identifier[self] . identifier[message] . identifier[name] keyword[or] identifier[version] != identifier[self] . identifier[message] . identifier[version] :
keyword[raise] identifier[SendlibError] (
literal[string]
%( identifier[self] . identifier[message] , identifier[name] , identifier[version] ))
keyword[if] identifier[self] . identifier[_data] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[_data] . identifier[bytes_remaining] ()== literal[int] :
identifier[self] . identifier[_data] = keyword[None]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[self] . identifier[_peek] keyword[is] keyword[None] :
identifier[self] . identifier[_peek] = identifier[self] . identifier[stream] . identifier[read] ( literal[int] )
identifier[typename] = identifier[self] . identifier[_check] ( identifier[fieldname] )
identifier[reader] = identifier[getattr] ( identifier[self] , literal[string] + identifier[typename] )
identifier[value] = identifier[reader] ()
identifier[self] . identifier[_pos] += literal[int]
identifier[self] . identifier[_peek] = keyword[None]
keyword[return] identifier[value]
|
def read(self, fieldname):
"""
Read the next field from the stream. `fieldname` is used
to verify that your application logic matches the message
field order, and if `fieldname` does not match the next
field in the message definition, :class:`SendlibError`
is raised.
Returns a Python object of the correct type, depending on
the type present in the stream. If the type is ``data``,
returns a :class:`Data` file-like object.
"""
if self._pos == -1:
self._pos = 0
if PREFIX['message'] != self.stream.read(1):
raise SendlibError('Invalid message format') # depends on [control=['if'], data=[]]
if PREFIX['str'] != self.stream.read(1):
raise SendlibError('Invalid message format') # depends on [control=['if'], data=[]]
name = self._read_str()
if PREFIX['int'] != self.stream.read(1):
raise SendlibError('Invalid message format') # depends on [control=['if'], data=[]]
version = self._read_int()
if name != self.message.name or version != self.message.version:
raise SendlibError('Reader for %s cannot read message of type (%s, %d)' % (self.message, name, version)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self._data is not None:
if self._data.bytes_remaining() == 0:
self._data = None # depends on [control=['if'], data=[]]
else:
raise Exception('cannot read field, cursor still on data') # depends on [control=['if'], data=[]]
if self._peek is None:
self._peek = self.stream.read(1) # depends on [control=['if'], data=[]]
typename = self._check(fieldname)
reader = getattr(self, '_read_' + typename)
value = reader()
self._pos += 1
self._peek = None
return value
|
def make_dataloader(data_train, data_val, data_test, args,
use_average_length=False, num_shards=0, num_workers=8):
"""Create data loaders for training/validation/test."""
data_train_lengths = get_data_lengths(data_train)
data_val_lengths = get_data_lengths(data_val)
data_test_lengths = get_data_lengths(data_test)
train_batchify_fn = btf.Tuple(btf.Pad(), btf.Pad(),
btf.Stack(dtype='float32'), btf.Stack(dtype='float32'))
test_batchify_fn = btf.Tuple(btf.Pad(), btf.Pad(),
btf.Stack(dtype='float32'), btf.Stack(dtype='float32'),
btf.Stack())
target_val_lengths = list(map(lambda x: x[-1], data_val_lengths))
target_test_lengths = list(map(lambda x: x[-1], data_test_lengths))
if args.bucket_scheme == 'constant':
bucket_scheme = nlp.data.ConstWidthBucket()
elif args.bucket_scheme == 'linear':
bucket_scheme = nlp.data.LinearWidthBucket()
elif args.bucket_scheme == 'exp':
bucket_scheme = nlp.data.ExpWidthBucket(bucket_len_step=1.2)
else:
raise NotImplementedError
train_batch_sampler = nlp.data.FixedBucketSampler(lengths=data_train_lengths,
batch_size=args.batch_size,
num_buckets=args.num_buckets,
ratio=args.bucket_ratio,
shuffle=True,
use_average_length=use_average_length,
num_shards=num_shards,
bucket_scheme=bucket_scheme)
logging.info('Train Batch Sampler:\n%s', train_batch_sampler.stats())
train_data_loader = nlp.data.ShardedDataLoader(data_train,
batch_sampler=train_batch_sampler,
batchify_fn=train_batchify_fn,
num_workers=num_workers)
val_batch_sampler = nlp.data.FixedBucketSampler(lengths=target_val_lengths,
batch_size=args.test_batch_size,
num_buckets=args.num_buckets,
ratio=args.bucket_ratio,
shuffle=False,
use_average_length=use_average_length,
bucket_scheme=bucket_scheme)
logging.info('Valid Batch Sampler:\n%s', val_batch_sampler.stats())
val_data_loader = gluon.data.DataLoader(data_val,
batch_sampler=val_batch_sampler,
batchify_fn=test_batchify_fn,
num_workers=num_workers)
test_batch_sampler = nlp.data.FixedBucketSampler(lengths=target_test_lengths,
batch_size=args.test_batch_size,
num_buckets=args.num_buckets,
ratio=args.bucket_ratio,
shuffle=False,
use_average_length=use_average_length,
bucket_scheme=bucket_scheme)
logging.info('Test Batch Sampler:\n%s', test_batch_sampler.stats())
test_data_loader = gluon.data.DataLoader(data_test,
batch_sampler=test_batch_sampler,
batchify_fn=test_batchify_fn,
num_workers=num_workers)
return train_data_loader, val_data_loader, test_data_loader
|
def function[make_dataloader, parameter[data_train, data_val, data_test, args, use_average_length, num_shards, num_workers]]:
constant[Create data loaders for training/validation/test.]
variable[data_train_lengths] assign[=] call[name[get_data_lengths], parameter[name[data_train]]]
variable[data_val_lengths] assign[=] call[name[get_data_lengths], parameter[name[data_val]]]
variable[data_test_lengths] assign[=] call[name[get_data_lengths], parameter[name[data_test]]]
variable[train_batchify_fn] assign[=] call[name[btf].Tuple, parameter[call[name[btf].Pad, parameter[]], call[name[btf].Pad, parameter[]], call[name[btf].Stack, parameter[]], call[name[btf].Stack, parameter[]]]]
variable[test_batchify_fn] assign[=] call[name[btf].Tuple, parameter[call[name[btf].Pad, parameter[]], call[name[btf].Pad, parameter[]], call[name[btf].Stack, parameter[]], call[name[btf].Stack, parameter[]], call[name[btf].Stack, parameter[]]]]
variable[target_val_lengths] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da18dc04d60>, name[data_val_lengths]]]]]
variable[target_test_lengths] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da18dc06bc0>, name[data_test_lengths]]]]]
if compare[name[args].bucket_scheme equal[==] constant[constant]] begin[:]
variable[bucket_scheme] assign[=] call[name[nlp].data.ConstWidthBucket, parameter[]]
variable[train_batch_sampler] assign[=] call[name[nlp].data.FixedBucketSampler, parameter[]]
call[name[logging].info, parameter[constant[Train Batch Sampler:
%s], call[name[train_batch_sampler].stats, parameter[]]]]
variable[train_data_loader] assign[=] call[name[nlp].data.ShardedDataLoader, parameter[name[data_train]]]
variable[val_batch_sampler] assign[=] call[name[nlp].data.FixedBucketSampler, parameter[]]
call[name[logging].info, parameter[constant[Valid Batch Sampler:
%s], call[name[val_batch_sampler].stats, parameter[]]]]
variable[val_data_loader] assign[=] call[name[gluon].data.DataLoader, parameter[name[data_val]]]
variable[test_batch_sampler] assign[=] call[name[nlp].data.FixedBucketSampler, parameter[]]
call[name[logging].info, parameter[constant[Test Batch Sampler:
%s], call[name[test_batch_sampler].stats, parameter[]]]]
variable[test_data_loader] assign[=] call[name[gluon].data.DataLoader, parameter[name[data_test]]]
return[tuple[[<ast.Name object at 0x7da18fe93eb0>, <ast.Name object at 0x7da18fe933d0>, <ast.Name object at 0x7da18fe90790>]]]
|
keyword[def] identifier[make_dataloader] ( identifier[data_train] , identifier[data_val] , identifier[data_test] , identifier[args] ,
identifier[use_average_length] = keyword[False] , identifier[num_shards] = literal[int] , identifier[num_workers] = literal[int] ):
literal[string]
identifier[data_train_lengths] = identifier[get_data_lengths] ( identifier[data_train] )
identifier[data_val_lengths] = identifier[get_data_lengths] ( identifier[data_val] )
identifier[data_test_lengths] = identifier[get_data_lengths] ( identifier[data_test] )
identifier[train_batchify_fn] = identifier[btf] . identifier[Tuple] ( identifier[btf] . identifier[Pad] (), identifier[btf] . identifier[Pad] (),
identifier[btf] . identifier[Stack] ( identifier[dtype] = literal[string] ), identifier[btf] . identifier[Stack] ( identifier[dtype] = literal[string] ))
identifier[test_batchify_fn] = identifier[btf] . identifier[Tuple] ( identifier[btf] . identifier[Pad] (), identifier[btf] . identifier[Pad] (),
identifier[btf] . identifier[Stack] ( identifier[dtype] = literal[string] ), identifier[btf] . identifier[Stack] ( identifier[dtype] = literal[string] ),
identifier[btf] . identifier[Stack] ())
identifier[target_val_lengths] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[x] [- literal[int] ], identifier[data_val_lengths] ))
identifier[target_test_lengths] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[x] [- literal[int] ], identifier[data_test_lengths] ))
keyword[if] identifier[args] . identifier[bucket_scheme] == literal[string] :
identifier[bucket_scheme] = identifier[nlp] . identifier[data] . identifier[ConstWidthBucket] ()
keyword[elif] identifier[args] . identifier[bucket_scheme] == literal[string] :
identifier[bucket_scheme] = identifier[nlp] . identifier[data] . identifier[LinearWidthBucket] ()
keyword[elif] identifier[args] . identifier[bucket_scheme] == literal[string] :
identifier[bucket_scheme] = identifier[nlp] . identifier[data] . identifier[ExpWidthBucket] ( identifier[bucket_len_step] = literal[int] )
keyword[else] :
keyword[raise] identifier[NotImplementedError]
identifier[train_batch_sampler] = identifier[nlp] . identifier[data] . identifier[FixedBucketSampler] ( identifier[lengths] = identifier[data_train_lengths] ,
identifier[batch_size] = identifier[args] . identifier[batch_size] ,
identifier[num_buckets] = identifier[args] . identifier[num_buckets] ,
identifier[ratio] = identifier[args] . identifier[bucket_ratio] ,
identifier[shuffle] = keyword[True] ,
identifier[use_average_length] = identifier[use_average_length] ,
identifier[num_shards] = identifier[num_shards] ,
identifier[bucket_scheme] = identifier[bucket_scheme] )
identifier[logging] . identifier[info] ( literal[string] , identifier[train_batch_sampler] . identifier[stats] ())
identifier[train_data_loader] = identifier[nlp] . identifier[data] . identifier[ShardedDataLoader] ( identifier[data_train] ,
identifier[batch_sampler] = identifier[train_batch_sampler] ,
identifier[batchify_fn] = identifier[train_batchify_fn] ,
identifier[num_workers] = identifier[num_workers] )
identifier[val_batch_sampler] = identifier[nlp] . identifier[data] . identifier[FixedBucketSampler] ( identifier[lengths] = identifier[target_val_lengths] ,
identifier[batch_size] = identifier[args] . identifier[test_batch_size] ,
identifier[num_buckets] = identifier[args] . identifier[num_buckets] ,
identifier[ratio] = identifier[args] . identifier[bucket_ratio] ,
identifier[shuffle] = keyword[False] ,
identifier[use_average_length] = identifier[use_average_length] ,
identifier[bucket_scheme] = identifier[bucket_scheme] )
identifier[logging] . identifier[info] ( literal[string] , identifier[val_batch_sampler] . identifier[stats] ())
identifier[val_data_loader] = identifier[gluon] . identifier[data] . identifier[DataLoader] ( identifier[data_val] ,
identifier[batch_sampler] = identifier[val_batch_sampler] ,
identifier[batchify_fn] = identifier[test_batchify_fn] ,
identifier[num_workers] = identifier[num_workers] )
identifier[test_batch_sampler] = identifier[nlp] . identifier[data] . identifier[FixedBucketSampler] ( identifier[lengths] = identifier[target_test_lengths] ,
identifier[batch_size] = identifier[args] . identifier[test_batch_size] ,
identifier[num_buckets] = identifier[args] . identifier[num_buckets] ,
identifier[ratio] = identifier[args] . identifier[bucket_ratio] ,
identifier[shuffle] = keyword[False] ,
identifier[use_average_length] = identifier[use_average_length] ,
identifier[bucket_scheme] = identifier[bucket_scheme] )
identifier[logging] . identifier[info] ( literal[string] , identifier[test_batch_sampler] . identifier[stats] ())
identifier[test_data_loader] = identifier[gluon] . identifier[data] . identifier[DataLoader] ( identifier[data_test] ,
identifier[batch_sampler] = identifier[test_batch_sampler] ,
identifier[batchify_fn] = identifier[test_batchify_fn] ,
identifier[num_workers] = identifier[num_workers] )
keyword[return] identifier[train_data_loader] , identifier[val_data_loader] , identifier[test_data_loader]
|
def make_dataloader(data_train, data_val, data_test, args, use_average_length=False, num_shards=0, num_workers=8):
"""Create data loaders for training/validation/test."""
data_train_lengths = get_data_lengths(data_train)
data_val_lengths = get_data_lengths(data_val)
data_test_lengths = get_data_lengths(data_test)
train_batchify_fn = btf.Tuple(btf.Pad(), btf.Pad(), btf.Stack(dtype='float32'), btf.Stack(dtype='float32'))
test_batchify_fn = btf.Tuple(btf.Pad(), btf.Pad(), btf.Stack(dtype='float32'), btf.Stack(dtype='float32'), btf.Stack())
target_val_lengths = list(map(lambda x: x[-1], data_val_lengths))
target_test_lengths = list(map(lambda x: x[-1], data_test_lengths))
if args.bucket_scheme == 'constant':
bucket_scheme = nlp.data.ConstWidthBucket() # depends on [control=['if'], data=[]]
elif args.bucket_scheme == 'linear':
bucket_scheme = nlp.data.LinearWidthBucket() # depends on [control=['if'], data=[]]
elif args.bucket_scheme == 'exp':
bucket_scheme = nlp.data.ExpWidthBucket(bucket_len_step=1.2) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError
train_batch_sampler = nlp.data.FixedBucketSampler(lengths=data_train_lengths, batch_size=args.batch_size, num_buckets=args.num_buckets, ratio=args.bucket_ratio, shuffle=True, use_average_length=use_average_length, num_shards=num_shards, bucket_scheme=bucket_scheme)
logging.info('Train Batch Sampler:\n%s', train_batch_sampler.stats())
train_data_loader = nlp.data.ShardedDataLoader(data_train, batch_sampler=train_batch_sampler, batchify_fn=train_batchify_fn, num_workers=num_workers)
val_batch_sampler = nlp.data.FixedBucketSampler(lengths=target_val_lengths, batch_size=args.test_batch_size, num_buckets=args.num_buckets, ratio=args.bucket_ratio, shuffle=False, use_average_length=use_average_length, bucket_scheme=bucket_scheme)
logging.info('Valid Batch Sampler:\n%s', val_batch_sampler.stats())
val_data_loader = gluon.data.DataLoader(data_val, batch_sampler=val_batch_sampler, batchify_fn=test_batchify_fn, num_workers=num_workers)
test_batch_sampler = nlp.data.FixedBucketSampler(lengths=target_test_lengths, batch_size=args.test_batch_size, num_buckets=args.num_buckets, ratio=args.bucket_ratio, shuffle=False, use_average_length=use_average_length, bucket_scheme=bucket_scheme)
logging.info('Test Batch Sampler:\n%s', test_batch_sampler.stats())
test_data_loader = gluon.data.DataLoader(data_test, batch_sampler=test_batch_sampler, batchify_fn=test_batchify_fn, num_workers=num_workers)
return (train_data_loader, val_data_loader, test_data_loader)
|
def keep_types(self, base_key, out_key, *types):
"""
Method to keep only specific parameters from a parameter documentation.
This method extracts the given `type` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the type. This method works
for the ``'Results'`` sections.
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
out_key: str
Extension for the base key (the final key will be like
``'%s.%s' % (base_key, out_key)``
``*types``
str. The type identifier of which the documentations shall be
in the new section
See Also
--------
delete_types, keep_params
Examples
--------
To extract just two return arguments from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something', sections=['Returns'])
... def do_something():
... '''
... That's %(doc_key)s
...
... Returns
... -------
... float
... A random number
... int
... A random integer'''
... return 1.0, 4
>>> d.keep_types('do_something.returns', 'int_only', 'int')
>>> @d.dedent
... def do_less():
... '''
... My second function that only returns an integer
...
... Returns
... -------
... %(do_something.returns.int_only)s'''
... return do_something()[1]
>>> print(do_less.__doc__)
My second function that only returns an integer
<BLANKLINE>
Returns
-------
int
A random integer
Equivalently, you can use the :meth:`delete_types` method to remove
parameters::
>>> d.delete_types('do_something.returns', 'no_float', 'float')
>>> @d.dedent
... def do_less():
... '''
... My second function with only `a` and `c`
...
... Returns
... ----------
... %(do_something.returns.no_float)s'''
... return do_something()[1]
"""
self.params['%s.%s' % (base_key, out_key)] = self.keep_types_s(
self.params[base_key], types)
|
def function[keep_types, parameter[self, base_key, out_key]]:
constant[
Method to keep only specific parameters from a parameter documentation.
This method extracts the given `type` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the type. This method works
for the ``'Results'`` sections.
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
out_key: str
Extension for the base key (the final key will be like
``'%s.%s' % (base_key, out_key)``
``*types``
str. The type identifier of which the documentations shall be
in the new section
See Also
--------
delete_types, keep_params
Examples
--------
To extract just two return arguments from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something', sections=['Returns'])
... def do_something():
... '''
... That's %(doc_key)s
...
... Returns
... -------
... float
... A random number
... int
... A random integer'''
... return 1.0, 4
>>> d.keep_types('do_something.returns', 'int_only', 'int')
>>> @d.dedent
... def do_less():
... '''
... My second function that only returns an integer
...
... Returns
... -------
... %(do_something.returns.int_only)s'''
... return do_something()[1]
>>> print(do_less.__doc__)
My second function that only returns an integer
<BLANKLINE>
Returns
-------
int
A random integer
Equivalently, you can use the :meth:`delete_types` method to remove
parameters::
>>> d.delete_types('do_something.returns', 'no_float', 'float')
>>> @d.dedent
... def do_less():
... '''
... My second function with only `a` and `c`
...
... Returns
... ----------
... %(do_something.returns.no_float)s'''
... return do_something()[1]
]
call[name[self].params][binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1b69d50>, <ast.Name object at 0x7da1b1b68790>]]]] assign[=] call[name[self].keep_types_s, parameter[call[name[self].params][name[base_key]], name[types]]]
|
keyword[def] identifier[keep_types] ( identifier[self] , identifier[base_key] , identifier[out_key] ,* identifier[types] ):
literal[string]
identifier[self] . identifier[params] [ literal[string] %( identifier[base_key] , identifier[out_key] )]= identifier[self] . identifier[keep_types_s] (
identifier[self] . identifier[params] [ identifier[base_key] ], identifier[types] )
|
def keep_types(self, base_key, out_key, *types):
"""
Method to keep only specific parameters from a parameter documentation.
This method extracts the given `type` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the type. This method works
for the ``'Results'`` sections.
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
out_key: str
Extension for the base key (the final key will be like
``'%s.%s' % (base_key, out_key)``
``*types``
str. The type identifier of which the documentations shall be
in the new section
See Also
--------
delete_types, keep_params
Examples
--------
To extract just two return arguments from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something', sections=['Returns'])
... def do_something():
... '''
... That's %(doc_key)s
...
... Returns
... -------
... float
... A random number
... int
... A random integer'''
... return 1.0, 4
>>> d.keep_types('do_something.returns', 'int_only', 'int')
>>> @d.dedent
... def do_less():
... '''
... My second function that only returns an integer
...
... Returns
... -------
... %(do_something.returns.int_only)s'''
... return do_something()[1]
>>> print(do_less.__doc__)
My second function that only returns an integer
<BLANKLINE>
Returns
-------
int
A random integer
Equivalently, you can use the :meth:`delete_types` method to remove
parameters::
>>> d.delete_types('do_something.returns', 'no_float', 'float')
>>> @d.dedent
... def do_less():
... '''
... My second function with only `a` and `c`
...
... Returns
... ----------
... %(do_something.returns.no_float)s'''
... return do_something()[1]
"""
self.params['%s.%s' % (base_key, out_key)] = self.keep_types_s(self.params[base_key], types)
|
def mac(self, mac):
"""Set mac and duid fields
To have common interface with FixedAddress accept mac address
and set duid as a side effect.
'mac' was added to _shadow_fields to prevent sending it out over wapi.
"""
self._mac = mac
if mac:
self.duid = ib_utils.generate_duid(mac)
elif not hasattr(self, 'duid'):
self.duid = None
|
def function[mac, parameter[self, mac]]:
constant[Set mac and duid fields
To have common interface with FixedAddress accept mac address
and set duid as a side effect.
'mac' was added to _shadow_fields to prevent sending it out over wapi.
]
name[self]._mac assign[=] name[mac]
if name[mac] begin[:]
name[self].duid assign[=] call[name[ib_utils].generate_duid, parameter[name[mac]]]
|
keyword[def] identifier[mac] ( identifier[self] , identifier[mac] ):
literal[string]
identifier[self] . identifier[_mac] = identifier[mac]
keyword[if] identifier[mac] :
identifier[self] . identifier[duid] = identifier[ib_utils] . identifier[generate_duid] ( identifier[mac] )
keyword[elif] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[duid] = keyword[None]
|
def mac(self, mac):
"""Set mac and duid fields
To have common interface with FixedAddress accept mac address
and set duid as a side effect.
'mac' was added to _shadow_fields to prevent sending it out over wapi.
"""
self._mac = mac
if mac:
self.duid = ib_utils.generate_duid(mac) # depends on [control=['if'], data=[]]
elif not hasattr(self, 'duid'):
self.duid = None # depends on [control=['if'], data=[]]
|
def partition_query(
self,
sql,
params=None,
param_types=None,
partition_size_bytes=None,
max_partitions=None,
):
"""Perform a ``ParitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
already associtated with the snapshot.
"""
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction not started.")
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
params_pb = Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
params_pb = None
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(
partition_size_bytes=partition_size_bytes, max_partitions=max_partitions
)
response = api.partition_query(
session=self._session.name,
sql=sql,
transaction=transaction,
params=params_pb,
param_types=param_types,
partition_options=partition_options,
metadata=metadata,
)
return [partition.partition_token for partition in response.partitions]
|
def function[partition_query, parameter[self, sql, params, param_types, partition_size_bytes, max_partitions]]:
constant[Perform a ``ParitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
already associtated with the snapshot.
]
if <ast.UnaryOp object at 0x7da2045650f0> begin[:]
<ast.Raise object at 0x7da204566fb0>
if compare[name[self]._transaction_id is constant[None]] begin[:]
<ast.Raise object at 0x7da204565a80>
if compare[name[params] is_not constant[None]] begin[:]
if compare[name[param_types] is constant[None]] begin[:]
<ast.Raise object at 0x7da204564910>
variable[params_pb] assign[=] call[name[Struct], parameter[]]
variable[database] assign[=] name[self]._session._database
variable[api] assign[=] name[database].spanner_api
variable[metadata] assign[=] call[name[_metadata_with_prefix], parameter[name[database].name]]
variable[transaction] assign[=] call[name[self]._make_txn_selector, parameter[]]
variable[partition_options] assign[=] call[name[PartitionOptions], parameter[]]
variable[response] assign[=] call[name[api].partition_query, parameter[]]
return[<ast.ListComp object at 0x7da20c6aa2f0>]
|
keyword[def] identifier[partition_query] (
identifier[self] ,
identifier[sql] ,
identifier[params] = keyword[None] ,
identifier[param_types] = keyword[None] ,
identifier[partition_size_bytes] = keyword[None] ,
identifier[max_partitions] = keyword[None] ,
):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_multi_use] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[_transaction_id] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[params] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[param_types] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[params_pb] = identifier[Struct] (
identifier[fields] ={ identifier[key] : identifier[_make_value_pb] ( identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[params] . identifier[items] ()}
)
keyword[else] :
identifier[params_pb] = keyword[None]
identifier[database] = identifier[self] . identifier[_session] . identifier[_database]
identifier[api] = identifier[database] . identifier[spanner_api]
identifier[metadata] = identifier[_metadata_with_prefix] ( identifier[database] . identifier[name] )
identifier[transaction] = identifier[self] . identifier[_make_txn_selector] ()
identifier[partition_options] = identifier[PartitionOptions] (
identifier[partition_size_bytes] = identifier[partition_size_bytes] , identifier[max_partitions] = identifier[max_partitions]
)
identifier[response] = identifier[api] . identifier[partition_query] (
identifier[session] = identifier[self] . identifier[_session] . identifier[name] ,
identifier[sql] = identifier[sql] ,
identifier[transaction] = identifier[transaction] ,
identifier[params] = identifier[params_pb] ,
identifier[param_types] = identifier[param_types] ,
identifier[partition_options] = identifier[partition_options] ,
identifier[metadata] = identifier[metadata] ,
)
keyword[return] [ identifier[partition] . identifier[partition_token] keyword[for] identifier[partition] keyword[in] identifier[response] . identifier[partitions] ]
|
def partition_query(self, sql, params=None, param_types=None, partition_size_bytes=None, max_partitions=None):
"""Perform a ``ParitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
already associtated with the snapshot.
"""
if not self._multi_use:
raise ValueError('Cannot use single-use snapshot.') # depends on [control=['if'], data=[]]
if self._transaction_id is None:
raise ValueError('Transaction not started.') # depends on [control=['if'], data=[]]
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.") # depends on [control=['if'], data=[]]
params_pb = Struct(fields={key: _make_value_pb(value) for (key, value) in params.items()}) # depends on [control=['if'], data=['params']]
else:
params_pb = None
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(partition_size_bytes=partition_size_bytes, max_partitions=max_partitions)
response = api.partition_query(session=self._session.name, sql=sql, transaction=transaction, params=params_pb, param_types=param_types, partition_options=partition_options, metadata=metadata)
return [partition.partition_token for partition in response.partitions]
|
def loads(self, config_str, as_defaults=False):
"""
Load configuration values from the specified source string.
Args:
config_str:
as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
"""
self._rw.load_config_from_string(self._config, config_str, as_defaults=as_defaults)
|
def function[loads, parameter[self, config_str, as_defaults]]:
constant[
Load configuration values from the specified source string.
Args:
config_str:
as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
]
call[name[self]._rw.load_config_from_string, parameter[name[self]._config, name[config_str]]]
|
keyword[def] identifier[loads] ( identifier[self] , identifier[config_str] , identifier[as_defaults] = keyword[False] ):
literal[string]
identifier[self] . identifier[_rw] . identifier[load_config_from_string] ( identifier[self] . identifier[_config] , identifier[config_str] , identifier[as_defaults] = identifier[as_defaults] )
|
def loads(self, config_str, as_defaults=False):
"""
Load configuration values from the specified source string.
Args:
config_str:
as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
"""
self._rw.load_config_from_string(self._config, config_str, as_defaults=as_defaults)
|
def ReadUserNotifications(self,
username,
state=None,
timerange=None,
cursor=None):
"""Reads notifications scheduled for a user within a given timerange."""
query = ("SELECT UNIX_TIMESTAMP(timestamp), "
" notification_state, notification "
"FROM user_notification "
"WHERE username_hash = %s ")
args = [mysql_utils.Hash(username)]
if state is not None:
query += "AND notification_state = %s "
args.append(int(state))
if timerange is not None:
time_from, time_to = timerange # pylint: disable=unpacking-non-sequence
if time_from is not None:
query += "AND timestamp >= FROM_UNIXTIME(%s) "
args.append(mysql_utils.RDFDatetimeToTimestamp(time_from))
if time_to is not None:
query += "AND timestamp <= FROM_UNIXTIME(%s) "
args.append(mysql_utils.RDFDatetimeToTimestamp(time_to))
query += "ORDER BY timestamp DESC "
ret = []
cursor.execute(query, args)
for timestamp, state, notification_ser in cursor.fetchall():
n = rdf_objects.UserNotification.FromSerializedString(notification_ser)
n.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
n.state = state
ret.append(n)
return ret
|
def function[ReadUserNotifications, parameter[self, username, state, timerange, cursor]]:
constant[Reads notifications scheduled for a user within a given timerange.]
variable[query] assign[=] constant[SELECT UNIX_TIMESTAMP(timestamp), notification_state, notification FROM user_notification WHERE username_hash = %s ]
variable[args] assign[=] list[[<ast.Call object at 0x7da1b1c195a0>]]
if compare[name[state] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1c1b280>
call[name[args].append, parameter[call[name[int], parameter[name[state]]]]]
if compare[name[timerange] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1c18a30> assign[=] name[timerange]
if compare[name[time_from] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1c19570>
call[name[args].append, parameter[call[name[mysql_utils].RDFDatetimeToTimestamp, parameter[name[time_from]]]]]
if compare[name[time_to] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1b46350>
call[name[args].append, parameter[call[name[mysql_utils].RDFDatetimeToTimestamp, parameter[name[time_to]]]]]
<ast.AugAssign object at 0x7da1b1b5a680>
variable[ret] assign[=] list[[]]
call[name[cursor].execute, parameter[name[query], name[args]]]
for taget[tuple[[<ast.Name object at 0x7da1b1b5a3b0>, <ast.Name object at 0x7da1b1b5a380>, <ast.Name object at 0x7da1b1b5a350>]]] in starred[call[name[cursor].fetchall, parameter[]]] begin[:]
variable[n] assign[=] call[name[rdf_objects].UserNotification.FromSerializedString, parameter[name[notification_ser]]]
name[n].timestamp assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[timestamp]]]
name[n].state assign[=] name[state]
call[name[ret].append, parameter[name[n]]]
return[name[ret]]
|
keyword[def] identifier[ReadUserNotifications] ( identifier[self] ,
identifier[username] ,
identifier[state] = keyword[None] ,
identifier[timerange] = keyword[None] ,
identifier[cursor] = keyword[None] ):
literal[string]
identifier[query] =( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[args] =[ identifier[mysql_utils] . identifier[Hash] ( identifier[username] )]
keyword[if] identifier[state] keyword[is] keyword[not] keyword[None] :
identifier[query] += literal[string]
identifier[args] . identifier[append] ( identifier[int] ( identifier[state] ))
keyword[if] identifier[timerange] keyword[is] keyword[not] keyword[None] :
identifier[time_from] , identifier[time_to] = identifier[timerange]
keyword[if] identifier[time_from] keyword[is] keyword[not] keyword[None] :
identifier[query] += literal[string]
identifier[args] . identifier[append] ( identifier[mysql_utils] . identifier[RDFDatetimeToTimestamp] ( identifier[time_from] ))
keyword[if] identifier[time_to] keyword[is] keyword[not] keyword[None] :
identifier[query] += literal[string]
identifier[args] . identifier[append] ( identifier[mysql_utils] . identifier[RDFDatetimeToTimestamp] ( identifier[time_to] ))
identifier[query] += literal[string]
identifier[ret] =[]
identifier[cursor] . identifier[execute] ( identifier[query] , identifier[args] )
keyword[for] identifier[timestamp] , identifier[state] , identifier[notification_ser] keyword[in] identifier[cursor] . identifier[fetchall] ():
identifier[n] = identifier[rdf_objects] . identifier[UserNotification] . identifier[FromSerializedString] ( identifier[notification_ser] )
identifier[n] . identifier[timestamp] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] ( identifier[timestamp] )
identifier[n] . identifier[state] = identifier[state]
identifier[ret] . identifier[append] ( identifier[n] )
keyword[return] identifier[ret]
|
def ReadUserNotifications(self, username, state=None, timerange=None, cursor=None):
"""Reads notifications scheduled for a user within a given timerange."""
query = 'SELECT UNIX_TIMESTAMP(timestamp), notification_state, notification FROM user_notification WHERE username_hash = %s '
args = [mysql_utils.Hash(username)]
if state is not None:
query += 'AND notification_state = %s '
args.append(int(state)) # depends on [control=['if'], data=['state']]
if timerange is not None:
(time_from, time_to) = timerange # pylint: disable=unpacking-non-sequence
if time_from is not None:
query += 'AND timestamp >= FROM_UNIXTIME(%s) '
args.append(mysql_utils.RDFDatetimeToTimestamp(time_from)) # depends on [control=['if'], data=['time_from']]
if time_to is not None:
query += 'AND timestamp <= FROM_UNIXTIME(%s) '
args.append(mysql_utils.RDFDatetimeToTimestamp(time_to)) # depends on [control=['if'], data=['time_to']] # depends on [control=['if'], data=['timerange']]
query += 'ORDER BY timestamp DESC '
ret = []
cursor.execute(query, args)
for (timestamp, state, notification_ser) in cursor.fetchall():
n = rdf_objects.UserNotification.FromSerializedString(notification_ser)
n.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
n.state = state
ret.append(n) # depends on [control=['for'], data=[]]
return ret
|
def metabolite_summary(met, solution=None, threshold=0.01, fva=False,
names=False, floatfmt='.3g'):
"""
Print a summary of the production and consumption fluxes.
This method requires the model for which this metabolite is a part
to be solved.
Parameters
----------
solution : cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g').
"""
if names:
emit = attrgetter('name')
else:
emit = attrgetter('id')
if solution is None:
met.model.slim_optimize(error_value=None)
solution = get_solution(met.model, reactions=met.reactions)
rxns = sorted(met.reactions, key=attrgetter("id"))
rxn_id = list()
rxn_name = list()
flux = list()
reaction = list()
for rxn in rxns:
rxn_id.append(rxn.id)
rxn_name.append(format_long_string(emit(rxn), 10))
flux.append(solution[rxn.id] * rxn.metabolites[met])
txt = rxn.build_reaction_string(use_metabolite_names=names)
reaction.append(format_long_string(txt, 40 if fva is not None else 50))
flux_summary = pd.DataFrame({
"id": rxn_name,
"flux": flux,
"reaction": reaction
}, index=rxn_id)
if fva is not None:
if hasattr(fva, 'columns'):
fva_results = fva
else:
fva_results = flux_variability_analysis(
met.model, list(met.reactions), fraction_of_optimum=fva)
flux_summary["maximum"] = zeros(len(rxn_id), dtype=float)
flux_summary["minimum"] = zeros(len(rxn_id), dtype=float)
for rxn in rxns:
fmax = rxn.metabolites[met] * fva_results.at[rxn.id, "maximum"]
fmin = rxn.metabolites[met] * fva_results.at[rxn.id, "minimum"]
if abs(fmin) <= abs(fmax):
flux_summary.at[rxn.id, "fmax"] = fmax
flux_summary.at[rxn.id, "fmin"] = fmin
else:
# Reverse fluxes.
flux_summary.at[rxn.id, "fmax"] = fmin
flux_summary.at[rxn.id, "fmin"] = fmax
assert flux_summary["flux"].sum() < 1E-6, "Error in flux balance"
flux_summary = _process_flux_dataframe(flux_summary, fva, threshold,
floatfmt)
flux_summary['percent'] = 0
total_flux = flux_summary.loc[flux_summary.is_input, "flux"].sum()
flux_summary.loc[flux_summary.is_input, 'percent'] = \
flux_summary.loc[flux_summary.is_input, 'flux'] / total_flux
flux_summary.loc[~flux_summary.is_input, 'percent'] = \
flux_summary.loc[~flux_summary.is_input, 'flux'] / total_flux
flux_summary['percent'] = flux_summary.percent.apply(
lambda x: '{:.0%}'.format(x))
if fva is not None:
flux_table = tabulate(
flux_summary.loc[:, ['percent', 'flux', 'fva_fmt', 'id',
'reaction']].values, floatfmt=floatfmt,
headers=['%', 'FLUX', 'RANGE', 'RXN ID', 'REACTION']).split('\n')
else:
flux_table = tabulate(
flux_summary.loc[:, ['percent', 'flux', 'id', 'reaction']].values,
floatfmt=floatfmt, headers=['%', 'FLUX', 'RXN ID', 'REACTION']
).split('\n')
flux_table_head = flux_table[:2]
met_tag = "{0} ({1})".format(format_long_string(met.name, 45),
format_long_string(met.id, 10))
head = "PRODUCING REACTIONS -- " + met_tag
print_(head)
print_("-" * len(head))
print_('\n'.join(flux_table_head))
print_('\n'.join(
pd.np.array(flux_table[2:])[flux_summary.is_input.values]))
print_()
print_("CONSUMING REACTIONS -- " + met_tag)
print_("-" * len(head))
print_('\n'.join(flux_table_head))
print_('\n'.join(
pd.np.array(flux_table[2:])[~flux_summary.is_input.values]))
|
def function[metabolite_summary, parameter[met, solution, threshold, fva, names, floatfmt]]:
constant[
Print a summary of the production and consumption fluxes.
This method requires the model for which this metabolite is a part
to be solved.
Parameters
----------
solution : cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g').
]
if name[names] begin[:]
variable[emit] assign[=] call[name[attrgetter], parameter[constant[name]]]
if compare[name[solution] is constant[None]] begin[:]
call[name[met].model.slim_optimize, parameter[]]
variable[solution] assign[=] call[name[get_solution], parameter[name[met].model]]
variable[rxns] assign[=] call[name[sorted], parameter[name[met].reactions]]
variable[rxn_id] assign[=] call[name[list], parameter[]]
variable[rxn_name] assign[=] call[name[list], parameter[]]
variable[flux] assign[=] call[name[list], parameter[]]
variable[reaction] assign[=] call[name[list], parameter[]]
for taget[name[rxn]] in starred[name[rxns]] begin[:]
call[name[rxn_id].append, parameter[name[rxn].id]]
call[name[rxn_name].append, parameter[call[name[format_long_string], parameter[call[name[emit], parameter[name[rxn]]], constant[10]]]]]
call[name[flux].append, parameter[binary_operation[call[name[solution]][name[rxn].id] * call[name[rxn].metabolites][name[met]]]]]
variable[txt] assign[=] call[name[rxn].build_reaction_string, parameter[]]
call[name[reaction].append, parameter[call[name[format_long_string], parameter[name[txt], <ast.IfExp object at 0x7da1b0152920>]]]]
variable[flux_summary] assign[=] call[name[pd].DataFrame, parameter[dictionary[[<ast.Constant object at 0x7da1b0150bb0>, <ast.Constant object at 0x7da1b0153d00>, <ast.Constant object at 0x7da1b0152ce0>], [<ast.Name object at 0x7da1b01502b0>, <ast.Name object at 0x7da1b0152b30>, <ast.Name object at 0x7da1b0150c40>]]]]
if compare[name[fva] is_not constant[None]] begin[:]
if call[name[hasattr], parameter[name[fva], constant[columns]]] begin[:]
variable[fva_results] assign[=] name[fva]
call[name[flux_summary]][constant[maximum]] assign[=] call[name[zeros], parameter[call[name[len], parameter[name[rxn_id]]]]]
call[name[flux_summary]][constant[minimum]] assign[=] call[name[zeros], parameter[call[name[len], parameter[name[rxn_id]]]]]
for taget[name[rxn]] in starred[name[rxns]] begin[:]
variable[fmax] assign[=] binary_operation[call[name[rxn].metabolites][name[met]] * call[name[fva_results].at][tuple[[<ast.Attribute object at 0x7da1b0150760>, <ast.Constant object at 0x7da1b0152800>]]]]
variable[fmin] assign[=] binary_operation[call[name[rxn].metabolites][name[met]] * call[name[fva_results].at][tuple[[<ast.Attribute object at 0x7da1b0060760>, <ast.Constant object at 0x7da1b0060070>]]]]
if compare[call[name[abs], parameter[name[fmin]]] less_or_equal[<=] call[name[abs], parameter[name[fmax]]]] begin[:]
call[name[flux_summary].at][tuple[[<ast.Attribute object at 0x7da1b016ce20>, <ast.Constant object at 0x7da1b016fe80>]]] assign[=] name[fmax]
call[name[flux_summary].at][tuple[[<ast.Attribute object at 0x7da1b016e260>, <ast.Constant object at 0x7da1b016cac0>]]] assign[=] name[fmin]
assert[compare[call[call[name[flux_summary]][constant[flux]].sum, parameter[]] less[<] constant[1e-06]]]
variable[flux_summary] assign[=] call[name[_process_flux_dataframe], parameter[name[flux_summary], name[fva], name[threshold], name[floatfmt]]]
call[name[flux_summary]][constant[percent]] assign[=] constant[0]
variable[total_flux] assign[=] call[call[name[flux_summary].loc][tuple[[<ast.Attribute object at 0x7da1b0152ad0>, <ast.Constant object at 0x7da1b0153430>]]].sum, parameter[]]
call[name[flux_summary].loc][tuple[[<ast.Attribute object at 0x7da1b0151ea0>, <ast.Constant object at 0x7da1b0152980>]]] assign[=] binary_operation[call[name[flux_summary].loc][tuple[[<ast.Attribute object at 0x7da1b0153550>, <ast.Constant object at 0x7da1b0152da0>]]] / name[total_flux]]
call[name[flux_summary].loc][tuple[[<ast.UnaryOp object at 0x7da1b0153e80>, <ast.Constant object at 0x7da1b0152d70>]]] assign[=] binary_operation[call[name[flux_summary].loc][tuple[[<ast.UnaryOp object at 0x7da1b0152110>, <ast.Constant object at 0x7da1b006d3c0>]]] / name[total_flux]]
call[name[flux_summary]][constant[percent]] assign[=] call[name[flux_summary].percent.apply, parameter[<ast.Lambda object at 0x7da1b006e230>]]
if compare[name[fva] is_not constant[None]] begin[:]
variable[flux_table] assign[=] call[call[name[tabulate], parameter[call[name[flux_summary].loc][tuple[[<ast.Slice object at 0x7da1b006e1a0>, <ast.List object at 0x7da1b006c400>]]].values]].split, parameter[constant[
]]]
variable[flux_table_head] assign[=] call[name[flux_table]][<ast.Slice object at 0x7da1b006c490>]
variable[met_tag] assign[=] call[constant[{0} ({1})].format, parameter[call[name[format_long_string], parameter[name[met].name, constant[45]]], call[name[format_long_string], parameter[name[met].id, constant[10]]]]]
variable[head] assign[=] binary_operation[constant[PRODUCING REACTIONS -- ] + name[met_tag]]
call[name[print_], parameter[name[head]]]
call[name[print_], parameter[binary_operation[constant[-] * call[name[len], parameter[name[head]]]]]]
call[name[print_], parameter[call[constant[
].join, parameter[name[flux_table_head]]]]]
call[name[print_], parameter[call[constant[
].join, parameter[call[call[name[pd].np.array, parameter[call[name[flux_table]][<ast.Slice object at 0x7da1b014e140>]]]][name[flux_summary].is_input.values]]]]]
call[name[print_], parameter[]]
call[name[print_], parameter[binary_operation[constant[CONSUMING REACTIONS -- ] + name[met_tag]]]]
call[name[print_], parameter[binary_operation[constant[-] * call[name[len], parameter[name[head]]]]]]
call[name[print_], parameter[call[constant[
].join, parameter[name[flux_table_head]]]]]
call[name[print_], parameter[call[constant[
].join, parameter[call[call[name[pd].np.array, parameter[call[name[flux_table]][<ast.Slice object at 0x7da1b014fdf0>]]]][<ast.UnaryOp object at 0x7da1b014f7f0>]]]]]
|
keyword[def] identifier[metabolite_summary] ( identifier[met] , identifier[solution] = keyword[None] , identifier[threshold] = literal[int] , identifier[fva] = keyword[False] ,
identifier[names] = keyword[False] , identifier[floatfmt] = literal[string] ):
literal[string]
keyword[if] identifier[names] :
identifier[emit] = identifier[attrgetter] ( literal[string] )
keyword[else] :
identifier[emit] = identifier[attrgetter] ( literal[string] )
keyword[if] identifier[solution] keyword[is] keyword[None] :
identifier[met] . identifier[model] . identifier[slim_optimize] ( identifier[error_value] = keyword[None] )
identifier[solution] = identifier[get_solution] ( identifier[met] . identifier[model] , identifier[reactions] = identifier[met] . identifier[reactions] )
identifier[rxns] = identifier[sorted] ( identifier[met] . identifier[reactions] , identifier[key] = identifier[attrgetter] ( literal[string] ))
identifier[rxn_id] = identifier[list] ()
identifier[rxn_name] = identifier[list] ()
identifier[flux] = identifier[list] ()
identifier[reaction] = identifier[list] ()
keyword[for] identifier[rxn] keyword[in] identifier[rxns] :
identifier[rxn_id] . identifier[append] ( identifier[rxn] . identifier[id] )
identifier[rxn_name] . identifier[append] ( identifier[format_long_string] ( identifier[emit] ( identifier[rxn] ), literal[int] ))
identifier[flux] . identifier[append] ( identifier[solution] [ identifier[rxn] . identifier[id] ]* identifier[rxn] . identifier[metabolites] [ identifier[met] ])
identifier[txt] = identifier[rxn] . identifier[build_reaction_string] ( identifier[use_metabolite_names] = identifier[names] )
identifier[reaction] . identifier[append] ( identifier[format_long_string] ( identifier[txt] , literal[int] keyword[if] identifier[fva] keyword[is] keyword[not] keyword[None] keyword[else] literal[int] ))
identifier[flux_summary] = identifier[pd] . identifier[DataFrame] ({
literal[string] : identifier[rxn_name] ,
literal[string] : identifier[flux] ,
literal[string] : identifier[reaction]
}, identifier[index] = identifier[rxn_id] )
keyword[if] identifier[fva] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[hasattr] ( identifier[fva] , literal[string] ):
identifier[fva_results] = identifier[fva]
keyword[else] :
identifier[fva_results] = identifier[flux_variability_analysis] (
identifier[met] . identifier[model] , identifier[list] ( identifier[met] . identifier[reactions] ), identifier[fraction_of_optimum] = identifier[fva] )
identifier[flux_summary] [ literal[string] ]= identifier[zeros] ( identifier[len] ( identifier[rxn_id] ), identifier[dtype] = identifier[float] )
identifier[flux_summary] [ literal[string] ]= identifier[zeros] ( identifier[len] ( identifier[rxn_id] ), identifier[dtype] = identifier[float] )
keyword[for] identifier[rxn] keyword[in] identifier[rxns] :
identifier[fmax] = identifier[rxn] . identifier[metabolites] [ identifier[met] ]* identifier[fva_results] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ]
identifier[fmin] = identifier[rxn] . identifier[metabolites] [ identifier[met] ]* identifier[fva_results] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ]
keyword[if] identifier[abs] ( identifier[fmin] )<= identifier[abs] ( identifier[fmax] ):
identifier[flux_summary] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ]= identifier[fmax]
identifier[flux_summary] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ]= identifier[fmin]
keyword[else] :
identifier[flux_summary] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ]= identifier[fmin]
identifier[flux_summary] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ]= identifier[fmax]
keyword[assert] identifier[flux_summary] [ literal[string] ]. identifier[sum] ()< literal[int] , literal[string]
identifier[flux_summary] = identifier[_process_flux_dataframe] ( identifier[flux_summary] , identifier[fva] , identifier[threshold] ,
identifier[floatfmt] )
identifier[flux_summary] [ literal[string] ]= literal[int]
identifier[total_flux] = identifier[flux_summary] . identifier[loc] [ identifier[flux_summary] . identifier[is_input] , literal[string] ]. identifier[sum] ()
identifier[flux_summary] . identifier[loc] [ identifier[flux_summary] . identifier[is_input] , literal[string] ]= identifier[flux_summary] . identifier[loc] [ identifier[flux_summary] . identifier[is_input] , literal[string] ]/ identifier[total_flux]
identifier[flux_summary] . identifier[loc] [~ identifier[flux_summary] . identifier[is_input] , literal[string] ]= identifier[flux_summary] . identifier[loc] [~ identifier[flux_summary] . identifier[is_input] , literal[string] ]/ identifier[total_flux]
identifier[flux_summary] [ literal[string] ]= identifier[flux_summary] . identifier[percent] . identifier[apply] (
keyword[lambda] identifier[x] : literal[string] . identifier[format] ( identifier[x] ))
keyword[if] identifier[fva] keyword[is] keyword[not] keyword[None] :
identifier[flux_table] = identifier[tabulate] (
identifier[flux_summary] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ]]. identifier[values] , identifier[floatfmt] = identifier[floatfmt] ,
identifier[headers] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]). identifier[split] ( literal[string] )
keyword[else] :
identifier[flux_table] = identifier[tabulate] (
identifier[flux_summary] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] , literal[string] ]]. identifier[values] ,
identifier[floatfmt] = identifier[floatfmt] , identifier[headers] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
). identifier[split] ( literal[string] )
identifier[flux_table_head] = identifier[flux_table] [: literal[int] ]
identifier[met_tag] = literal[string] . identifier[format] ( identifier[format_long_string] ( identifier[met] . identifier[name] , literal[int] ),
identifier[format_long_string] ( identifier[met] . identifier[id] , literal[int] ))
identifier[head] = literal[string] + identifier[met_tag]
identifier[print_] ( identifier[head] )
identifier[print_] ( literal[string] * identifier[len] ( identifier[head] ))
identifier[print_] ( literal[string] . identifier[join] ( identifier[flux_table_head] ))
identifier[print_] ( literal[string] . identifier[join] (
identifier[pd] . identifier[np] . identifier[array] ( identifier[flux_table] [ literal[int] :])[ identifier[flux_summary] . identifier[is_input] . identifier[values] ]))
identifier[print_] ()
identifier[print_] ( literal[string] + identifier[met_tag] )
identifier[print_] ( literal[string] * identifier[len] ( identifier[head] ))
identifier[print_] ( literal[string] . identifier[join] ( identifier[flux_table_head] ))
identifier[print_] ( literal[string] . identifier[join] (
identifier[pd] . identifier[np] . identifier[array] ( identifier[flux_table] [ literal[int] :])[~ identifier[flux_summary] . identifier[is_input] . identifier[values] ]))
|
def metabolite_summary(met, solution=None, threshold=0.01, fva=False, names=False, floatfmt='.3g'):
"""
Print a summary of the production and consumption fluxes.
This method requires the model for which this metabolite is a part
to be solved.
Parameters
----------
solution : cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g').
"""
if names:
emit = attrgetter('name') # depends on [control=['if'], data=[]]
else:
emit = attrgetter('id')
if solution is None:
met.model.slim_optimize(error_value=None)
solution = get_solution(met.model, reactions=met.reactions) # depends on [control=['if'], data=['solution']]
rxns = sorted(met.reactions, key=attrgetter('id'))
rxn_id = list()
rxn_name = list()
flux = list()
reaction = list()
for rxn in rxns:
rxn_id.append(rxn.id)
rxn_name.append(format_long_string(emit(rxn), 10))
flux.append(solution[rxn.id] * rxn.metabolites[met])
txt = rxn.build_reaction_string(use_metabolite_names=names)
reaction.append(format_long_string(txt, 40 if fva is not None else 50)) # depends on [control=['for'], data=['rxn']]
flux_summary = pd.DataFrame({'id': rxn_name, 'flux': flux, 'reaction': reaction}, index=rxn_id)
if fva is not None:
if hasattr(fva, 'columns'):
fva_results = fva # depends on [control=['if'], data=[]]
else:
fva_results = flux_variability_analysis(met.model, list(met.reactions), fraction_of_optimum=fva)
flux_summary['maximum'] = zeros(len(rxn_id), dtype=float)
flux_summary['minimum'] = zeros(len(rxn_id), dtype=float)
for rxn in rxns:
fmax = rxn.metabolites[met] * fva_results.at[rxn.id, 'maximum']
fmin = rxn.metabolites[met] * fva_results.at[rxn.id, 'minimum']
if abs(fmin) <= abs(fmax):
flux_summary.at[rxn.id, 'fmax'] = fmax
flux_summary.at[rxn.id, 'fmin'] = fmin # depends on [control=['if'], data=[]]
else:
# Reverse fluxes.
flux_summary.at[rxn.id, 'fmax'] = fmin
flux_summary.at[rxn.id, 'fmin'] = fmax # depends on [control=['for'], data=['rxn']] # depends on [control=['if'], data=['fva']]
assert flux_summary['flux'].sum() < 1e-06, 'Error in flux balance'
flux_summary = _process_flux_dataframe(flux_summary, fva, threshold, floatfmt)
flux_summary['percent'] = 0
total_flux = flux_summary.loc[flux_summary.is_input, 'flux'].sum()
flux_summary.loc[flux_summary.is_input, 'percent'] = flux_summary.loc[flux_summary.is_input, 'flux'] / total_flux
flux_summary.loc[~flux_summary.is_input, 'percent'] = flux_summary.loc[~flux_summary.is_input, 'flux'] / total_flux
flux_summary['percent'] = flux_summary.percent.apply(lambda x: '{:.0%}'.format(x))
if fva is not None:
flux_table = tabulate(flux_summary.loc[:, ['percent', 'flux', 'fva_fmt', 'id', 'reaction']].values, floatfmt=floatfmt, headers=['%', 'FLUX', 'RANGE', 'RXN ID', 'REACTION']).split('\n') # depends on [control=['if'], data=[]]
else:
flux_table = tabulate(flux_summary.loc[:, ['percent', 'flux', 'id', 'reaction']].values, floatfmt=floatfmt, headers=['%', 'FLUX', 'RXN ID', 'REACTION']).split('\n')
flux_table_head = flux_table[:2]
met_tag = '{0} ({1})'.format(format_long_string(met.name, 45), format_long_string(met.id, 10))
head = 'PRODUCING REACTIONS -- ' + met_tag
print_(head)
print_('-' * len(head))
print_('\n'.join(flux_table_head))
print_('\n'.join(pd.np.array(flux_table[2:])[flux_summary.is_input.values]))
print_()
print_('CONSUMING REACTIONS -- ' + met_tag)
print_('-' * len(head))
print_('\n'.join(flux_table_head))
print_('\n'.join(pd.np.array(flux_table[2:])[~flux_summary.is_input.values]))
|
def islice(self, start=None, stop=None, reverse=False):
"""
Returns an iterator that slices `self` from `start` to `stop` index,
inclusive and exclusive respectively.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end.
"""
_len = self._len
if not _len:
return iter(())
start, stop, step = self._slice(slice(start, stop))
if start >= stop:
return iter(())
_pos = self._pos
min_pos, min_idx = _pos(start)
if stop == _len:
max_pos = len(self._lists) - 1
max_idx = len(self._lists[-1])
else:
max_pos, max_idx = _pos(stop)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
|
def function[islice, parameter[self, start, stop, reverse]]:
constant[
Returns an iterator that slices `self` from `start` to `stop` index,
inclusive and exclusive respectively.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end.
]
variable[_len] assign[=] name[self]._len
if <ast.UnaryOp object at 0x7da18bcc91e0> begin[:]
return[call[name[iter], parameter[tuple[[]]]]]
<ast.Tuple object at 0x7da18bcc9510> assign[=] call[name[self]._slice, parameter[call[name[slice], parameter[name[start], name[stop]]]]]
if compare[name[start] greater_or_equal[>=] name[stop]] begin[:]
return[call[name[iter], parameter[tuple[[]]]]]
variable[_pos] assign[=] name[self]._pos
<ast.Tuple object at 0x7da18bccbf10> assign[=] call[name[_pos], parameter[name[start]]]
if compare[name[stop] equal[==] name[_len]] begin[:]
variable[max_pos] assign[=] binary_operation[call[name[len], parameter[name[self]._lists]] - constant[1]]
variable[max_idx] assign[=] call[name[len], parameter[call[name[self]._lists][<ast.UnaryOp object at 0x7da18bcc92d0>]]]
return[call[name[self]._islice, parameter[name[min_pos], name[min_idx], name[max_pos], name[max_idx], name[reverse]]]]
|
keyword[def] identifier[islice] ( identifier[self] , identifier[start] = keyword[None] , identifier[stop] = keyword[None] , identifier[reverse] = keyword[False] ):
literal[string]
identifier[_len] = identifier[self] . identifier[_len]
keyword[if] keyword[not] identifier[_len] :
keyword[return] identifier[iter] (())
identifier[start] , identifier[stop] , identifier[step] = identifier[self] . identifier[_slice] ( identifier[slice] ( identifier[start] , identifier[stop] ))
keyword[if] identifier[start] >= identifier[stop] :
keyword[return] identifier[iter] (())
identifier[_pos] = identifier[self] . identifier[_pos]
identifier[min_pos] , identifier[min_idx] = identifier[_pos] ( identifier[start] )
keyword[if] identifier[stop] == identifier[_len] :
identifier[max_pos] = identifier[len] ( identifier[self] . identifier[_lists] )- literal[int]
identifier[max_idx] = identifier[len] ( identifier[self] . identifier[_lists] [- literal[int] ])
keyword[else] :
identifier[max_pos] , identifier[max_idx] = identifier[_pos] ( identifier[stop] )
keyword[return] identifier[self] . identifier[_islice] ( identifier[min_pos] , identifier[min_idx] , identifier[max_pos] , identifier[max_idx] , identifier[reverse] )
|
def islice(self, start=None, stop=None, reverse=False):
"""
Returns an iterator that slices `self` from `start` to `stop` index,
inclusive and exclusive respectively.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end.
"""
_len = self._len
if not _len:
return iter(()) # depends on [control=['if'], data=[]]
(start, stop, step) = self._slice(slice(start, stop))
if start >= stop:
return iter(()) # depends on [control=['if'], data=[]]
_pos = self._pos
(min_pos, min_idx) = _pos(start)
if stop == _len:
max_pos = len(self._lists) - 1
max_idx = len(self._lists[-1]) # depends on [control=['if'], data=[]]
else:
(max_pos, max_idx) = _pos(stop)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
|
def ipshuffle(l, random=None):
r"""Shuffle list `l` inplace and return it."""
import random as _random
_random.shuffle(l, random)
return l
|
def function[ipshuffle, parameter[l, random]]:
constant[Shuffle list `l` inplace and return it.]
import module[random] as alias[_random]
call[name[_random].shuffle, parameter[name[l], name[random]]]
return[name[l]]
|
keyword[def] identifier[ipshuffle] ( identifier[l] , identifier[random] = keyword[None] ):
literal[string]
keyword[import] identifier[random] keyword[as] identifier[_random]
identifier[_random] . identifier[shuffle] ( identifier[l] , identifier[random] )
keyword[return] identifier[l]
|
def ipshuffle(l, random=None):
"""Shuffle list `l` inplace and return it."""
import random as _random
_random.shuffle(l, random)
return l
|
def remove_query_param(self, key, value=None):
"""
Remove a query param from a URL
Set the value parameter if removing from a list.
:param string key: The key to delete
:param string value: The value of the param to delete (of more than one)
"""
parse_result = self.query_params()
if value is not None:
index = parse_result[key].index(value)
del parse_result[key][index]
else:
del parse_result[key]
return URL._mutate(self, query=unicode_urlencode(parse_result, doseq=True))
|
def function[remove_query_param, parameter[self, key, value]]:
constant[
Remove a query param from a URL
Set the value parameter if removing from a list.
:param string key: The key to delete
:param string value: The value of the param to delete (of more than one)
]
variable[parse_result] assign[=] call[name[self].query_params, parameter[]]
if compare[name[value] is_not constant[None]] begin[:]
variable[index] assign[=] call[call[name[parse_result]][name[key]].index, parameter[name[value]]]
<ast.Delete object at 0x7da1b0fcd180>
return[call[name[URL]._mutate, parameter[name[self]]]]
|
keyword[def] identifier[remove_query_param] ( identifier[self] , identifier[key] , identifier[value] = keyword[None] ):
literal[string]
identifier[parse_result] = identifier[self] . identifier[query_params] ()
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[index] = identifier[parse_result] [ identifier[key] ]. identifier[index] ( identifier[value] )
keyword[del] identifier[parse_result] [ identifier[key] ][ identifier[index] ]
keyword[else] :
keyword[del] identifier[parse_result] [ identifier[key] ]
keyword[return] identifier[URL] . identifier[_mutate] ( identifier[self] , identifier[query] = identifier[unicode_urlencode] ( identifier[parse_result] , identifier[doseq] = keyword[True] ))
|
def remove_query_param(self, key, value=None):
"""
Remove a query param from a URL
Set the value parameter if removing from a list.
:param string key: The key to delete
:param string value: The value of the param to delete (of more than one)
"""
parse_result = self.query_params()
if value is not None:
index = parse_result[key].index(value)
del parse_result[key][index] # depends on [control=['if'], data=['value']]
else:
del parse_result[key]
return URL._mutate(self, query=unicode_urlencode(parse_result, doseq=True))
|
def _get_color_definitions(data):
"""Returns the list of custom color definitions for the TikZ file.
"""
definitions = []
fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}"
for name, rgb in data["custom colors"].items():
definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2]))
return definitions
|
def function[_get_color_definitions, parameter[data]]:
constant[Returns the list of custom color definitions for the TikZ file.
]
variable[definitions] assign[=] list[[]]
variable[fmt] assign[=] binary_operation[binary_operation[constant[\definecolor{{{}}}{{rgb}}{{] + call[constant[,].join, parameter[binary_operation[constant[3] * list[[<ast.Subscript object at 0x7da1b16b5a80>]]]]]] + constant[}}]]
for taget[tuple[[<ast.Name object at 0x7da1b16b4f70>, <ast.Name object at 0x7da1b16b77c0>]]] in starred[call[call[name[data]][constant[custom colors]].items, parameter[]]] begin[:]
call[name[definitions].append, parameter[call[name[fmt].format, parameter[name[name], call[name[rgb]][constant[0]], call[name[rgb]][constant[1]], call[name[rgb]][constant[2]]]]]]
return[name[definitions]]
|
keyword[def] identifier[_get_color_definitions] ( identifier[data] ):
literal[string]
identifier[definitions] =[]
identifier[fmt] = literal[string] + literal[string] . identifier[join] ( literal[int] *[ identifier[data] [ literal[string] ]])+ literal[string]
keyword[for] identifier[name] , identifier[rgb] keyword[in] identifier[data] [ literal[string] ]. identifier[items] ():
identifier[definitions] . identifier[append] ( identifier[fmt] . identifier[format] ( identifier[name] , identifier[rgb] [ literal[int] ], identifier[rgb] [ literal[int] ], identifier[rgb] [ literal[int] ]))
keyword[return] identifier[definitions]
|
def _get_color_definitions(data):
"""Returns the list of custom color definitions for the TikZ file.
"""
definitions = []
fmt = '\\definecolor{{{}}}{{rgb}}{{' + ','.join(3 * [data['float format']]) + '}}'
for (name, rgb) in data['custom colors'].items():
definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2])) # depends on [control=['for'], data=[]]
return definitions
|
def fold_list(input_list, max_width=None):
"""
Fold the entries in input_list. If max_width is not None, fold only if it
is longer than max_width. Otherwise fold each entry.
"""
if not input_list:
return ""
if not isinstance(input_list[0], six.string_types):
input_list = [str(item) for item in input_list]
if max_width:
mystr = ", ".join(input_list)
return fold_string(mystr, max_width)
return "\n".join(input_list)
|
def function[fold_list, parameter[input_list, max_width]]:
constant[
Fold the entries in input_list. If max_width is not None, fold only if it
is longer than max_width. Otherwise fold each entry.
]
if <ast.UnaryOp object at 0x7da1b0ef4c10> begin[:]
return[constant[]]
if <ast.UnaryOp object at 0x7da1b0ef6ef0> begin[:]
variable[input_list] assign[=] <ast.ListComp object at 0x7da1b0ef78b0>
if name[max_width] begin[:]
variable[mystr] assign[=] call[constant[, ].join, parameter[name[input_list]]]
return[call[name[fold_string], parameter[name[mystr], name[max_width]]]]
return[call[constant[
].join, parameter[name[input_list]]]]
|
keyword[def] identifier[fold_list] ( identifier[input_list] , identifier[max_width] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[input_list] :
keyword[return] literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[input_list] [ literal[int] ], identifier[six] . identifier[string_types] ):
identifier[input_list] =[ identifier[str] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[input_list] ]
keyword[if] identifier[max_width] :
identifier[mystr] = literal[string] . identifier[join] ( identifier[input_list] )
keyword[return] identifier[fold_string] ( identifier[mystr] , identifier[max_width] )
keyword[return] literal[string] . identifier[join] ( identifier[input_list] )
|
def fold_list(input_list, max_width=None):
"""
Fold the entries in input_list. If max_width is not None, fold only if it
is longer than max_width. Otherwise fold each entry.
"""
if not input_list:
return '' # depends on [control=['if'], data=[]]
if not isinstance(input_list[0], six.string_types):
input_list = [str(item) for item in input_list] # depends on [control=['if'], data=[]]
if max_width:
mystr = ', '.join(input_list)
return fold_string(mystr, max_width) # depends on [control=['if'], data=[]]
return '\n'.join(input_list)
|
def construct_reference_system(
symbols,
candidates=None,
options=None,
):
"""Take a list of symbols and construct gas phase
references system, when possible avoiding O2.
Candidates can be rearranged, where earlier candidates
get higher preference than later candidates
assume symbols sorted by atomic number
"""
if hasattr(options, 'no_hydrogen') and options.no_hydrogen:
add_hydrogen = False
else:
add_hydrogen = True
references = {}
sorted_candidates = [
'H2',
'H2O',
'NH3',
'N2',
'CH4',
'CO',
'H2S',
'HCl',
'O2']
if candidates is None:
candidates = sorted_candidates
else:
odd_candidates = [c for c in candidates if c not in sorted_candidates]
candidates = [c for c in sorted_candidates if c in candidates] \
+ odd_candidates
added_symbols = []
# go symbols in adsorbate
# to add reference species in procedural manner
for symbol in symbols:
added_symbols.append(symbol)
for candidate in candidates:
_symbols = ase.symbols.string2symbols(candidate)
# Add partial adsorbate species
# is subset of reference species
# and reference species
# is subset of full adsorbate species set
if set(added_symbols) <= set(list(references.keys()) + _symbols) \
and set(list(references.keys()) + _symbols) <= set(symbols) \
and candidate not in references.values():
references[symbol] = candidate
break
else:
raise UserWarning((
"No candidate satisfied {symbol}. Add more candidates\n"
" Symbols {symbols}\n"
" _Symbols {_symbols}\n"
" References {references}\n"
" Candidates {candidates}\n"
).format(
symbol=symbol,
symbols=symbols,
_symbols=_symbols,
candidates=candidates,
references=list(references.keys()),
))
sorted_references = []
references = list(references.items())
# put references in order so that each reference
# only adds one one additional species in each step
# while references:
# for i, reference in enumerate(references):
# if len(set(ase.symbols.string2symbols(reference[1])) -
# set(x[0] for x in sorted_references)) == 1:
# sorted_references.append(references.pop(i))
# break
return references
|
def function[construct_reference_system, parameter[symbols, candidates, options]]:
constant[Take a list of symbols and construct gas phase
references system, when possible avoiding O2.
Candidates can be rearranged, where earlier candidates
get higher preference than later candidates
assume symbols sorted by atomic number
]
if <ast.BoolOp object at 0x7da1b2429150> begin[:]
variable[add_hydrogen] assign[=] constant[False]
variable[references] assign[=] dictionary[[], []]
variable[sorted_candidates] assign[=] list[[<ast.Constant object at 0x7da1b24060b0>, <ast.Constant object at 0x7da1b24aca00>, <ast.Constant object at 0x7da1b24af4f0>, <ast.Constant object at 0x7da1b24af970>, <ast.Constant object at 0x7da1b24aeaa0>, <ast.Constant object at 0x7da1b24ac070>, <ast.Constant object at 0x7da1b24afa00>, <ast.Constant object at 0x7da1b24af430>, <ast.Constant object at 0x7da1b24af5e0>]]
if compare[name[candidates] is constant[None]] begin[:]
variable[candidates] assign[=] name[sorted_candidates]
variable[added_symbols] assign[=] list[[]]
for taget[name[symbol]] in starred[name[symbols]] begin[:]
call[name[added_symbols].append, parameter[name[symbol]]]
for taget[name[candidate]] in starred[name[candidates]] begin[:]
variable[_symbols] assign[=] call[name[ase].symbols.string2symbols, parameter[name[candidate]]]
if <ast.BoolOp object at 0x7da1b242bcd0> begin[:]
call[name[references]][name[symbol]] assign[=] name[candidate]
break
variable[sorted_references] assign[=] list[[]]
variable[references] assign[=] call[name[list], parameter[call[name[references].items, parameter[]]]]
return[name[references]]
|
keyword[def] identifier[construct_reference_system] (
identifier[symbols] ,
identifier[candidates] = keyword[None] ,
identifier[options] = keyword[None] ,
):
literal[string]
keyword[if] identifier[hasattr] ( identifier[options] , literal[string] ) keyword[and] identifier[options] . identifier[no_hydrogen] :
identifier[add_hydrogen] = keyword[False]
keyword[else] :
identifier[add_hydrogen] = keyword[True]
identifier[references] ={}
identifier[sorted_candidates] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
keyword[if] identifier[candidates] keyword[is] keyword[None] :
identifier[candidates] = identifier[sorted_candidates]
keyword[else] :
identifier[odd_candidates] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[candidates] keyword[if] identifier[c] keyword[not] keyword[in] identifier[sorted_candidates] ]
identifier[candidates] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[sorted_candidates] keyword[if] identifier[c] keyword[in] identifier[candidates] ]+ identifier[odd_candidates]
identifier[added_symbols] =[]
keyword[for] identifier[symbol] keyword[in] identifier[symbols] :
identifier[added_symbols] . identifier[append] ( identifier[symbol] )
keyword[for] identifier[candidate] keyword[in] identifier[candidates] :
identifier[_symbols] = identifier[ase] . identifier[symbols] . identifier[string2symbols] ( identifier[candidate] )
keyword[if] identifier[set] ( identifier[added_symbols] )<= identifier[set] ( identifier[list] ( identifier[references] . identifier[keys] ())+ identifier[_symbols] ) keyword[and] identifier[set] ( identifier[list] ( identifier[references] . identifier[keys] ())+ identifier[_symbols] )<= identifier[set] ( identifier[symbols] ) keyword[and] identifier[candidate] keyword[not] keyword[in] identifier[references] . identifier[values] ():
identifier[references] [ identifier[symbol] ]= identifier[candidate]
keyword[break]
keyword[else] :
keyword[raise] identifier[UserWarning] ((
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
). identifier[format] (
identifier[symbol] = identifier[symbol] ,
identifier[symbols] = identifier[symbols] ,
identifier[_symbols] = identifier[_symbols] ,
identifier[candidates] = identifier[candidates] ,
identifier[references] = identifier[list] ( identifier[references] . identifier[keys] ()),
))
identifier[sorted_references] =[]
identifier[references] = identifier[list] ( identifier[references] . identifier[items] ())
keyword[return] identifier[references]
|
def construct_reference_system(symbols, candidates=None, options=None):
"""Take a list of symbols and construct gas phase
references system, when possible avoiding O2.
Candidates can be rearranged, where earlier candidates
get higher preference than later candidates
assume symbols sorted by atomic number
"""
if hasattr(options, 'no_hydrogen') and options.no_hydrogen:
add_hydrogen = False # depends on [control=['if'], data=[]]
else:
add_hydrogen = True
references = {}
sorted_candidates = ['H2', 'H2O', 'NH3', 'N2', 'CH4', 'CO', 'H2S', 'HCl', 'O2']
if candidates is None:
candidates = sorted_candidates # depends on [control=['if'], data=['candidates']]
else:
odd_candidates = [c for c in candidates if c not in sorted_candidates]
candidates = [c for c in sorted_candidates if c in candidates] + odd_candidates
added_symbols = []
# go symbols in adsorbate
# to add reference species in procedural manner
for symbol in symbols:
added_symbols.append(symbol)
for candidate in candidates:
_symbols = ase.symbols.string2symbols(candidate)
# Add partial adsorbate species
# is subset of reference species
# and reference species
# is subset of full adsorbate species set
if set(added_symbols) <= set(list(references.keys()) + _symbols) and set(list(references.keys()) + _symbols) <= set(symbols) and (candidate not in references.values()):
references[symbol] = candidate
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['candidate']]
else:
raise UserWarning('No candidate satisfied {symbol}. Add more candidates\n Symbols {symbols}\n _Symbols {_symbols}\n References {references}\n Candidates {candidates}\n'.format(symbol=symbol, symbols=symbols, _symbols=_symbols, candidates=candidates, references=list(references.keys()))) # depends on [control=['for'], data=['symbol']]
sorted_references = []
references = list(references.items())
# put references in order so that each reference
# only adds one one additional species in each step
# while references:
# for i, reference in enumerate(references):
# if len(set(ase.symbols.string2symbols(reference[1])) -
# set(x[0] for x in sorted_references)) == 1:
# sorted_references.append(references.pop(i))
# break
return references
|
def fmt_size(size, binary=True):
'''
Get size and unit.
:param size: size in bytes
:type size: int
:param binary: whether use binary or standard units, defaults to True
:type binary: bool
:return: size and unit
:rtype: tuple of int and unit as str
'''
if binary:
fmt_sizes = binary_units
fmt_divider = 1024.
else:
fmt_sizes = standard_units
fmt_divider = 1000.
for fmt in fmt_sizes[:-1]:
if size < 1000:
return (size, fmt)
size /= fmt_divider
return size, fmt_sizes[-1]
|
def function[fmt_size, parameter[size, binary]]:
constant[
Get size and unit.
:param size: size in bytes
:type size: int
:param binary: whether use binary or standard units, defaults to True
:type binary: bool
:return: size and unit
:rtype: tuple of int and unit as str
]
if name[binary] begin[:]
variable[fmt_sizes] assign[=] name[binary_units]
variable[fmt_divider] assign[=] constant[1024.0]
for taget[name[fmt]] in starred[call[name[fmt_sizes]][<ast.Slice object at 0x7da1b0579ab0>]] begin[:]
if compare[name[size] less[<] constant[1000]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b057b610>, <ast.Name object at 0x7da1b057b760>]]]
<ast.AugAssign object at 0x7da1b057b9d0>
return[tuple[[<ast.Name object at 0x7da1b057bfd0>, <ast.Subscript object at 0x7da1b05780d0>]]]
|
keyword[def] identifier[fmt_size] ( identifier[size] , identifier[binary] = keyword[True] ):
literal[string]
keyword[if] identifier[binary] :
identifier[fmt_sizes] = identifier[binary_units]
identifier[fmt_divider] = literal[int]
keyword[else] :
identifier[fmt_sizes] = identifier[standard_units]
identifier[fmt_divider] = literal[int]
keyword[for] identifier[fmt] keyword[in] identifier[fmt_sizes] [:- literal[int] ]:
keyword[if] identifier[size] < literal[int] :
keyword[return] ( identifier[size] , identifier[fmt] )
identifier[size] /= identifier[fmt_divider]
keyword[return] identifier[size] , identifier[fmt_sizes] [- literal[int] ]
|
def fmt_size(size, binary=True):
"""
Get size and unit.
:param size: size in bytes
:type size: int
:param binary: whether use binary or standard units, defaults to True
:type binary: bool
:return: size and unit
:rtype: tuple of int and unit as str
"""
if binary:
fmt_sizes = binary_units
fmt_divider = 1024.0 # depends on [control=['if'], data=[]]
else:
fmt_sizes = standard_units
fmt_divider = 1000.0
for fmt in fmt_sizes[:-1]:
if size < 1000:
return (size, fmt) # depends on [control=['if'], data=['size']]
size /= fmt_divider # depends on [control=['for'], data=['fmt']]
return (size, fmt_sizes[-1])
|
def set(self, document_data, merge=False):
"""Replace the current document in the Firestore database.
A write ``option`` can be specified to indicate preconditions of
the "set" operation. If no ``option`` is specified and this document
doesn't exist yet, this method will create it.
Overwrites all content for the document with the fields in
``document_data``. This method performs almost the same functionality
as :meth:`create`. The only difference is that this method doesn't
make any requirements on the existence of the document (unless
``option`` is used), whereas as :meth:`create` will fail if the
document already exists.
Args:
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
write result corresponding to the committed document. A write
result contains an ``update_time`` field.
"""
batch = self._client.batch()
batch.set(self, document_data, merge=merge)
write_results = batch.commit()
return _first_write_result(write_results)
|
def function[set, parameter[self, document_data, merge]]:
constant[Replace the current document in the Firestore database.
A write ``option`` can be specified to indicate preconditions of
the "set" operation. If no ``option`` is specified and this document
doesn't exist yet, this method will create it.
Overwrites all content for the document with the fields in
``document_data``. This method performs almost the same functionality
as :meth:`create`. The only difference is that this method doesn't
make any requirements on the existence of the document (unless
``option`` is used), whereas as :meth:`create` will fail if the
document already exists.
Args:
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
write result corresponding to the committed document. A write
result contains an ``update_time`` field.
]
variable[batch] assign[=] call[name[self]._client.batch, parameter[]]
call[name[batch].set, parameter[name[self], name[document_data]]]
variable[write_results] assign[=] call[name[batch].commit, parameter[]]
return[call[name[_first_write_result], parameter[name[write_results]]]]
|
keyword[def] identifier[set] ( identifier[self] , identifier[document_data] , identifier[merge] = keyword[False] ):
literal[string]
identifier[batch] = identifier[self] . identifier[_client] . identifier[batch] ()
identifier[batch] . identifier[set] ( identifier[self] , identifier[document_data] , identifier[merge] = identifier[merge] )
identifier[write_results] = identifier[batch] . identifier[commit] ()
keyword[return] identifier[_first_write_result] ( identifier[write_results] )
|
def set(self, document_data, merge=False):
"""Replace the current document in the Firestore database.
A write ``option`` can be specified to indicate preconditions of
the "set" operation. If no ``option`` is specified and this document
doesn't exist yet, this method will create it.
Overwrites all content for the document with the fields in
``document_data``. This method performs almost the same functionality
as :meth:`create`. The only difference is that this method doesn't
make any requirements on the existence of the document (unless
``option`` is used), whereas as :meth:`create` will fail if the
document already exists.
Args:
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
write result corresponding to the committed document. A write
result contains an ``update_time`` field.
"""
batch = self._client.batch()
batch.set(self, document_data, merge=merge)
write_results = batch.commit()
return _first_write_result(write_results)
|
def __writepid(self, pid):
"""
HoverFly fails to launch if it's already running on
the same ports. So we have to keep track of them using
temp files with the proxy port and admin port, containing
the processe's PID.
"""
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort))
with open(name, 'w') as f:
f.write(str(pid))
logging.debug("writing to %s"%name)
|
def function[__writepid, parameter[self, pid]]:
constant[
HoverFly fails to launch if it's already running on
the same ports. So we have to keep track of them using
temp files with the proxy port and admin port, containing
the processe's PID.
]
import module[tempfile]
variable[d] assign[=] call[name[tempfile].gettempdir, parameter[]]
variable[name] assign[=] call[name[os].path.join, parameter[name[d], binary_operation[constant[hoverpy.%i.%i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2054a5ea0>, <ast.Attribute object at 0x7da2054a71c0>]]]]]
with call[name[open], parameter[name[name], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[str], parameter[name[pid]]]]]
call[name[logging].debug, parameter[binary_operation[constant[writing to %s] <ast.Mod object at 0x7da2590d6920> name[name]]]]
|
keyword[def] identifier[__writepid] ( identifier[self] , identifier[pid] ):
literal[string]
keyword[import] identifier[tempfile]
identifier[d] = identifier[tempfile] . identifier[gettempdir] ()
identifier[name] = identifier[os] . identifier[path] . identifier[join] ( identifier[d] , literal[string] %( identifier[self] . identifier[_proxyPort] , identifier[self] . identifier[_adminPort] ))
keyword[with] identifier[open] ( identifier[name] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[str] ( identifier[pid] ))
identifier[logging] . identifier[debug] ( literal[string] % identifier[name] )
|
def __writepid(self, pid):
"""
HoverFly fails to launch if it's already running on
the same ports. So we have to keep track of them using
temp files with the proxy port and admin port, containing
the processe's PID.
"""
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, 'hoverpy.%i.%i' % (self._proxyPort, self._adminPort))
with open(name, 'w') as f:
f.write(str(pid))
logging.debug('writing to %s' % name) # depends on [control=['with'], data=['f']]
|
def translate(
nucleotide_sequence,
first_codon_is_start=True,
to_stop=True,
truncate=False):
"""Translates cDNA coding sequence into amino acid protein sequence.
Should typically start with a start codon but allowing non-methionine
first residues since the CDS we're translating might have been affected
by a start loss mutation.
The sequence may include the 3' UTR but will stop translation at the first
encountered stop codon.
Parameters
----------
nucleotide_sequence : BioPython Seq
cDNA sequence
first_codon_is_start : bool
Treat the beginning of nucleotide_sequence (translates methionin)
truncate : bool
Truncate sequence if it's not a multiple of 3 (default = False)
Returns BioPython Seq of amino acids
"""
if not isinstance(nucleotide_sequence, Seq):
nucleotide_sequence = Seq(nucleotide_sequence)
if truncate:
# if sequence isn't a multiple of 3, truncate it so BioPython
# doesn't complain
n_nucleotides = int(len(nucleotide_sequence) / 3) * 3
nucleotide_sequence = nucleotide_sequence[:n_nucleotides]
else:
n_nucleotides = len(nucleotide_sequence)
assert n_nucleotides % 3 == 0, \
("Expected nucleotide sequence to be multiple of 3"
" but got %s of length %d") % (
nucleotide_sequence,
n_nucleotides)
# passing cds=False to translate since we may want to deal with premature
# stop codons
protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False)
if first_codon_is_start and (
len(protein_sequence) == 0 or protein_sequence[0] != "M"):
if nucleotide_sequence[:3] in START_CODONS:
# TODO: figure out when these should be made into methionines
# and when left as whatever amino acid they normally code for
# e.g. Leucine start codons
# See: DOI: 10.1371/journal.pbio.0020397
return "M" + protein_sequence[1:]
else:
raise ValueError(
("Expected first codon of %s to be start codon"
" (one of %s) but got %s") % (
protein_sequence[:10],
START_CODONS,
nucleotide_sequence))
return protein_sequence
|
def function[translate, parameter[nucleotide_sequence, first_codon_is_start, to_stop, truncate]]:
constant[Translates cDNA coding sequence into amino acid protein sequence.
Should typically start with a start codon but allowing non-methionine
first residues since the CDS we're translating might have been affected
by a start loss mutation.
The sequence may include the 3' UTR but will stop translation at the first
encountered stop codon.
Parameters
----------
nucleotide_sequence : BioPython Seq
cDNA sequence
first_codon_is_start : bool
Treat the beginning of nucleotide_sequence (translates methionin)
truncate : bool
Truncate sequence if it's not a multiple of 3 (default = False)
Returns BioPython Seq of amino acids
]
if <ast.UnaryOp object at 0x7da1b0464910> begin[:]
variable[nucleotide_sequence] assign[=] call[name[Seq], parameter[name[nucleotide_sequence]]]
if name[truncate] begin[:]
variable[n_nucleotides] assign[=] binary_operation[call[name[int], parameter[binary_operation[call[name[len], parameter[name[nucleotide_sequence]]] / constant[3]]]] * constant[3]]
variable[nucleotide_sequence] assign[=] call[name[nucleotide_sequence]][<ast.Slice object at 0x7da1b0464d30>]
assert[compare[binary_operation[name[n_nucleotides] <ast.Mod object at 0x7da2590d6920> constant[3]] equal[==] constant[0]]]
variable[protein_sequence] assign[=] call[name[nucleotide_sequence].translate, parameter[]]
if <ast.BoolOp object at 0x7da1b04d8b50> begin[:]
if compare[call[name[nucleotide_sequence]][<ast.Slice object at 0x7da1b04d98a0>] in name[START_CODONS]] begin[:]
return[binary_operation[constant[M] + call[name[protein_sequence]][<ast.Slice object at 0x7da1b04da050>]]]
return[name[protein_sequence]]
|
keyword[def] identifier[translate] (
identifier[nucleotide_sequence] ,
identifier[first_codon_is_start] = keyword[True] ,
identifier[to_stop] = keyword[True] ,
identifier[truncate] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[nucleotide_sequence] , identifier[Seq] ):
identifier[nucleotide_sequence] = identifier[Seq] ( identifier[nucleotide_sequence] )
keyword[if] identifier[truncate] :
identifier[n_nucleotides] = identifier[int] ( identifier[len] ( identifier[nucleotide_sequence] )/ literal[int] )* literal[int]
identifier[nucleotide_sequence] = identifier[nucleotide_sequence] [: identifier[n_nucleotides] ]
keyword[else] :
identifier[n_nucleotides] = identifier[len] ( identifier[nucleotide_sequence] )
keyword[assert] identifier[n_nucleotides] % literal[int] == literal[int] ,( literal[string]
literal[string] )%(
identifier[nucleotide_sequence] ,
identifier[n_nucleotides] )
identifier[protein_sequence] = identifier[nucleotide_sequence] . identifier[translate] ( identifier[to_stop] = identifier[to_stop] , identifier[cds] = keyword[False] )
keyword[if] identifier[first_codon_is_start] keyword[and] (
identifier[len] ( identifier[protein_sequence] )== literal[int] keyword[or] identifier[protein_sequence] [ literal[int] ]!= literal[string] ):
keyword[if] identifier[nucleotide_sequence] [: literal[int] ] keyword[in] identifier[START_CODONS] :
keyword[return] literal[string] + identifier[protein_sequence] [ literal[int] :]
keyword[else] :
keyword[raise] identifier[ValueError] (
( literal[string]
literal[string] )%(
identifier[protein_sequence] [: literal[int] ],
identifier[START_CODONS] ,
identifier[nucleotide_sequence] ))
keyword[return] identifier[protein_sequence]
|
def translate(nucleotide_sequence, first_codon_is_start=True, to_stop=True, truncate=False):
"""Translates cDNA coding sequence into amino acid protein sequence.
Should typically start with a start codon but allowing non-methionine
first residues since the CDS we're translating might have been affected
by a start loss mutation.
The sequence may include the 3' UTR but will stop translation at the first
encountered stop codon.
Parameters
----------
nucleotide_sequence : BioPython Seq
cDNA sequence
first_codon_is_start : bool
Treat the beginning of nucleotide_sequence (translates methionin)
truncate : bool
Truncate sequence if it's not a multiple of 3 (default = False)
Returns BioPython Seq of amino acids
"""
if not isinstance(nucleotide_sequence, Seq):
nucleotide_sequence = Seq(nucleotide_sequence) # depends on [control=['if'], data=[]]
if truncate:
# if sequence isn't a multiple of 3, truncate it so BioPython
# doesn't complain
n_nucleotides = int(len(nucleotide_sequence) / 3) * 3
nucleotide_sequence = nucleotide_sequence[:n_nucleotides] # depends on [control=['if'], data=[]]
else:
n_nucleotides = len(nucleotide_sequence)
assert n_nucleotides % 3 == 0, 'Expected nucleotide sequence to be multiple of 3 but got %s of length %d' % (nucleotide_sequence, n_nucleotides)
# passing cds=False to translate since we may want to deal with premature
# stop codons
protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False)
if first_codon_is_start and (len(protein_sequence) == 0 or protein_sequence[0] != 'M'):
if nucleotide_sequence[:3] in START_CODONS:
# TODO: figure out when these should be made into methionines
# and when left as whatever amino acid they normally code for
# e.g. Leucine start codons
# See: DOI: 10.1371/journal.pbio.0020397
return 'M' + protein_sequence[1:] # depends on [control=['if'], data=[]]
else:
raise ValueError('Expected first codon of %s to be start codon (one of %s) but got %s' % (protein_sequence[:10], START_CODONS, nucleotide_sequence)) # depends on [control=['if'], data=[]]
return protein_sequence
|
def shift(self, modelResult):
"""Shift the model result and return the new instance.
Queues up the T(i+1) prediction value and emits a T(i)
input/prediction pair, if possible. E.g., if the previous T(i-1)
iteration was learn-only, then we would not have a T(i) prediction in our
FIFO and would not be able to emit a meaningful input/prediction pair.
:param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult`
instance to shift.
:return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that
has been shifted
"""
inferencesToWrite = {}
if self._inferenceBuffer is None:
maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1)
self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))
for inferenceElement, inference in modelResult.inferences.iteritems():
if isinstance(inference, dict):
inferencesToWrite[inferenceElement] = {}
for key, _ in inference.iteritems():
delay = InferenceElement.getTemporalDelay(inferenceElement, key)
if len(self._inferenceBuffer) > delay:
prevInference = self._inferenceBuffer[delay][inferenceElement][key]
inferencesToWrite[inferenceElement][key] = prevInference
else:
inferencesToWrite[inferenceElement][key] = None
else:
delay = InferenceElement.getTemporalDelay(inferenceElement)
if len(self._inferenceBuffer) > delay:
inferencesToWrite[inferenceElement] = (
self._inferenceBuffer[delay][inferenceElement])
else:
if type(inference) in (list, tuple):
inferencesToWrite[inferenceElement] = [None] * len(inference)
else:
inferencesToWrite[inferenceElement] = None
shiftedResult = ModelResult(rawInput=modelResult.rawInput,
sensorInput=modelResult.sensorInput,
inferences=inferencesToWrite,
metrics=modelResult.metrics,
predictedFieldIdx=modelResult.predictedFieldIdx,
predictedFieldName=modelResult.predictedFieldName)
return shiftedResult
|
def function[shift, parameter[self, modelResult]]:
constant[Shift the model result and return the new instance.
Queues up the T(i+1) prediction value and emits a T(i)
input/prediction pair, if possible. E.g., if the previous T(i-1)
iteration was learn-only, then we would not have a T(i) prediction in our
FIFO and would not be able to emit a meaningful input/prediction pair.
:param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult`
instance to shift.
:return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that
has been shifted
]
variable[inferencesToWrite] assign[=] dictionary[[], []]
if compare[name[self]._inferenceBuffer is constant[None]] begin[:]
variable[maxDelay] assign[=] call[name[InferenceElement].getMaxDelay, parameter[name[modelResult].inferences]]
name[self]._inferenceBuffer assign[=] call[name[collections].deque, parameter[]]
call[name[self]._inferenceBuffer.appendleft, parameter[call[name[copy].deepcopy, parameter[name[modelResult].inferences]]]]
for taget[tuple[[<ast.Name object at 0x7da20c794880>, <ast.Name object at 0x7da20c796530>]]] in starred[call[name[modelResult].inferences.iteritems, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[inference], name[dict]]] begin[:]
call[name[inferencesToWrite]][name[inferenceElement]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c794940>, <ast.Name object at 0x7da20c795ff0>]]] in starred[call[name[inference].iteritems, parameter[]]] begin[:]
variable[delay] assign[=] call[name[InferenceElement].getTemporalDelay, parameter[name[inferenceElement], name[key]]]
if compare[call[name[len], parameter[name[self]._inferenceBuffer]] greater[>] name[delay]] begin[:]
variable[prevInference] assign[=] call[call[call[name[self]._inferenceBuffer][name[delay]]][name[inferenceElement]]][name[key]]
call[call[name[inferencesToWrite]][name[inferenceElement]]][name[key]] assign[=] name[prevInference]
variable[shiftedResult] assign[=] call[name[ModelResult], parameter[]]
return[name[shiftedResult]]
|
keyword[def] identifier[shift] ( identifier[self] , identifier[modelResult] ):
literal[string]
identifier[inferencesToWrite] ={}
keyword[if] identifier[self] . identifier[_inferenceBuffer] keyword[is] keyword[None] :
identifier[maxDelay] = identifier[InferenceElement] . identifier[getMaxDelay] ( identifier[modelResult] . identifier[inferences] )
identifier[self] . identifier[_inferenceBuffer] = identifier[collections] . identifier[deque] ( identifier[maxlen] = identifier[maxDelay] + literal[int] )
identifier[self] . identifier[_inferenceBuffer] . identifier[appendleft] ( identifier[copy] . identifier[deepcopy] ( identifier[modelResult] . identifier[inferences] ))
keyword[for] identifier[inferenceElement] , identifier[inference] keyword[in] identifier[modelResult] . identifier[inferences] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[inference] , identifier[dict] ):
identifier[inferencesToWrite] [ identifier[inferenceElement] ]={}
keyword[for] identifier[key] , identifier[_] keyword[in] identifier[inference] . identifier[iteritems] ():
identifier[delay] = identifier[InferenceElement] . identifier[getTemporalDelay] ( identifier[inferenceElement] , identifier[key] )
keyword[if] identifier[len] ( identifier[self] . identifier[_inferenceBuffer] )> identifier[delay] :
identifier[prevInference] = identifier[self] . identifier[_inferenceBuffer] [ identifier[delay] ][ identifier[inferenceElement] ][ identifier[key] ]
identifier[inferencesToWrite] [ identifier[inferenceElement] ][ identifier[key] ]= identifier[prevInference]
keyword[else] :
identifier[inferencesToWrite] [ identifier[inferenceElement] ][ identifier[key] ]= keyword[None]
keyword[else] :
identifier[delay] = identifier[InferenceElement] . identifier[getTemporalDelay] ( identifier[inferenceElement] )
keyword[if] identifier[len] ( identifier[self] . identifier[_inferenceBuffer] )> identifier[delay] :
identifier[inferencesToWrite] [ identifier[inferenceElement] ]=(
identifier[self] . identifier[_inferenceBuffer] [ identifier[delay] ][ identifier[inferenceElement] ])
keyword[else] :
keyword[if] identifier[type] ( identifier[inference] ) keyword[in] ( identifier[list] , identifier[tuple] ):
identifier[inferencesToWrite] [ identifier[inferenceElement] ]=[ keyword[None] ]* identifier[len] ( identifier[inference] )
keyword[else] :
identifier[inferencesToWrite] [ identifier[inferenceElement] ]= keyword[None]
identifier[shiftedResult] = identifier[ModelResult] ( identifier[rawInput] = identifier[modelResult] . identifier[rawInput] ,
identifier[sensorInput] = identifier[modelResult] . identifier[sensorInput] ,
identifier[inferences] = identifier[inferencesToWrite] ,
identifier[metrics] = identifier[modelResult] . identifier[metrics] ,
identifier[predictedFieldIdx] = identifier[modelResult] . identifier[predictedFieldIdx] ,
identifier[predictedFieldName] = identifier[modelResult] . identifier[predictedFieldName] )
keyword[return] identifier[shiftedResult]
|
def shift(self, modelResult):
"""Shift the model result and return the new instance.
Queues up the T(i+1) prediction value and emits a T(i)
input/prediction pair, if possible. E.g., if the previous T(i-1)
iteration was learn-only, then we would not have a T(i) prediction in our
FIFO and would not be able to emit a meaningful input/prediction pair.
:param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult`
instance to shift.
:return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that
has been shifted
"""
inferencesToWrite = {}
if self._inferenceBuffer is None:
maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1) # depends on [control=['if'], data=[]]
self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))
for (inferenceElement, inference) in modelResult.inferences.iteritems():
if isinstance(inference, dict):
inferencesToWrite[inferenceElement] = {}
for (key, _) in inference.iteritems():
delay = InferenceElement.getTemporalDelay(inferenceElement, key)
if len(self._inferenceBuffer) > delay:
prevInference = self._inferenceBuffer[delay][inferenceElement][key]
inferencesToWrite[inferenceElement][key] = prevInference # depends on [control=['if'], data=['delay']]
else:
inferencesToWrite[inferenceElement][key] = None # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
delay = InferenceElement.getTemporalDelay(inferenceElement)
if len(self._inferenceBuffer) > delay:
inferencesToWrite[inferenceElement] = self._inferenceBuffer[delay][inferenceElement] # depends on [control=['if'], data=['delay']]
elif type(inference) in (list, tuple):
inferencesToWrite[inferenceElement] = [None] * len(inference) # depends on [control=['if'], data=[]]
else:
inferencesToWrite[inferenceElement] = None # depends on [control=['for'], data=[]]
shiftedResult = ModelResult(rawInput=modelResult.rawInput, sensorInput=modelResult.sensorInput, inferences=inferencesToWrite, metrics=modelResult.metrics, predictedFieldIdx=modelResult.predictedFieldIdx, predictedFieldName=modelResult.predictedFieldName)
return shiftedResult
|
def readlines(self, timeout=1):
"""
read all lines that are available. abort after timeout
when no more data arrives.
"""
lines = []
while 1:
line = self.readline(timeout=timeout)
if line:
lines.append(line)
if not line or line[-1:] != '\n':
break
return lines
|
def function[readlines, parameter[self, timeout]]:
constant[
read all lines that are available. abort after timeout
when no more data arrives.
]
variable[lines] assign[=] list[[]]
while constant[1] begin[:]
variable[line] assign[=] call[name[self].readline, parameter[]]
if name[line] begin[:]
call[name[lines].append, parameter[name[line]]]
if <ast.BoolOp object at 0x7da1b0e14bb0> begin[:]
break
return[name[lines]]
|
keyword[def] identifier[readlines] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
identifier[lines] =[]
keyword[while] literal[int] :
identifier[line] = identifier[self] . identifier[readline] ( identifier[timeout] = identifier[timeout] )
keyword[if] identifier[line] :
identifier[lines] . identifier[append] ( identifier[line] )
keyword[if] keyword[not] identifier[line] keyword[or] identifier[line] [- literal[int] :]!= literal[string] :
keyword[break]
keyword[return] identifier[lines]
|
def readlines(self, timeout=1):
"""
read all lines that are available. abort after timeout
when no more data arrives.
"""
lines = []
while 1:
line = self.readline(timeout=timeout)
if line:
lines.append(line) # depends on [control=['if'], data=[]]
if not line or line[-1:] != '\n':
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return lines
|
def _Open(self, path_spec, mode='rb'):
"""Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb'
read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)
fvde_volume = pyfvde.volume()
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
fvde.FVDEVolumeOpen(
fvde_volume, path_spec, file_object, resolver.Resolver.key_chain)
except:
file_object.close()
raise
self._fvde_volume = fvde_volume
self._file_object = file_object
|
def function[_Open, parameter[self, path_spec, mode]]:
constant[Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb'
read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
]
if <ast.UnaryOp object at 0x7da1b07f4a30> begin[:]
<ast.Raise object at 0x7da1b07f6620>
call[name[resolver].Resolver.key_chain.ExtractCredentialsFromPathSpec, parameter[name[path_spec]]]
variable[fvde_volume] assign[=] call[name[pyfvde].volume, parameter[]]
variable[file_object] assign[=] call[name[resolver].Resolver.OpenFileObject, parameter[name[path_spec].parent]]
<ast.Try object at 0x7da1b07f7430>
name[self]._fvde_volume assign[=] name[fvde_volume]
name[self]._file_object assign[=] name[file_object]
|
keyword[def] identifier[_Open] ( identifier[self] , identifier[path_spec] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[path_spec] . identifier[HasParent] ():
keyword[raise] identifier[errors] . identifier[PathSpecError] (
literal[string] )
identifier[resolver] . identifier[Resolver] . identifier[key_chain] . identifier[ExtractCredentialsFromPathSpec] ( identifier[path_spec] )
identifier[fvde_volume] = identifier[pyfvde] . identifier[volume] ()
identifier[file_object] = identifier[resolver] . identifier[Resolver] . identifier[OpenFileObject] (
identifier[path_spec] . identifier[parent] , identifier[resolver_context] = identifier[self] . identifier[_resolver_context] )
keyword[try] :
identifier[fvde] . identifier[FVDEVolumeOpen] (
identifier[fvde_volume] , identifier[path_spec] , identifier[file_object] , identifier[resolver] . identifier[Resolver] . identifier[key_chain] )
keyword[except] :
identifier[file_object] . identifier[close] ()
keyword[raise]
identifier[self] . identifier[_fvde_volume] = identifier[fvde_volume]
identifier[self] . identifier[_file_object] = identifier[file_object]
|
def _Open(self, path_spec, mode='rb'):
"""Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb'
read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError('Unsupported path specification without parent.') # depends on [control=['if'], data=[]]
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)
fvde_volume = pyfvde.volume()
file_object = resolver.Resolver.OpenFileObject(path_spec.parent, resolver_context=self._resolver_context)
try:
fvde.FVDEVolumeOpen(fvde_volume, path_spec, file_object, resolver.Resolver.key_chain) # depends on [control=['try'], data=[]]
except:
file_object.close()
raise # depends on [control=['except'], data=[]]
self._fvde_volume = fvde_volume
self._file_object = file_object
|
def save_to_file(self, filename: str) -> ConfigFile:
"""
This converts the NetworkedConfigFile into a normal ConfigFile object.
This requires the normal class hooks to be provided.
"""
newclass = ConfigFile(fd=filename, load_hook=self.normal_class_hook[0],
dump_hook=self.normal_class_hook[1], safe_load=self.safe_load)
return newclass
|
def function[save_to_file, parameter[self, filename]]:
constant[
This converts the NetworkedConfigFile into a normal ConfigFile object.
This requires the normal class hooks to be provided.
]
variable[newclass] assign[=] call[name[ConfigFile], parameter[]]
return[name[newclass]]
|
keyword[def] identifier[save_to_file] ( identifier[self] , identifier[filename] : identifier[str] )-> identifier[ConfigFile] :
literal[string]
identifier[newclass] = identifier[ConfigFile] ( identifier[fd] = identifier[filename] , identifier[load_hook] = identifier[self] . identifier[normal_class_hook] [ literal[int] ],
identifier[dump_hook] = identifier[self] . identifier[normal_class_hook] [ literal[int] ], identifier[safe_load] = identifier[self] . identifier[safe_load] )
keyword[return] identifier[newclass]
|
def save_to_file(self, filename: str) -> ConfigFile:
"""
This converts the NetworkedConfigFile into a normal ConfigFile object.
This requires the normal class hooks to be provided.
"""
newclass = ConfigFile(fd=filename, load_hook=self.normal_class_hook[0], dump_hook=self.normal_class_hook[1], safe_load=self.safe_load)
return newclass
|
def AddCampaign(self, client_customer_id, campaign_name, ad_channel_type,
budget):
"""Add a Campaign to the client account.
Args:
client_customer_id: str Client Customer Id to use when creating Campaign.
campaign_name: str Name of the campaign to be added.
ad_channel_type: str Primary serving target the campaign's ads.
budget: str a budget amount (in micros) to use.
"""
self.client.SetClientCustomerId(client_customer_id)
campaign_service = self.client.GetService('CampaignService')
budget_id = self.AddBudget(client_customer_id, budget)
operations = [{
'operator': 'ADD',
'operand': {
'name': campaign_name,
'status': 'PAUSED',
'biddingStrategyConfiguration': {
'biddingStrategyType': 'MANUAL_CPC',
'biddingScheme': {
'xsi_type': 'ManualCpcBiddingScheme',
'enhancedCpcEnabled': 'false'
}
},
'budget': {
'budgetId': budget_id
},
'advertisingChannelType': ad_channel_type
}
}]
campaign_service.mutate(operations)
|
def function[AddCampaign, parameter[self, client_customer_id, campaign_name, ad_channel_type, budget]]:
constant[Add a Campaign to the client account.
Args:
client_customer_id: str Client Customer Id to use when creating Campaign.
campaign_name: str Name of the campaign to be added.
ad_channel_type: str Primary serving target the campaign's ads.
budget: str a budget amount (in micros) to use.
]
call[name[self].client.SetClientCustomerId, parameter[name[client_customer_id]]]
variable[campaign_service] assign[=] call[name[self].client.GetService, parameter[constant[CampaignService]]]
variable[budget_id] assign[=] call[name[self].AddBudget, parameter[name[client_customer_id], name[budget]]]
variable[operations] assign[=] list[[<ast.Dict object at 0x7da1b1b0e4d0>]]
call[name[campaign_service].mutate, parameter[name[operations]]]
|
keyword[def] identifier[AddCampaign] ( identifier[self] , identifier[client_customer_id] , identifier[campaign_name] , identifier[ad_channel_type] ,
identifier[budget] ):
literal[string]
identifier[self] . identifier[client] . identifier[SetClientCustomerId] ( identifier[client_customer_id] )
identifier[campaign_service] = identifier[self] . identifier[client] . identifier[GetService] ( literal[string] )
identifier[budget_id] = identifier[self] . identifier[AddBudget] ( identifier[client_customer_id] , identifier[budget] )
identifier[operations] =[{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[campaign_name] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string]
}
},
literal[string] :{
literal[string] : identifier[budget_id]
},
literal[string] : identifier[ad_channel_type]
}
}]
identifier[campaign_service] . identifier[mutate] ( identifier[operations] )
|
def AddCampaign(self, client_customer_id, campaign_name, ad_channel_type, budget):
"""Add a Campaign to the client account.
Args:
client_customer_id: str Client Customer Id to use when creating Campaign.
campaign_name: str Name of the campaign to be added.
ad_channel_type: str Primary serving target the campaign's ads.
budget: str a budget amount (in micros) to use.
"""
self.client.SetClientCustomerId(client_customer_id)
campaign_service = self.client.GetService('CampaignService')
budget_id = self.AddBudget(client_customer_id, budget)
operations = [{'operator': 'ADD', 'operand': {'name': campaign_name, 'status': 'PAUSED', 'biddingStrategyConfiguration': {'biddingStrategyType': 'MANUAL_CPC', 'biddingScheme': {'xsi_type': 'ManualCpcBiddingScheme', 'enhancedCpcEnabled': 'false'}}, 'budget': {'budgetId': budget_id}, 'advertisingChannelType': ad_channel_type}}]
campaign_service.mutate(operations)
|
def edit_message_text(self, text, chat_id=None, message_id=None, inline_message_id=None, parse_mode=None, disable_web_page_preview=None, reply_markup=None):
"""
Use this method to edit text and game messages sent by the bot or via the bot (for inline bots). On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned.
https://core.telegram.org/bots/api#editmessagetext
Parameters:
:param text: New text of the message
:type text: str|unicode
Optional keyword parameters:
:param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:type message_id: int
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:type inline_message_id: str|unicode
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message.
:type parse_mode: str|unicode
:param disable_web_page_preview: Disables link previews for links in this message
:type disable_web_page_preview: bool
:param reply_markup: A JSON-serialized object for an inline keyboard.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup
Returns:
:return: On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned
:rtype: pytgbot.api_types.receivable.updates.Message | bool
"""
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
assert_type_or_raise(text, unicode_type, parameter_name="text")
assert_type_or_raise(chat_id, None, (int, unicode_type), parameter_name="chat_id")
assert_type_or_raise(message_id, None, int, parameter_name="message_id")
assert_type_or_raise(inline_message_id, None, unicode_type, parameter_name="inline_message_id")
assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
assert_type_or_raise(disable_web_page_preview, None, bool, parameter_name="disable_web_page_preview")
assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, parameter_name="reply_markup")
result = self.do("editMessageText", text=text, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.updates import Message
try:
return Message.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type Message", exc_info=True)
# end try
try:
return from_array_list(bool, result, list_level=0, is_builtin=True)
except TgApiParseException:
logger.debug("Failed parsing as primitive bool", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result
|
def function[edit_message_text, parameter[self, text, chat_id, message_id, inline_message_id, parse_mode, disable_web_page_preview, reply_markup]]:
constant[
Use this method to edit text and game messages sent by the bot or via the bot (for inline bots). On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned.
https://core.telegram.org/bots/api#editmessagetext
Parameters:
:param text: New text of the message
:type text: str|unicode
Optional keyword parameters:
:param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:type message_id: int
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:type inline_message_id: str|unicode
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message.
:type parse_mode: str|unicode
:param disable_web_page_preview: Disables link previews for links in this message
:type disable_web_page_preview: bool
:param reply_markup: A JSON-serialized object for an inline keyboard.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup
Returns:
:return: On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned
:rtype: pytgbot.api_types.receivable.updates.Message | bool
]
from relative_module[pytgbot.api_types.sendable.reply_markup] import module[InlineKeyboardMarkup]
call[name[assert_type_or_raise], parameter[name[text], name[unicode_type]]]
call[name[assert_type_or_raise], parameter[name[chat_id], constant[None], tuple[[<ast.Name object at 0x7da1b04d5c30>, <ast.Name object at 0x7da1b04d5c60>]]]]
call[name[assert_type_or_raise], parameter[name[message_id], constant[None], name[int]]]
call[name[assert_type_or_raise], parameter[name[inline_message_id], constant[None], name[unicode_type]]]
call[name[assert_type_or_raise], parameter[name[parse_mode], constant[None], name[unicode_type]]]
call[name[assert_type_or_raise], parameter[name[disable_web_page_preview], constant[None], name[bool]]]
call[name[assert_type_or_raise], parameter[name[reply_markup], constant[None], name[InlineKeyboardMarkup]]]
variable[result] assign[=] call[name[self].do, parameter[constant[editMessageText]]]
if name[self].return_python_objects begin[:]
call[name[logger].debug, parameter[call[constant[Trying to parse {data}].format, parameter[]]]]
from relative_module[pytgbot.api_types.receivable.updates] import module[Message]
<ast.Try object at 0x7da1b04d5330>
<ast.Try object at 0x7da1b04d7370>
<ast.Raise object at 0x7da1b04d6ec0>
return[name[result]]
|
keyword[def] identifier[edit_message_text] ( identifier[self] , identifier[text] , identifier[chat_id] = keyword[None] , identifier[message_id] = keyword[None] , identifier[inline_message_id] = keyword[None] , identifier[parse_mode] = keyword[None] , identifier[disable_web_page_preview] = keyword[None] , identifier[reply_markup] = keyword[None] ):
literal[string]
keyword[from] identifier[pytgbot] . identifier[api_types] . identifier[sendable] . identifier[reply_markup] keyword[import] identifier[InlineKeyboardMarkup]
identifier[assert_type_or_raise] ( identifier[text] , identifier[unicode_type] , identifier[parameter_name] = literal[string] )
identifier[assert_type_or_raise] ( identifier[chat_id] , keyword[None] ,( identifier[int] , identifier[unicode_type] ), identifier[parameter_name] = literal[string] )
identifier[assert_type_or_raise] ( identifier[message_id] , keyword[None] , identifier[int] , identifier[parameter_name] = literal[string] )
identifier[assert_type_or_raise] ( identifier[inline_message_id] , keyword[None] , identifier[unicode_type] , identifier[parameter_name] = literal[string] )
identifier[assert_type_or_raise] ( identifier[parse_mode] , keyword[None] , identifier[unicode_type] , identifier[parameter_name] = literal[string] )
identifier[assert_type_or_raise] ( identifier[disable_web_page_preview] , keyword[None] , identifier[bool] , identifier[parameter_name] = literal[string] )
identifier[assert_type_or_raise] ( identifier[reply_markup] , keyword[None] , identifier[InlineKeyboardMarkup] , identifier[parameter_name] = literal[string] )
identifier[result] = identifier[self] . identifier[do] ( literal[string] , identifier[text] = identifier[text] , identifier[chat_id] = identifier[chat_id] , identifier[message_id] = identifier[message_id] , identifier[inline_message_id] = identifier[inline_message_id] , identifier[parse_mode] = identifier[parse_mode] , identifier[disable_web_page_preview] = identifier[disable_web_page_preview] , identifier[reply_markup] = identifier[reply_markup] )
keyword[if] identifier[self] . identifier[return_python_objects] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[data] = identifier[repr] ( identifier[result] )))
keyword[from] identifier[pytgbot] . identifier[api_types] . identifier[receivable] . identifier[updates] keyword[import] identifier[Message]
keyword[try] :
keyword[return] identifier[Message] . identifier[from_array] ( identifier[result] )
keyword[except] identifier[TgApiParseException] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[exc_info] = keyword[True] )
keyword[try] :
keyword[return] identifier[from_array_list] ( identifier[bool] , identifier[result] , identifier[list_level] = literal[int] , identifier[is_builtin] = keyword[True] )
keyword[except] identifier[TgApiParseException] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[exc_info] = keyword[True] )
keyword[raise] identifier[TgApiParseException] ( literal[string] )
keyword[return] identifier[result]
|
def edit_message_text(self, text, chat_id=None, message_id=None, inline_message_id=None, parse_mode=None, disable_web_page_preview=None, reply_markup=None):
"""
Use this method to edit text and game messages sent by the bot or via the bot (for inline bots). On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned.
https://core.telegram.org/bots/api#editmessagetext
Parameters:
:param text: New text of the message
:type text: str|unicode
Optional keyword parameters:
:param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:type message_id: int
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:type inline_message_id: str|unicode
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message.
:type parse_mode: str|unicode
:param disable_web_page_preview: Disables link previews for links in this message
:type disable_web_page_preview: bool
:param reply_markup: A JSON-serialized object for an inline keyboard.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup
Returns:
:return: On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned
:rtype: pytgbot.api_types.receivable.updates.Message | bool
"""
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
assert_type_or_raise(text, unicode_type, parameter_name='text')
assert_type_or_raise(chat_id, None, (int, unicode_type), parameter_name='chat_id')
assert_type_or_raise(message_id, None, int, parameter_name='message_id')
assert_type_or_raise(inline_message_id, None, unicode_type, parameter_name='inline_message_id')
assert_type_or_raise(parse_mode, None, unicode_type, parameter_name='parse_mode')
assert_type_or_raise(disable_web_page_preview, None, bool, parameter_name='disable_web_page_preview')
assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, parameter_name='reply_markup')
result = self.do('editMessageText', text=text, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup)
if self.return_python_objects:
logger.debug('Trying to parse {data}'.format(data=repr(result)))
from pytgbot.api_types.receivable.updates import Message
try:
return Message.from_array(result) # depends on [control=['try'], data=[]]
except TgApiParseException:
logger.debug('Failed parsing as api_type Message', exc_info=True) # depends on [control=['except'], data=[]]
# end try
try:
return from_array_list(bool, result, list_level=0, is_builtin=True) # depends on [control=['try'], data=[]]
except TgApiParseException:
logger.debug('Failed parsing as primitive bool', exc_info=True) # depends on [control=['except'], data=[]]
# end try
# no valid parsing so far
raise TgApiParseException('Could not parse result.') # See debug log for details! # depends on [control=['if'], data=[]]
# end if return_python_objects
return result
|
def get_cached(location, **kwargs):
"""
Simple wrapper that adds Django caching support to 'geocoder.get()'.
"""
result = cache.get(location)
# Result is not cached or wrong
if not result or not result.ok:
result = geocoder.get(location, **kwargs)
if result.ok:
cache.set(location, result)
return result
|
def function[get_cached, parameter[location]]:
constant[
Simple wrapper that adds Django caching support to 'geocoder.get()'.
]
variable[result] assign[=] call[name[cache].get, parameter[name[location]]]
if <ast.BoolOp object at 0x7da1b2524310> begin[:]
variable[result] assign[=] call[name[geocoder].get, parameter[name[location]]]
if name[result].ok begin[:]
call[name[cache].set, parameter[name[location], name[result]]]
return[name[result]]
|
keyword[def] identifier[get_cached] ( identifier[location] ,** identifier[kwargs] ):
literal[string]
identifier[result] = identifier[cache] . identifier[get] ( identifier[location] )
keyword[if] keyword[not] identifier[result] keyword[or] keyword[not] identifier[result] . identifier[ok] :
identifier[result] = identifier[geocoder] . identifier[get] ( identifier[location] ,** identifier[kwargs] )
keyword[if] identifier[result] . identifier[ok] :
identifier[cache] . identifier[set] ( identifier[location] , identifier[result] )
keyword[return] identifier[result]
|
def get_cached(location, **kwargs):
"""
Simple wrapper that adds Django caching support to 'geocoder.get()'.
"""
result = cache.get(location)
# Result is not cached or wrong
if not result or not result.ok:
result = geocoder.get(location, **kwargs)
if result.ok:
cache.set(location, result) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return result
|
def rotate_z(self, angle):
"""
Rotates mesh about the z-axis.
Parameters
----------
angle : float
Angle in degrees to rotate about the z-axis.
"""
axis_rotation(self.points, angle, inplace=True, axis='z')
|
def function[rotate_z, parameter[self, angle]]:
constant[
Rotates mesh about the z-axis.
Parameters
----------
angle : float
Angle in degrees to rotate about the z-axis.
]
call[name[axis_rotation], parameter[name[self].points, name[angle]]]
|
keyword[def] identifier[rotate_z] ( identifier[self] , identifier[angle] ):
literal[string]
identifier[axis_rotation] ( identifier[self] . identifier[points] , identifier[angle] , identifier[inplace] = keyword[True] , identifier[axis] = literal[string] )
|
def rotate_z(self, angle):
"""
Rotates mesh about the z-axis.
Parameters
----------
angle : float
Angle in degrees to rotate about the z-axis.
"""
axis_rotation(self.points, angle, inplace=True, axis='z')
|
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
# adaptive softmax (including standard softmax)
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
|
def function[tie_weights, parameter[self]]:
constant[ Run this to be sure output and input (adaptive) softmax weights are tied ]
if compare[name[self].sample_softmax greater[>] constant[0]] begin[:]
if name[self].config.tie_weight begin[:]
name[self].out_layer.weight assign[=] name[self].transformer.word_emb.weight
|
keyword[def] identifier[tie_weights] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[sample_softmax] > literal[int] :
keyword[if] identifier[self] . identifier[config] . identifier[tie_weight] :
identifier[self] . identifier[out_layer] . identifier[weight] = identifier[self] . identifier[transformer] . identifier[word_emb] . identifier[weight]
keyword[else] :
keyword[if] identifier[self] . identifier[config] . identifier[tie_weight] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[crit] . identifier[out_layers] )):
identifier[self] . identifier[crit] . identifier[out_layers] [ identifier[i] ]. identifier[weight] = identifier[self] . identifier[transformer] . identifier[word_emb] . identifier[emb_layers] [ identifier[i] ]. identifier[weight]
keyword[if] identifier[self] . identifier[config] . identifier[tie_projs] :
keyword[for] identifier[i] , identifier[tie_proj] keyword[in] identifier[enumerate] ( identifier[self] . identifier[config] . identifier[tie_projs] ):
keyword[if] identifier[tie_proj] keyword[and] identifier[self] . identifier[config] . identifier[div_val] == literal[int] keyword[and] identifier[self] . identifier[config] . identifier[d_model] != identifier[self] . identifier[config] . identifier[d_embed] :
identifier[self] . identifier[crit] . identifier[out_projs] [ identifier[i] ]= identifier[self] . identifier[transformer] . identifier[word_emb] . identifier[emb_projs] [ literal[int] ]
keyword[elif] identifier[tie_proj] keyword[and] identifier[self] . identifier[config] . identifier[div_val] != literal[int] :
identifier[self] . identifier[crit] . identifier[out_projs] [ identifier[i] ]= identifier[self] . identifier[transformer] . identifier[word_emb] . identifier[emb_projs] [ identifier[i] ]
|
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# adaptive softmax (including standard softmax)
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
if self.config.tie_projs:
for (i, tie_proj) in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and (self.config.d_model != self.config.d_embed):
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] # depends on [control=['if'], data=[]]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
|
def register_frontend_media(request, media):
"""
Add a :class:`~django.forms.Media` class to the current request.
This will be rendered by the ``render_plugin_media`` template tag.
"""
if not hasattr(request, '_fluent_contents_frontend_media'):
request._fluent_contents_frontend_media = Media()
add_media(request._fluent_contents_frontend_media, media)
|
def function[register_frontend_media, parameter[request, media]]:
constant[
Add a :class:`~django.forms.Media` class to the current request.
This will be rendered by the ``render_plugin_media`` template tag.
]
if <ast.UnaryOp object at 0x7da1b10e5450> begin[:]
name[request]._fluent_contents_frontend_media assign[=] call[name[Media], parameter[]]
call[name[add_media], parameter[name[request]._fluent_contents_frontend_media, name[media]]]
|
keyword[def] identifier[register_frontend_media] ( identifier[request] , identifier[media] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[request] , literal[string] ):
identifier[request] . identifier[_fluent_contents_frontend_media] = identifier[Media] ()
identifier[add_media] ( identifier[request] . identifier[_fluent_contents_frontend_media] , identifier[media] )
|
def register_frontend_media(request, media):
"""
Add a :class:`~django.forms.Media` class to the current request.
This will be rendered by the ``render_plugin_media`` template tag.
"""
if not hasattr(request, '_fluent_contents_frontend_media'):
request._fluent_contents_frontend_media = Media() # depends on [control=['if'], data=[]]
add_media(request._fluent_contents_frontend_media, media)
|
def initialize_sentry_integration(): # pragma: no cover
"""\
Used to optionally initialize the Sentry service with this app.
See https://docs.sentry.io/platforms/python/pyramid/
"""
# This function is not under coverage because it is boilerplate
# from the Sentry documentation.
try:
import sentry_sdk
from sentry_sdk.integrations.pyramid import PyramidIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
except ImportError:
warnings.warn(
"Sentry is not configured because the Sentry SDK "
"(sentry_sdk package) is not installed",
UserWarning,
)
return # bail out early
try:
dsn = os.environ['SENTRY_DSN']
except KeyError:
warnings.warn(
"Sentry is not configured because SENTRY_DSN "
"was not supplied.",
UserWarning,
)
else:
sentry_sdk.init(
dsn=dsn,
integrations=[PyramidIntegration(), CeleryIntegration()],
)
|
def function[initialize_sentry_integration, parameter[]]:
constant[ Used to optionally initialize the Sentry service with this app.
See https://docs.sentry.io/platforms/python/pyramid/
]
<ast.Try object at 0x7da1b003d960>
<ast.Try object at 0x7da1b003c700>
|
keyword[def] identifier[initialize_sentry_integration] ():
literal[string]
keyword[try] :
keyword[import] identifier[sentry_sdk]
keyword[from] identifier[sentry_sdk] . identifier[integrations] . identifier[pyramid] keyword[import] identifier[PyramidIntegration]
keyword[from] identifier[sentry_sdk] . identifier[integrations] . identifier[celery] keyword[import] identifier[CeleryIntegration]
keyword[except] identifier[ImportError] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] ,
identifier[UserWarning] ,
)
keyword[return]
keyword[try] :
identifier[dsn] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] ,
identifier[UserWarning] ,
)
keyword[else] :
identifier[sentry_sdk] . identifier[init] (
identifier[dsn] = identifier[dsn] ,
identifier[integrations] =[ identifier[PyramidIntegration] (), identifier[CeleryIntegration] ()],
)
|
def initialize_sentry_integration(): # pragma: no cover
' Used to optionally initialize the Sentry service with this app.\n See https://docs.sentry.io/platforms/python/pyramid/\n\n '
# This function is not under coverage because it is boilerplate
# from the Sentry documentation.
try:
import sentry_sdk
from sentry_sdk.integrations.pyramid import PyramidIntegration
from sentry_sdk.integrations.celery import CeleryIntegration # depends on [control=['try'], data=[]]
except ImportError:
warnings.warn('Sentry is not configured because the Sentry SDK (sentry_sdk package) is not installed', UserWarning)
return # bail out early # depends on [control=['except'], data=[]]
try:
dsn = os.environ['SENTRY_DSN'] # depends on [control=['try'], data=[]]
except KeyError:
warnings.warn('Sentry is not configured because SENTRY_DSN was not supplied.', UserWarning) # depends on [control=['except'], data=[]]
else:
sentry_sdk.init(dsn=dsn, integrations=[PyramidIntegration(), CeleryIntegration()])
|
def fingerprint_from_var(var):
"""Extract a fingerprint from a GPG public key"""
vsn = gpg_version()
cmd = flatten([gnupg_bin(), gnupg_home()])
if vsn[0] >= 2 and vsn[1] < 1:
cmd.append("--with-fingerprint")
output = polite_string(stderr_with_input(cmd, var)).split('\n')
if not output[0].startswith('pub'):
raise CryptoritoError('probably an invalid gpg key')
if vsn[0] >= 2 and vsn[1] < 1:
return output[1] \
.split('=')[1] \
.replace(' ', '')
return output[1].strip()
|
def function[fingerprint_from_var, parameter[var]]:
constant[Extract a fingerprint from a GPG public key]
variable[vsn] assign[=] call[name[gpg_version], parameter[]]
variable[cmd] assign[=] call[name[flatten], parameter[list[[<ast.Call object at 0x7da1b18e4190>, <ast.Call object at 0x7da1b18e5c60>]]]]
if <ast.BoolOp object at 0x7da1b18e7250> begin[:]
call[name[cmd].append, parameter[constant[--with-fingerprint]]]
variable[output] assign[=] call[call[name[polite_string], parameter[call[name[stderr_with_input], parameter[name[cmd], name[var]]]]].split, parameter[constant[
]]]
if <ast.UnaryOp object at 0x7da1b18e67a0> begin[:]
<ast.Raise object at 0x7da1b18e70d0>
if <ast.BoolOp object at 0x7da1b18e53f0> begin[:]
return[call[call[call[call[name[output]][constant[1]].split, parameter[constant[=]]]][constant[1]].replace, parameter[constant[ ], constant[]]]]
return[call[call[name[output]][constant[1]].strip, parameter[]]]
|
keyword[def] identifier[fingerprint_from_var] ( identifier[var] ):
literal[string]
identifier[vsn] = identifier[gpg_version] ()
identifier[cmd] = identifier[flatten] ([ identifier[gnupg_bin] (), identifier[gnupg_home] ()])
keyword[if] identifier[vsn] [ literal[int] ]>= literal[int] keyword[and] identifier[vsn] [ literal[int] ]< literal[int] :
identifier[cmd] . identifier[append] ( literal[string] )
identifier[output] = identifier[polite_string] ( identifier[stderr_with_input] ( identifier[cmd] , identifier[var] )). identifier[split] ( literal[string] )
keyword[if] keyword[not] identifier[output] [ literal[int] ]. identifier[startswith] ( literal[string] ):
keyword[raise] identifier[CryptoritoError] ( literal[string] )
keyword[if] identifier[vsn] [ literal[int] ]>= literal[int] keyword[and] identifier[vsn] [ literal[int] ]< literal[int] :
keyword[return] identifier[output] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[output] [ literal[int] ]. identifier[strip] ()
|
def fingerprint_from_var(var):
"""Extract a fingerprint from a GPG public key"""
vsn = gpg_version()
cmd = flatten([gnupg_bin(), gnupg_home()])
if vsn[0] >= 2 and vsn[1] < 1:
cmd.append('--with-fingerprint') # depends on [control=['if'], data=[]]
output = polite_string(stderr_with_input(cmd, var)).split('\n')
if not output[0].startswith('pub'):
raise CryptoritoError('probably an invalid gpg key') # depends on [control=['if'], data=[]]
if vsn[0] >= 2 and vsn[1] < 1:
return output[1].split('=')[1].replace(' ', '') # depends on [control=['if'], data=[]]
return output[1].strip()
|
def remove_datasource(jboss_config, name, profile=None):
'''
Remove an existing datasource from the running jboss instance.
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
profile
The profile (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.remove_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_datasource_name
'''
log.debug("======================== MODULE FUNCTION: jboss7.remove_datasource, name=%s, profile=%s", name, profile)
operation = '/subsystem=datasources/data-source={name}:remove'.format(name=name)
if profile is not None:
operation = '/profile="{profile}"'.format(profile=profile) + operation
return __salt__['jboss7_cli.run_operation'](jboss_config, operation, fail_on_error=False)
|
def function[remove_datasource, parameter[jboss_config, name, profile]]:
constant[
Remove an existing datasource from the running jboss instance.
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
profile
The profile (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.remove_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_datasource_name
]
call[name[log].debug, parameter[constant[======================== MODULE FUNCTION: jboss7.remove_datasource, name=%s, profile=%s], name[name], name[profile]]]
variable[operation] assign[=] call[constant[/subsystem=datasources/data-source={name}:remove].format, parameter[]]
if compare[name[profile] is_not constant[None]] begin[:]
variable[operation] assign[=] binary_operation[call[constant[/profile="{profile}"].format, parameter[]] + name[operation]]
return[call[call[name[__salt__]][constant[jboss7_cli.run_operation]], parameter[name[jboss_config], name[operation]]]]
|
keyword[def] identifier[remove_datasource] ( identifier[jboss_config] , identifier[name] , identifier[profile] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[name] , identifier[profile] )
identifier[operation] = literal[string] . identifier[format] ( identifier[name] = identifier[name] )
keyword[if] identifier[profile] keyword[is] keyword[not] keyword[None] :
identifier[operation] = literal[string] . identifier[format] ( identifier[profile] = identifier[profile] )+ identifier[operation]
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[jboss_config] , identifier[operation] , identifier[fail_on_error] = keyword[False] )
|
def remove_datasource(jboss_config, name, profile=None):
"""
Remove an existing datasource from the running jboss instance.
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
profile
The profile (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.remove_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_datasource_name
"""
log.debug('======================== MODULE FUNCTION: jboss7.remove_datasource, name=%s, profile=%s', name, profile)
operation = '/subsystem=datasources/data-source={name}:remove'.format(name=name)
if profile is not None:
operation = '/profile="{profile}"'.format(profile=profile) + operation # depends on [control=['if'], data=['profile']]
return __salt__['jboss7_cli.run_operation'](jboss_config, operation, fail_on_error=False)
|
def getVMstats(self):
"""Return stats for Virtual Memory Subsystem.
@return: Dictionary of stats.
"""
info_dict = {}
try:
fp = open(vmstatFile, 'r')
data = fp.read()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % vmstatFile)
for line in data.splitlines():
cols = line.split()
if len(cols) == 2:
info_dict[cols[0]] = cols[1]
return info_dict
|
def function[getVMstats, parameter[self]]:
constant[Return stats for Virtual Memory Subsystem.
@return: Dictionary of stats.
]
variable[info_dict] assign[=] dictionary[[], []]
<ast.Try object at 0x7da2054a45e0>
for taget[name[line]] in starred[call[name[data].splitlines, parameter[]]] begin[:]
variable[cols] assign[=] call[name[line].split, parameter[]]
if compare[call[name[len], parameter[name[cols]]] equal[==] constant[2]] begin[:]
call[name[info_dict]][call[name[cols]][constant[0]]] assign[=] call[name[cols]][constant[1]]
return[name[info_dict]]
|
keyword[def] identifier[getVMstats] ( identifier[self] ):
literal[string]
identifier[info_dict] ={}
keyword[try] :
identifier[fp] = identifier[open] ( identifier[vmstatFile] , literal[string] )
identifier[data] = identifier[fp] . identifier[read] ()
identifier[fp] . identifier[close] ()
keyword[except] :
keyword[raise] identifier[IOError] ( literal[string] % identifier[vmstatFile] )
keyword[for] identifier[line] keyword[in] identifier[data] . identifier[splitlines] ():
identifier[cols] = identifier[line] . identifier[split] ()
keyword[if] identifier[len] ( identifier[cols] )== literal[int] :
identifier[info_dict] [ identifier[cols] [ literal[int] ]]= identifier[cols] [ literal[int] ]
keyword[return] identifier[info_dict]
|
def getVMstats(self):
"""Return stats for Virtual Memory Subsystem.
@return: Dictionary of stats.
"""
info_dict = {}
try:
fp = open(vmstatFile, 'r')
data = fp.read()
fp.close() # depends on [control=['try'], data=[]]
except:
raise IOError('Failed reading stats from file: %s' % vmstatFile) # depends on [control=['except'], data=[]]
for line in data.splitlines():
cols = line.split()
if len(cols) == 2:
info_dict[cols[0]] = cols[1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return info_dict
|
def _preprocess(self, data, train):
"""Zero-mean, unit-variance normalization by default"""
if train:
inputs, labels = data
self.data_mean = inputs.mean(axis=0)
self.data_std = inputs.std(axis=0)
self.labels_mean = labels.mean(axis=0)
self.labels_std = labels.std(axis=0)
return ((inputs-self.data_mean)/self.data_std, (labels-self.labels_mean)/self.labels_std)
else:
return (data-self.data_mean)/self.data_std
|
def function[_preprocess, parameter[self, data, train]]:
constant[Zero-mean, unit-variance normalization by default]
if name[train] begin[:]
<ast.Tuple object at 0x7da1b1ceec50> assign[=] name[data]
name[self].data_mean assign[=] call[name[inputs].mean, parameter[]]
name[self].data_std assign[=] call[name[inputs].std, parameter[]]
name[self].labels_mean assign[=] call[name[labels].mean, parameter[]]
name[self].labels_std assign[=] call[name[labels].std, parameter[]]
return[tuple[[<ast.BinOp object at 0x7da1b1cee470>, <ast.BinOp object at 0x7da1b1ceece0>]]]
|
keyword[def] identifier[_preprocess] ( identifier[self] , identifier[data] , identifier[train] ):
literal[string]
keyword[if] identifier[train] :
identifier[inputs] , identifier[labels] = identifier[data]
identifier[self] . identifier[data_mean] = identifier[inputs] . identifier[mean] ( identifier[axis] = literal[int] )
identifier[self] . identifier[data_std] = identifier[inputs] . identifier[std] ( identifier[axis] = literal[int] )
identifier[self] . identifier[labels_mean] = identifier[labels] . identifier[mean] ( identifier[axis] = literal[int] )
identifier[self] . identifier[labels_std] = identifier[labels] . identifier[std] ( identifier[axis] = literal[int] )
keyword[return] (( identifier[inputs] - identifier[self] . identifier[data_mean] )/ identifier[self] . identifier[data_std] ,( identifier[labels] - identifier[self] . identifier[labels_mean] )/ identifier[self] . identifier[labels_std] )
keyword[else] :
keyword[return] ( identifier[data] - identifier[self] . identifier[data_mean] )/ identifier[self] . identifier[data_std]
|
def _preprocess(self, data, train):
"""Zero-mean, unit-variance normalization by default"""
if train:
(inputs, labels) = data
self.data_mean = inputs.mean(axis=0)
self.data_std = inputs.std(axis=0)
self.labels_mean = labels.mean(axis=0)
self.labels_std = labels.std(axis=0)
return ((inputs - self.data_mean) / self.data_std, (labels - self.labels_mean) / self.labels_std) # depends on [control=['if'], data=[]]
else:
return (data - self.data_mean) / self.data_std
|
def rename_file(self, relativePath, newRelativePath,
force=False, raiseError=True, ntrials=3):
"""
Rename a file in the repository. It insures renaming the file in the system.
:Parameters:
#. relativePath (string): The relative to the repository path of
the file that needst to be renamed.
#. newRelativePath (string): The new relative to the repository path
of where to move and rename the file.
#. force (boolean): Whether to force renaming even when another
repository file exists. In this case old repository file
will be removed from the repository and the system as well.
#. raiseError (boolean): Whether to raise encountered error instead
of returning failure.
#. ntrials (int): After aquiring all locks, ntrials is the maximum
number of trials allowed before failing.
In rare cases, when multiple processes
are accessing the same repository components, different processes
can alter repository components between successive lock releases
of some other process. Bigger number of trials lowers the
likelyhood of failure due to multiple processes same time
alteration.
:Returns:
#. success (boolean): Whether renaming the file was successful.
#. message (None, string): Some explanatory message or error reason
why directory was not updated.
"""
assert isinstance(raiseError, bool), "raiseError must be boolean"
assert isinstance(force, bool), "force must be boolean"
assert isinstance(ntrials, int), "ntrials must be integer"
assert ntrials>0, "ntrials must be >0"
# check old name and path
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
realPath = os.path.join(self.__path,relativePath)
fPath, fName = os.path.split(realPath)
# check new name and path
newRelativePath = self.to_repo_relative_path(path=newRelativePath, split=False)
newRealPath = os.path.join(self.__path,newRelativePath)
nfPath, nfName = os.path.split(newRealPath)
# lock old file
LO = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))
acquired, code = LO.acquire_lock()
if not acquired:
error = "Code %s. Unable to aquire the lock for old file '%s'"%(code,relativePath)
assert not raiseError, error
return False, error
# add directory
try:
success, reason = self.add_directory(nfPath, raiseError=False, ntrials=ntrials)
except Exception as err:
reason = "Unable to add directory (%s)"%(str(err))
success = False
if not success:
LO.release_lock()
assert not raiseError, reason
return False, reason
# create new file lock
LN = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(nfPath,self.__fileLock%nfName))
acquired, code = LN.acquire_lock()
if not acquired:
LO.release_lock()
error = "Code %s. Unable to aquire the lock for new file path '%s'"%(code,newRelativePath)
assert not raiseError, error
return False, error
# rename file
for _trial in range(ntrials):
renamed = False
error = None
try:
# check whether it's a repository file
isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)
assert isRepoFile, "file '%s' is not a repository file"%(relativePath,)
assert fileOnDisk, "file '%s' is found on disk"%(relativePath,)
assert infoOnDisk, "%s is found on disk"%self.__fileInfo%fName
assert classOnDisk, "%s is found on disk"%self.__fileClass%fName
# get new file path
nisRepoFile,nfileOnDisk,ninfoOnDisk,nclassOnDisk = self.is_repository_file(newRelativePath)
assert not nisRepoFile or force, "New file path is a registered repository file, set force to True to proceed regardless"
# get parent directories list
oDirList = self.__get_repository_directory(fPath)
nDirList = self.__get_repository_directory(nfPath)
# remove new file and all repository files from disk
if os.path.isfile(newRealPath):
os.remove(newRealPath)
if os.path.isfile(os.path.join(nfPath,self.__fileInfo%nfName)):
os.remove(os.path.join(nfPath,self.__fileInfo%nfName))
if os.path.isfile(os.path.join(nfPath,self.__fileClass%nfName)):
os.remove(os.path.join(nfPath,self.__fileClass%nfName))
# move old file to new path
os.rename(realPath, newRealPath)
os.rename(os.path.join(fPath,self.__fileInfo%fName), os.path.join(nfPath,self.__fileInfo%nfName))
os.rename(os.path.join(fPath,self.__fileClass%fName), os.path.join(nfPath,self.__fileClass%nfName))
# update list
findex = oDirList.index(fName)
oDirList.pop(findex)
# update new list
if nfName not in nDirList:
nDirList.append(nfName)
except Exception as err:
renamed = False
error = str(err)
if self.DEBUG_PRINT_FAILED_TRIALS: print("Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute"%(_trial, inspect.stack()[1][3], str(error)))
else:
renamed = True
break
# release locks
LO.release_lock()
LN.release_lock()
# always clean old file lock
try:
if os.path.isfile(os.path.join(fPath,self.__fileLock%fName)):
os.remove(os.path.join(fPath,self.__fileLock%fName))
except:
pass
# return
assert renamed or not raiseError, "Unable to rename file '%s' to '%s' after %i trials (%s)"%(relativePath, newRelativePath, ntrials, error,)
#assert renamed or not raiseError, '\n'.join(message)
return renamed, error
|
def function[rename_file, parameter[self, relativePath, newRelativePath, force, raiseError, ntrials]]:
constant[
Rename a file in the repository. It insures renaming the file in the system.
:Parameters:
#. relativePath (string): The relative to the repository path of
the file that needst to be renamed.
#. newRelativePath (string): The new relative to the repository path
of where to move and rename the file.
#. force (boolean): Whether to force renaming even when another
repository file exists. In this case old repository file
will be removed from the repository and the system as well.
#. raiseError (boolean): Whether to raise encountered error instead
of returning failure.
#. ntrials (int): After aquiring all locks, ntrials is the maximum
number of trials allowed before failing.
In rare cases, when multiple processes
are accessing the same repository components, different processes
can alter repository components between successive lock releases
of some other process. Bigger number of trials lowers the
likelyhood of failure due to multiple processes same time
alteration.
:Returns:
#. success (boolean): Whether renaming the file was successful.
#. message (None, string): Some explanatory message or error reason
why directory was not updated.
]
assert[call[name[isinstance], parameter[name[raiseError], name[bool]]]]
assert[call[name[isinstance], parameter[name[force], name[bool]]]]
assert[call[name[isinstance], parameter[name[ntrials], name[int]]]]
assert[compare[name[ntrials] greater[>] constant[0]]]
variable[relativePath] assign[=] call[name[self].to_repo_relative_path, parameter[]]
variable[realPath] assign[=] call[name[os].path.join, parameter[name[self].__path, name[relativePath]]]
<ast.Tuple object at 0x7da1b021c250> assign[=] call[name[os].path.split, parameter[name[realPath]]]
variable[newRelativePath] assign[=] call[name[self].to_repo_relative_path, parameter[]]
variable[newRealPath] assign[=] call[name[os].path.join, parameter[name[self].__path, name[newRelativePath]]]
<ast.Tuple object at 0x7da1b021e890> assign[=] call[name[os].path.split, parameter[name[newRealPath]]]
variable[LO] assign[=] call[name[Locker], parameter[]]
<ast.Tuple object at 0x7da1b021f0a0> assign[=] call[name[LO].acquire_lock, parameter[]]
if <ast.UnaryOp object at 0x7da1b021ebc0> begin[:]
variable[error] assign[=] binary_operation[constant[Code %s. Unable to aquire the lock for old file '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b021e0e0>, <ast.Name object at 0x7da1b021e500>]]]
assert[<ast.UnaryOp object at 0x7da1b021c580>]
return[tuple[[<ast.Constant object at 0x7da1b021f8b0>, <ast.Name object at 0x7da1b021faf0>]]]
<ast.Try object at 0x7da1b021c2e0>
if <ast.UnaryOp object at 0x7da1b021c430> begin[:]
call[name[LO].release_lock, parameter[]]
assert[<ast.UnaryOp object at 0x7da1b021f7c0>]
return[tuple[[<ast.Constant object at 0x7da1b021c160>, <ast.Name object at 0x7da1b021e560>]]]
variable[LN] assign[=] call[name[Locker], parameter[]]
<ast.Tuple object at 0x7da1b021eb90> assign[=] call[name[LN].acquire_lock, parameter[]]
if <ast.UnaryOp object at 0x7da1b021ed70> begin[:]
call[name[LO].release_lock, parameter[]]
variable[error] assign[=] binary_operation[constant[Code %s. Unable to aquire the lock for new file path '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b021c3d0>, <ast.Name object at 0x7da1b021f3a0>]]]
assert[<ast.UnaryOp object at 0x7da1b021e920>]
return[tuple[[<ast.Constant object at 0x7da1b021eb00>, <ast.Name object at 0x7da1b021c730>]]]
for taget[name[_trial]] in starred[call[name[range], parameter[name[ntrials]]]] begin[:]
variable[renamed] assign[=] constant[False]
variable[error] assign[=] constant[None]
<ast.Try object at 0x7da204567d30>
call[name[LO].release_lock, parameter[]]
call[name[LN].release_lock, parameter[]]
<ast.Try object at 0x7da20c993400>
assert[<ast.BoolOp object at 0x7da2047e96f0>]
return[tuple[[<ast.Name object at 0x7da2047eabf0>, <ast.Name object at 0x7da2047e81c0>]]]
|
keyword[def] identifier[rename_file] ( identifier[self] , identifier[relativePath] , identifier[newRelativePath] ,
identifier[force] = keyword[False] , identifier[raiseError] = keyword[True] , identifier[ntrials] = literal[int] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[raiseError] , identifier[bool] ), literal[string]
keyword[assert] identifier[isinstance] ( identifier[force] , identifier[bool] ), literal[string]
keyword[assert] identifier[isinstance] ( identifier[ntrials] , identifier[int] ), literal[string]
keyword[assert] identifier[ntrials] > literal[int] , literal[string]
identifier[relativePath] = identifier[self] . identifier[to_repo_relative_path] ( identifier[path] = identifier[relativePath] , identifier[split] = keyword[False] )
identifier[realPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[__path] , identifier[relativePath] )
identifier[fPath] , identifier[fName] = identifier[os] . identifier[path] . identifier[split] ( identifier[realPath] )
identifier[newRelativePath] = identifier[self] . identifier[to_repo_relative_path] ( identifier[path] = identifier[newRelativePath] , identifier[split] = keyword[False] )
identifier[newRealPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[__path] , identifier[newRelativePath] )
identifier[nfPath] , identifier[nfName] = identifier[os] . identifier[path] . identifier[split] ( identifier[newRealPath] )
identifier[LO] = identifier[Locker] ( identifier[filePath] = keyword[None] , identifier[lockPass] = identifier[str] ( identifier[uuid] . identifier[uuid1] ()), identifier[lockPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileLock] % identifier[fName] ))
identifier[acquired] , identifier[code] = identifier[LO] . identifier[acquire_lock] ()
keyword[if] keyword[not] identifier[acquired] :
identifier[error] = literal[string] %( identifier[code] , identifier[relativePath] )
keyword[assert] keyword[not] identifier[raiseError] , identifier[error]
keyword[return] keyword[False] , identifier[error]
keyword[try] :
identifier[success] , identifier[reason] = identifier[self] . identifier[add_directory] ( identifier[nfPath] , identifier[raiseError] = keyword[False] , identifier[ntrials] = identifier[ntrials] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[reason] = literal[string] %( identifier[str] ( identifier[err] ))
identifier[success] = keyword[False]
keyword[if] keyword[not] identifier[success] :
identifier[LO] . identifier[release_lock] ()
keyword[assert] keyword[not] identifier[raiseError] , identifier[reason]
keyword[return] keyword[False] , identifier[reason]
identifier[LN] = identifier[Locker] ( identifier[filePath] = keyword[None] , identifier[lockPass] = identifier[str] ( identifier[uuid] . identifier[uuid1] ()), identifier[lockPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileLock] % identifier[nfName] ))
identifier[acquired] , identifier[code] = identifier[LN] . identifier[acquire_lock] ()
keyword[if] keyword[not] identifier[acquired] :
identifier[LO] . identifier[release_lock] ()
identifier[error] = literal[string] %( identifier[code] , identifier[newRelativePath] )
keyword[assert] keyword[not] identifier[raiseError] , identifier[error]
keyword[return] keyword[False] , identifier[error]
keyword[for] identifier[_trial] keyword[in] identifier[range] ( identifier[ntrials] ):
identifier[renamed] = keyword[False]
identifier[error] = keyword[None]
keyword[try] :
identifier[isRepoFile] , identifier[fileOnDisk] , identifier[infoOnDisk] , identifier[classOnDisk] = identifier[self] . identifier[is_repository_file] ( identifier[relativePath] )
keyword[assert] identifier[isRepoFile] , literal[string] %( identifier[relativePath] ,)
keyword[assert] identifier[fileOnDisk] , literal[string] %( identifier[relativePath] ,)
keyword[assert] identifier[infoOnDisk] , literal[string] % identifier[self] . identifier[__fileInfo] % identifier[fName]
keyword[assert] identifier[classOnDisk] , literal[string] % identifier[self] . identifier[__fileClass] % identifier[fName]
identifier[nisRepoFile] , identifier[nfileOnDisk] , identifier[ninfoOnDisk] , identifier[nclassOnDisk] = identifier[self] . identifier[is_repository_file] ( identifier[newRelativePath] )
keyword[assert] keyword[not] identifier[nisRepoFile] keyword[or] identifier[force] , literal[string]
identifier[oDirList] = identifier[self] . identifier[__get_repository_directory] ( identifier[fPath] )
identifier[nDirList] = identifier[self] . identifier[__get_repository_directory] ( identifier[nfPath] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[newRealPath] ):
identifier[os] . identifier[remove] ( identifier[newRealPath] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileInfo] % identifier[nfName] )):
identifier[os] . identifier[remove] ( identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileInfo] % identifier[nfName] ))
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileClass] % identifier[nfName] )):
identifier[os] . identifier[remove] ( identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileClass] % identifier[nfName] ))
identifier[os] . identifier[rename] ( identifier[realPath] , identifier[newRealPath] )
identifier[os] . identifier[rename] ( identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileInfo] % identifier[fName] ), identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileInfo] % identifier[nfName] ))
identifier[os] . identifier[rename] ( identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileClass] % identifier[fName] ), identifier[os] . identifier[path] . identifier[join] ( identifier[nfPath] , identifier[self] . identifier[__fileClass] % identifier[nfName] ))
identifier[findex] = identifier[oDirList] . identifier[index] ( identifier[fName] )
identifier[oDirList] . identifier[pop] ( identifier[findex] )
keyword[if] identifier[nfName] keyword[not] keyword[in] identifier[nDirList] :
identifier[nDirList] . identifier[append] ( identifier[nfName] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[renamed] = keyword[False]
identifier[error] = identifier[str] ( identifier[err] )
keyword[if] identifier[self] . identifier[DEBUG_PRINT_FAILED_TRIALS] : identifier[print] ( literal[string] %( identifier[_trial] , identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ], identifier[str] ( identifier[error] )))
keyword[else] :
identifier[renamed] = keyword[True]
keyword[break]
identifier[LO] . identifier[release_lock] ()
identifier[LN] . identifier[release_lock] ()
keyword[try] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileLock] % identifier[fName] )):
identifier[os] . identifier[remove] ( identifier[os] . identifier[path] . identifier[join] ( identifier[fPath] , identifier[self] . identifier[__fileLock] % identifier[fName] ))
keyword[except] :
keyword[pass]
keyword[assert] identifier[renamed] keyword[or] keyword[not] identifier[raiseError] , literal[string] %( identifier[relativePath] , identifier[newRelativePath] , identifier[ntrials] , identifier[error] ,)
keyword[return] identifier[renamed] , identifier[error]
|
def rename_file(self, relativePath, newRelativePath, force=False, raiseError=True, ntrials=3):
"""
Rename a file in the repository. It insures renaming the file in the system.
:Parameters:
#. relativePath (string): The relative to the repository path of
the file that needst to be renamed.
#. newRelativePath (string): The new relative to the repository path
of where to move and rename the file.
#. force (boolean): Whether to force renaming even when another
repository file exists. In this case old repository file
will be removed from the repository and the system as well.
#. raiseError (boolean): Whether to raise encountered error instead
of returning failure.
#. ntrials (int): After aquiring all locks, ntrials is the maximum
number of trials allowed before failing.
In rare cases, when multiple processes
are accessing the same repository components, different processes
can alter repository components between successive lock releases
of some other process. Bigger number of trials lowers the
likelyhood of failure due to multiple processes same time
alteration.
:Returns:
#. success (boolean): Whether renaming the file was successful.
#. message (None, string): Some explanatory message or error reason
why directory was not updated.
"""
assert isinstance(raiseError, bool), 'raiseError must be boolean'
assert isinstance(force, bool), 'force must be boolean'
assert isinstance(ntrials, int), 'ntrials must be integer'
assert ntrials > 0, 'ntrials must be >0'
# check old name and path
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
realPath = os.path.join(self.__path, relativePath)
(fPath, fName) = os.path.split(realPath)
# check new name and path
newRelativePath = self.to_repo_relative_path(path=newRelativePath, split=False)
newRealPath = os.path.join(self.__path, newRelativePath)
(nfPath, nfName) = os.path.split(newRealPath)
# lock old file
LO = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath, self.__fileLock % fName))
(acquired, code) = LO.acquire_lock()
if not acquired:
error = "Code %s. Unable to aquire the lock for old file '%s'" % (code, relativePath)
assert not raiseError, error
return (False, error) # depends on [control=['if'], data=[]]
# add directory
try:
(success, reason) = self.add_directory(nfPath, raiseError=False, ntrials=ntrials) # depends on [control=['try'], data=[]]
except Exception as err:
reason = 'Unable to add directory (%s)' % str(err)
success = False # depends on [control=['except'], data=['err']]
if not success:
LO.release_lock()
assert not raiseError, reason
return (False, reason) # depends on [control=['if'], data=[]]
# create new file lock
LN = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(nfPath, self.__fileLock % nfName))
(acquired, code) = LN.acquire_lock()
if not acquired:
LO.release_lock()
error = "Code %s. Unable to aquire the lock for new file path '%s'" % (code, newRelativePath)
assert not raiseError, error
return (False, error) # depends on [control=['if'], data=[]]
# rename file
for _trial in range(ntrials):
renamed = False
error = None
try:
# check whether it's a repository file
(isRepoFile, fileOnDisk, infoOnDisk, classOnDisk) = self.is_repository_file(relativePath)
assert isRepoFile, "file '%s' is not a repository file" % (relativePath,)
assert fileOnDisk, "file '%s' is found on disk" % (relativePath,)
assert infoOnDisk, '%s is found on disk' % self.__fileInfo % fName
assert classOnDisk, '%s is found on disk' % self.__fileClass % fName
# get new file path
(nisRepoFile, nfileOnDisk, ninfoOnDisk, nclassOnDisk) = self.is_repository_file(newRelativePath)
assert not nisRepoFile or force, 'New file path is a registered repository file, set force to True to proceed regardless'
# get parent directories list
oDirList = self.__get_repository_directory(fPath)
nDirList = self.__get_repository_directory(nfPath)
# remove new file and all repository files from disk
if os.path.isfile(newRealPath):
os.remove(newRealPath) # depends on [control=['if'], data=[]]
if os.path.isfile(os.path.join(nfPath, self.__fileInfo % nfName)):
os.remove(os.path.join(nfPath, self.__fileInfo % nfName)) # depends on [control=['if'], data=[]]
if os.path.isfile(os.path.join(nfPath, self.__fileClass % nfName)):
os.remove(os.path.join(nfPath, self.__fileClass % nfName)) # depends on [control=['if'], data=[]]
# move old file to new path
os.rename(realPath, newRealPath)
os.rename(os.path.join(fPath, self.__fileInfo % fName), os.path.join(nfPath, self.__fileInfo % nfName))
os.rename(os.path.join(fPath, self.__fileClass % fName), os.path.join(nfPath, self.__fileClass % nfName))
# update list
findex = oDirList.index(fName)
oDirList.pop(findex)
# update new list
if nfName not in nDirList:
nDirList.append(nfName) # depends on [control=['if'], data=['nfName', 'nDirList']] # depends on [control=['try'], data=[]]
except Exception as err:
renamed = False
error = str(err)
if self.DEBUG_PRINT_FAILED_TRIALS:
print('Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute' % (_trial, inspect.stack()[1][3], str(error))) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
else:
renamed = True
break # depends on [control=['for'], data=['_trial']]
# release locks
LO.release_lock()
LN.release_lock()
# always clean old file lock
try:
if os.path.isfile(os.path.join(fPath, self.__fileLock % fName)):
os.remove(os.path.join(fPath, self.__fileLock % fName)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
# return
assert renamed or not raiseError, "Unable to rename file '%s' to '%s' after %i trials (%s)" % (relativePath, newRelativePath, ntrials, error)
#assert renamed or not raiseError, '\n'.join(message)
return (renamed, error)
|
def query(url, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
'''
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
return salt.utils.http.query(url=url, opts=opts, **kwargs)
|
def function[query, parameter[url]]:
constant[
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
]
variable[opts] assign[=] call[name[__opts__].copy, parameter[]]
if compare[constant[opts] in name[kwargs]] begin[:]
call[name[opts].update, parameter[call[name[kwargs]][constant[opts]]]]
<ast.Delete object at 0x7da1b1c16470>
return[call[name[salt].utils.http.query, parameter[]]]
|
keyword[def] identifier[query] ( identifier[url] ,** identifier[kwargs] ):
literal[string]
identifier[opts] = identifier[__opts__] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[opts] . identifier[update] ( identifier[kwargs] [ literal[string] ])
keyword[del] identifier[kwargs] [ literal[string] ]
keyword[return] identifier[salt] . identifier[utils] . identifier[http] . identifier[query] ( identifier[url] = identifier[url] , identifier[opts] = identifier[opts] ,** identifier[kwargs] )
|
def query(url, **kwargs):
"""
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
"""
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts'] # depends on [control=['if'], data=['kwargs']]
return salt.utils.http.query(url=url, opts=opts, **kwargs)
|
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
LOG.info("Deleting QoS policy %(qos_policy)s on port %(port)s",
dict(qos_policy=qos_policy, port=port))
self._utils.remove_port_qos_rule(port["port_id"])
|
def function[delete, parameter[self, port, qos_policy]]:
constant[Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
]
call[name[LOG].info, parameter[constant[Deleting QoS policy %(qos_policy)s on port %(port)s], call[name[dict], parameter[]]]]
call[name[self]._utils.remove_port_qos_rule, parameter[call[name[port]][constant[port_id]]]]
|
keyword[def] identifier[delete] ( identifier[self] , identifier[port] , identifier[qos_policy] = keyword[None] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] ,
identifier[dict] ( identifier[qos_policy] = identifier[qos_policy] , identifier[port] = identifier[port] ))
identifier[self] . identifier[_utils] . identifier[remove_port_qos_rule] ( identifier[port] [ literal[string] ])
|
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
LOG.info('Deleting QoS policy %(qos_policy)s on port %(port)s', dict(qos_policy=qos_policy, port=port))
self._utils.remove_port_qos_rule(port['port_id'])
|
def rows(self) -> List[List[str]]:
"""
Returns the table rows.
"""
return [list(d.values()) for d in self.data]
|
def function[rows, parameter[self]]:
constant[
Returns the table rows.
]
return[<ast.ListComp object at 0x7da20c796b30>]
|
keyword[def] identifier[rows] ( identifier[self] )-> identifier[List] [ identifier[List] [ identifier[str] ]]:
literal[string]
keyword[return] [ identifier[list] ( identifier[d] . identifier[values] ()) keyword[for] identifier[d] keyword[in] identifier[self] . identifier[data] ]
|
def rows(self) -> List[List[str]]:
"""
Returns the table rows.
"""
return [list(d.values()) for d in self.data]
|
def get_search_token_from_orcid(self, scope='/read-public'):
"""Get a token for searching ORCID records.
Parameters
----------
:param scope: string
/read-public or /read-member
Returns
-------
:returns: string
The token.
"""
payload = {'client_id': self._key,
'client_secret': self._secret,
'scope': scope,
'grant_type': 'client_credentials'
}
url = "%s/oauth/token" % self._endpoint
headers = {'Accept': 'application/json'}
response = requests.post(url, data=payload, headers=headers,
timeout=self._timeout)
response.raise_for_status()
if self.do_store_raw_response:
self.raw_response = response
return response.json()['access_token']
|
def function[get_search_token_from_orcid, parameter[self, scope]]:
constant[Get a token for searching ORCID records.
Parameters
----------
:param scope: string
/read-public or /read-member
Returns
-------
:returns: string
The token.
]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da204567790>, <ast.Constant object at 0x7da204567340>, <ast.Constant object at 0x7da204567d30>, <ast.Constant object at 0x7da204565fc0>], [<ast.Attribute object at 0x7da204564e80>, <ast.Attribute object at 0x7da204565570>, <ast.Name object at 0x7da2045641c0>, <ast.Constant object at 0x7da2045649d0>]]
variable[url] assign[=] binary_operation[constant[%s/oauth/token] <ast.Mod object at 0x7da2590d6920> name[self]._endpoint]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da204564910>], [<ast.Constant object at 0x7da204567e80>]]
variable[response] assign[=] call[name[requests].post, parameter[name[url]]]
call[name[response].raise_for_status, parameter[]]
if name[self].do_store_raw_response begin[:]
name[self].raw_response assign[=] name[response]
return[call[call[name[response].json, parameter[]]][constant[access_token]]]
|
keyword[def] identifier[get_search_token_from_orcid] ( identifier[self] , identifier[scope] = literal[string] ):
literal[string]
identifier[payload] ={ literal[string] : identifier[self] . identifier[_key] ,
literal[string] : identifier[self] . identifier[_secret] ,
literal[string] : identifier[scope] ,
literal[string] : literal[string]
}
identifier[url] = literal[string] % identifier[self] . identifier[_endpoint]
identifier[headers] ={ literal[string] : literal[string] }
identifier[response] = identifier[requests] . identifier[post] ( identifier[url] , identifier[data] = identifier[payload] , identifier[headers] = identifier[headers] ,
identifier[timeout] = identifier[self] . identifier[_timeout] )
identifier[response] . identifier[raise_for_status] ()
keyword[if] identifier[self] . identifier[do_store_raw_response] :
identifier[self] . identifier[raw_response] = identifier[response]
keyword[return] identifier[response] . identifier[json] ()[ literal[string] ]
|
def get_search_token_from_orcid(self, scope='/read-public'):
"""Get a token for searching ORCID records.
Parameters
----------
:param scope: string
/read-public or /read-member
Returns
-------
:returns: string
The token.
"""
payload = {'client_id': self._key, 'client_secret': self._secret, 'scope': scope, 'grant_type': 'client_credentials'}
url = '%s/oauth/token' % self._endpoint
headers = {'Accept': 'application/json'}
response = requests.post(url, data=payload, headers=headers, timeout=self._timeout)
response.raise_for_status()
if self.do_store_raw_response:
self.raw_response = response # depends on [control=['if'], data=[]]
return response.json()['access_token']
|
def Flavio_to_Fierz_nunu(C, ddll, parameters, norm_gf=True):
"""From Flavio semileptonic basis to semileptonic Fierz basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
p = parameters
V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
if ddll[:2] == 'sb':
xi = V[2, 2] * V[2, 1].conj()
elif ddll[:2] == 'db':
xi = V[2, 2] * V[2, 0].conj()
elif ddll[:2] == 'ds':
xi = V[2, 1] * V[2, 0].conj()
else:
raise ValueError("Unexpected flavours: {}".format(ddll[:2]))
q1, q2 = ddll[:2]
l1 = ddll[4:ddll.find('n')]
l2 = ddll[ddll.find('_', 5) + 1:]
ind = q1 + q2 + l1 + l2
# flavio has indices within currents inverted
indnu = q2 + q1 + 'nu' + l2 + 'nu' + l1
e = sqrt(4* pi * parameters['alpha_e'])
dic = {
'F' + ind + 'nu': C["CL_" + indnu] / ((8 * pi**2) / e**2),
'F' + ind + 'nup': C["CR_" + indnu] / ((8 * pi**2) / e**2),
}
if norm_gf:
prefactor = sqrt(2)/p['GF']/xi/4
else:
prefactor = 1 / xi
return {k: v / prefactor for k, v in dic.items()}
|
def function[Flavio_to_Fierz_nunu, parameter[C, ddll, parameters, norm_gf]]:
constant[From Flavio semileptonic basis to semileptonic Fierz basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.]
variable[p] assign[=] name[parameters]
variable[V] assign[=] call[name[ckmutil].ckm.ckm_tree, parameter[call[name[p]][constant[Vus]], call[name[p]][constant[Vub]], call[name[p]][constant[Vcb]], call[name[p]][constant[delta]]]]
if compare[call[name[ddll]][<ast.Slice object at 0x7da1b195c8e0>] equal[==] constant[sb]] begin[:]
variable[xi] assign[=] binary_operation[call[name[V]][tuple[[<ast.Constant object at 0x7da1b195ca90>, <ast.Constant object at 0x7da1b195cac0>]]] * call[call[name[V]][tuple[[<ast.Constant object at 0x7da1b195cbe0>, <ast.Constant object at 0x7da1b195cc10>]]].conj, parameter[]]]
<ast.Tuple object at 0x7da1b1981420> assign[=] call[name[ddll]][<ast.Slice object at 0x7da1b1a9c520>]
variable[l1] assign[=] call[name[ddll]][<ast.Slice object at 0x7da1b1a9c5b0>]
variable[l2] assign[=] call[name[ddll]][<ast.Slice object at 0x7da1b1a9cf70>]
variable[ind] assign[=] binary_operation[binary_operation[binary_operation[name[q1] + name[q2]] + name[l1]] + name[l2]]
variable[indnu] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[q2] + name[q1]] + constant[nu]] + name[l2]] + constant[nu]] + name[l1]]
variable[e] assign[=] call[name[sqrt], parameter[binary_operation[binary_operation[constant[4] * name[pi]] * call[name[parameters]][constant[alpha_e]]]]]
variable[dic] assign[=] dictionary[[<ast.BinOp object at 0x7da1b19705e0>, <ast.BinOp object at 0x7da1b19706d0>], [<ast.BinOp object at 0x7da1b19707c0>, <ast.BinOp object at 0x7da1b1970a90>]]
if name[norm_gf] begin[:]
variable[prefactor] assign[=] binary_operation[binary_operation[binary_operation[call[name[sqrt], parameter[constant[2]]] / call[name[p]][constant[GF]]] / name[xi]] / constant[4]]
return[<ast.DictComp object at 0x7da1b1971390>]
|
keyword[def] identifier[Flavio_to_Fierz_nunu] ( identifier[C] , identifier[ddll] , identifier[parameters] , identifier[norm_gf] = keyword[True] ):
literal[string]
identifier[p] = identifier[parameters]
identifier[V] = identifier[ckmutil] . identifier[ckm] . identifier[ckm_tree] ( identifier[p] [ literal[string] ], identifier[p] [ literal[string] ], identifier[p] [ literal[string] ], identifier[p] [ literal[string] ])
keyword[if] identifier[ddll] [: literal[int] ]== literal[string] :
identifier[xi] = identifier[V] [ literal[int] , literal[int] ]* identifier[V] [ literal[int] , literal[int] ]. identifier[conj] ()
keyword[elif] identifier[ddll] [: literal[int] ]== literal[string] :
identifier[xi] = identifier[V] [ literal[int] , literal[int] ]* identifier[V] [ literal[int] , literal[int] ]. identifier[conj] ()
keyword[elif] identifier[ddll] [: literal[int] ]== literal[string] :
identifier[xi] = identifier[V] [ literal[int] , literal[int] ]* identifier[V] [ literal[int] , literal[int] ]. identifier[conj] ()
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[ddll] [: literal[int] ]))
identifier[q1] , identifier[q2] = identifier[ddll] [: literal[int] ]
identifier[l1] = identifier[ddll] [ literal[int] : identifier[ddll] . identifier[find] ( literal[string] )]
identifier[l2] = identifier[ddll] [ identifier[ddll] . identifier[find] ( literal[string] , literal[int] )+ literal[int] :]
identifier[ind] = identifier[q1] + identifier[q2] + identifier[l1] + identifier[l2]
identifier[indnu] = identifier[q2] + identifier[q1] + literal[string] + identifier[l2] + literal[string] + identifier[l1]
identifier[e] = identifier[sqrt] ( literal[int] * identifier[pi] * identifier[parameters] [ literal[string] ])
identifier[dic] ={
literal[string] + identifier[ind] + literal[string] : identifier[C] [ literal[string] + identifier[indnu] ]/(( literal[int] * identifier[pi] ** literal[int] )/ identifier[e] ** literal[int] ),
literal[string] + identifier[ind] + literal[string] : identifier[C] [ literal[string] + identifier[indnu] ]/(( literal[int] * identifier[pi] ** literal[int] )/ identifier[e] ** literal[int] ),
}
keyword[if] identifier[norm_gf] :
identifier[prefactor] = identifier[sqrt] ( literal[int] )/ identifier[p] [ literal[string] ]/ identifier[xi] / literal[int]
keyword[else] :
identifier[prefactor] = literal[int] / identifier[xi]
keyword[return] { identifier[k] : identifier[v] / identifier[prefactor] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dic] . identifier[items] ()}
|
def Flavio_to_Fierz_nunu(C, ddll, parameters, norm_gf=True):
"""From Flavio semileptonic basis to semileptonic Fierz basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
p = parameters
V = ckmutil.ckm.ckm_tree(p['Vus'], p['Vub'], p['Vcb'], p['delta'])
if ddll[:2] == 'sb':
xi = V[2, 2] * V[2, 1].conj() # depends on [control=['if'], data=[]]
elif ddll[:2] == 'db':
xi = V[2, 2] * V[2, 0].conj() # depends on [control=['if'], data=[]]
elif ddll[:2] == 'ds':
xi = V[2, 1] * V[2, 0].conj() # depends on [control=['if'], data=[]]
else:
raise ValueError('Unexpected flavours: {}'.format(ddll[:2]))
(q1, q2) = ddll[:2]
l1 = ddll[4:ddll.find('n')]
l2 = ddll[ddll.find('_', 5) + 1:]
ind = q1 + q2 + l1 + l2
# flavio has indices within currents inverted
indnu = q2 + q1 + 'nu' + l2 + 'nu' + l1
e = sqrt(4 * pi * parameters['alpha_e'])
dic = {'F' + ind + 'nu': C['CL_' + indnu] / (8 * pi ** 2 / e ** 2), 'F' + ind + 'nup': C['CR_' + indnu] / (8 * pi ** 2 / e ** 2)}
if norm_gf:
prefactor = sqrt(2) / p['GF'] / xi / 4 # depends on [control=['if'], data=[]]
else:
prefactor = 1 / xi
return {k: v / prefactor for (k, v) in dic.items()}
|
def sys_munmap(self, addr, size):
"""
Unmaps a file from memory. It deletes the mappings for the specified address range
:rtype: int
:param addr: the starting address to unmap.
:param size: the size of the portion to unmap.
:return: C{0} on success.
"""
self.current.memory.munmap(addr, size)
return 0
|
def function[sys_munmap, parameter[self, addr, size]]:
constant[
Unmaps a file from memory. It deletes the mappings for the specified address range
:rtype: int
:param addr: the starting address to unmap.
:param size: the size of the portion to unmap.
:return: C{0} on success.
]
call[name[self].current.memory.munmap, parameter[name[addr], name[size]]]
return[constant[0]]
|
keyword[def] identifier[sys_munmap] ( identifier[self] , identifier[addr] , identifier[size] ):
literal[string]
identifier[self] . identifier[current] . identifier[memory] . identifier[munmap] ( identifier[addr] , identifier[size] )
keyword[return] literal[int]
|
def sys_munmap(self, addr, size):
"""
Unmaps a file from memory. It deletes the mappings for the specified address range
:rtype: int
:param addr: the starting address to unmap.
:param size: the size of the portion to unmap.
:return: C{0} on success.
"""
self.current.memory.munmap(addr, size)
return 0
|
def recent_comments(context):
"""
Dashboard widget for displaying recent comments.
"""
latest = context["settings"].COMMENTS_NUM_LATEST
comments = ThreadedComment.objects.all().select_related("user")
context["comments"] = comments.order_by("-id")[:latest]
return context
|
def function[recent_comments, parameter[context]]:
constant[
Dashboard widget for displaying recent comments.
]
variable[latest] assign[=] call[name[context]][constant[settings]].COMMENTS_NUM_LATEST
variable[comments] assign[=] call[call[name[ThreadedComment].objects.all, parameter[]].select_related, parameter[constant[user]]]
call[name[context]][constant[comments]] assign[=] call[call[name[comments].order_by, parameter[constant[-id]]]][<ast.Slice object at 0x7da1b13094e0>]
return[name[context]]
|
keyword[def] identifier[recent_comments] ( identifier[context] ):
literal[string]
identifier[latest] = identifier[context] [ literal[string] ]. identifier[COMMENTS_NUM_LATEST]
identifier[comments] = identifier[ThreadedComment] . identifier[objects] . identifier[all] (). identifier[select_related] ( literal[string] )
identifier[context] [ literal[string] ]= identifier[comments] . identifier[order_by] ( literal[string] )[: identifier[latest] ]
keyword[return] identifier[context]
|
def recent_comments(context):
"""
Dashboard widget for displaying recent comments.
"""
latest = context['settings'].COMMENTS_NUM_LATEST
comments = ThreadedComment.objects.all().select_related('user')
context['comments'] = comments.order_by('-id')[:latest]
return context
|
def format_tsv_line(source, edge, target, value=None, metadata=None):
"""
Render a single line for TSV file with data flow described
:type source str
:type edge str
:type target str
:type value float
:type metadata str
:rtype: str
"""
return '{source}\t{edge}\t{target}\t{value}\t{metadata}'.format(
source=source,
edge=edge,
target=target,
value='{:.4f}'.format(value) if value is not None else '',
metadata=metadata or ''
).rstrip(' \t')
|
def function[format_tsv_line, parameter[source, edge, target, value, metadata]]:
constant[
Render a single line for TSV file with data flow described
:type source str
:type edge str
:type target str
:type value float
:type metadata str
:rtype: str
]
return[call[call[constant[{source} {edge} {target} {value} {metadata}].format, parameter[]].rstrip, parameter[constant[ ]]]]
|
keyword[def] identifier[format_tsv_line] ( identifier[source] , identifier[edge] , identifier[target] , identifier[value] = keyword[None] , identifier[metadata] = keyword[None] ):
literal[string]
keyword[return] literal[string] . identifier[format] (
identifier[source] = identifier[source] ,
identifier[edge] = identifier[edge] ,
identifier[target] = identifier[target] ,
identifier[value] = literal[string] . identifier[format] ( identifier[value] ) keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] ,
identifier[metadata] = identifier[metadata] keyword[or] literal[string]
). identifier[rstrip] ( literal[string] )
|
def format_tsv_line(source, edge, target, value=None, metadata=None):
"""
Render a single line for TSV file with data flow described
:type source str
:type edge str
:type target str
:type value float
:type metadata str
:rtype: str
"""
return '{source}\t{edge}\t{target}\t{value}\t{metadata}'.format(source=source, edge=edge, target=target, value='{:.4f}'.format(value) if value is not None else '', metadata=metadata or '').rstrip(' \t')
|
def publish(self,topic,options=None,args=None,kwargs=None):
""" Publishes a messages to the server
"""
topic = self.get_full_uri(topic)
if options is None:
options = {'acknowledge':True}
if options.get('acknowledge'):
request = PUBLISH(
options=options or {},
topic=topic,
args=args or [],
kwargs=kwargs or {}
)
result = self.send_and_await_response(request)
return result
else:
request = PUBLISH(
options=options or {},
topic=topic,
args=args or [],
kwargs=kwargs or {}
)
self.send_message(request)
return request.request_id
|
def function[publish, parameter[self, topic, options, args, kwargs]]:
constant[ Publishes a messages to the server
]
variable[topic] assign[=] call[name[self].get_full_uri, parameter[name[topic]]]
if compare[name[options] is constant[None]] begin[:]
variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da18fe90700>], [<ast.Constant object at 0x7da18fe91ff0>]]
if call[name[options].get, parameter[constant[acknowledge]]] begin[:]
variable[request] assign[=] call[name[PUBLISH], parameter[]]
variable[result] assign[=] call[name[self].send_and_await_response, parameter[name[request]]]
return[name[result]]
|
keyword[def] identifier[publish] ( identifier[self] , identifier[topic] , identifier[options] = keyword[None] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] ):
literal[string]
identifier[topic] = identifier[self] . identifier[get_full_uri] ( identifier[topic] )
keyword[if] identifier[options] keyword[is] keyword[None] :
identifier[options] ={ literal[string] : keyword[True] }
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[request] = identifier[PUBLISH] (
identifier[options] = identifier[options] keyword[or] {},
identifier[topic] = identifier[topic] ,
identifier[args] = identifier[args] keyword[or] [],
identifier[kwargs] = identifier[kwargs] keyword[or] {}
)
identifier[result] = identifier[self] . identifier[send_and_await_response] ( identifier[request] )
keyword[return] identifier[result]
keyword[else] :
identifier[request] = identifier[PUBLISH] (
identifier[options] = identifier[options] keyword[or] {},
identifier[topic] = identifier[topic] ,
identifier[args] = identifier[args] keyword[or] [],
identifier[kwargs] = identifier[kwargs] keyword[or] {}
)
identifier[self] . identifier[send_message] ( identifier[request] )
keyword[return] identifier[request] . identifier[request_id]
|
def publish(self, topic, options=None, args=None, kwargs=None):
""" Publishes a messages to the server
"""
topic = self.get_full_uri(topic)
if options is None:
options = {'acknowledge': True} # depends on [control=['if'], data=['options']]
if options.get('acknowledge'):
request = PUBLISH(options=options or {}, topic=topic, args=args or [], kwargs=kwargs or {})
result = self.send_and_await_response(request)
return result # depends on [control=['if'], data=[]]
else:
request = PUBLISH(options=options or {}, topic=topic, args=args or [], kwargs=kwargs or {})
self.send_message(request)
return request.request_id
|
def default_callback(pending, timeout):
"""This is the default shutdown callback that is set on the options.
It prints out a message to stderr that informs the user that some events
are still pending and the process is waiting for them to flush out.
"""
def echo(msg):
sys.stderr.write(msg + "\n")
echo("Sentry is attempting to send %i pending error messages" % pending)
echo("Waiting up to %s seconds" % timeout)
echo("Press Ctrl-%s to quit" % (os.name == "nt" and "Break" or "C"))
sys.stderr.flush()
|
def function[default_callback, parameter[pending, timeout]]:
constant[This is the default shutdown callback that is set on the options.
It prints out a message to stderr that informs the user that some events
are still pending and the process is waiting for them to flush out.
]
def function[echo, parameter[msg]]:
call[name[sys].stderr.write, parameter[binary_operation[name[msg] + constant[
]]]]
call[name[echo], parameter[binary_operation[constant[Sentry is attempting to send %i pending error messages] <ast.Mod object at 0x7da2590d6920> name[pending]]]]
call[name[echo], parameter[binary_operation[constant[Waiting up to %s seconds] <ast.Mod object at 0x7da2590d6920> name[timeout]]]]
call[name[echo], parameter[binary_operation[constant[Press Ctrl-%s to quit] <ast.Mod object at 0x7da2590d6920> <ast.BoolOp object at 0x7da1b18a0d90>]]]
call[name[sys].stderr.flush, parameter[]]
|
keyword[def] identifier[default_callback] ( identifier[pending] , identifier[timeout] ):
literal[string]
keyword[def] identifier[echo] ( identifier[msg] ):
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[msg] + literal[string] )
identifier[echo] ( literal[string] % identifier[pending] )
identifier[echo] ( literal[string] % identifier[timeout] )
identifier[echo] ( literal[string] %( identifier[os] . identifier[name] == literal[string] keyword[and] literal[string] keyword[or] literal[string] ))
identifier[sys] . identifier[stderr] . identifier[flush] ()
|
def default_callback(pending, timeout):
"""This is the default shutdown callback that is set on the options.
It prints out a message to stderr that informs the user that some events
are still pending and the process is waiting for them to flush out.
"""
def echo(msg):
sys.stderr.write(msg + '\n')
echo('Sentry is attempting to send %i pending error messages' % pending)
echo('Waiting up to %s seconds' % timeout)
echo('Press Ctrl-%s to quit' % (os.name == 'nt' and 'Break' or 'C'))
sys.stderr.flush()
|
def update_lbaas_member(self, lbaas_member, lbaas_pool, body=None):
"""Updates a lbaas_member."""
return self.put(self.lbaas_member_path % (lbaas_pool, lbaas_member),
body=body)
|
def function[update_lbaas_member, parameter[self, lbaas_member, lbaas_pool, body]]:
constant[Updates a lbaas_member.]
return[call[name[self].put, parameter[binary_operation[name[self].lbaas_member_path <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00f130>, <ast.Name object at 0x7da18f00e710>]]]]]]
|
keyword[def] identifier[update_lbaas_member] ( identifier[self] , identifier[lbaas_member] , identifier[lbaas_pool] , identifier[body] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[put] ( identifier[self] . identifier[lbaas_member_path] %( identifier[lbaas_pool] , identifier[lbaas_member] ),
identifier[body] = identifier[body] )
|
def update_lbaas_member(self, lbaas_member, lbaas_pool, body=None):
"""Updates a lbaas_member."""
return self.put(self.lbaas_member_path % (lbaas_pool, lbaas_member), body=body)
|
def _reset_non_empty(self, indices):
"""Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
"""
observ = tf.py_func(
self._batch_env.reset, [indices], self.observ_dtype, name="reset")
observ.set_shape(indices.get_shape().concatenate(self.observ_shape))
with tf.control_dependencies([
tf.scatter_update(self._observ, indices, observ)]):
return tf.identity(observ)
|
def function[_reset_non_empty, parameter[self, indices]]:
constant[Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
]
variable[observ] assign[=] call[name[tf].py_func, parameter[name[self]._batch_env.reset, list[[<ast.Name object at 0x7da18bcca740>]], name[self].observ_dtype]]
call[name[observ].set_shape, parameter[call[call[name[indices].get_shape, parameter[]].concatenate, parameter[name[self].observ_shape]]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Call object at 0x7da20c6c6710>]]]] begin[:]
return[call[name[tf].identity, parameter[name[observ]]]]
|
keyword[def] identifier[_reset_non_empty] ( identifier[self] , identifier[indices] ):
literal[string]
identifier[observ] = identifier[tf] . identifier[py_func] (
identifier[self] . identifier[_batch_env] . identifier[reset] ,[ identifier[indices] ], identifier[self] . identifier[observ_dtype] , identifier[name] = literal[string] )
identifier[observ] . identifier[set_shape] ( identifier[indices] . identifier[get_shape] (). identifier[concatenate] ( identifier[self] . identifier[observ_shape] ))
keyword[with] identifier[tf] . identifier[control_dependencies] ([
identifier[tf] . identifier[scatter_update] ( identifier[self] . identifier[_observ] , identifier[indices] , identifier[observ] )]):
keyword[return] identifier[tf] . identifier[identity] ( identifier[observ] )
|
def _reset_non_empty(self, indices):
"""Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
"""
observ = tf.py_func(self._batch_env.reset, [indices], self.observ_dtype, name='reset')
observ.set_shape(indices.get_shape().concatenate(self.observ_shape))
with tf.control_dependencies([tf.scatter_update(self._observ, indices, observ)]):
return tf.identity(observ) # depends on [control=['with'], data=[]]
|
def _normalize_instancemethod(instance_method):
"""
wraps(instancemethod) returns a function, not an instancemethod so its repr() is all messed up;
we want the original repr to show up in the logs, therefore we do this trick
"""
if not hasattr(instance_method, 'im_self'):
return instance_method
def _func(*args, **kwargs):
return instance_method(*args, **kwargs)
_func.__name__ = repr(instance_method)
return _func
|
def function[_normalize_instancemethod, parameter[instance_method]]:
constant[
wraps(instancemethod) returns a function, not an instancemethod so its repr() is all messed up;
we want the original repr to show up in the logs, therefore we do this trick
]
if <ast.UnaryOp object at 0x7da20c6a8b80> begin[:]
return[name[instance_method]]
def function[_func, parameter[]]:
return[call[name[instance_method], parameter[<ast.Starred object at 0x7da20e9552a0>]]]
name[_func].__name__ assign[=] call[name[repr], parameter[name[instance_method]]]
return[name[_func]]
|
keyword[def] identifier[_normalize_instancemethod] ( identifier[instance_method] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[instance_method] , literal[string] ):
keyword[return] identifier[instance_method]
keyword[def] identifier[_func] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[instance_method] (* identifier[args] ,** identifier[kwargs] )
identifier[_func] . identifier[__name__] = identifier[repr] ( identifier[instance_method] )
keyword[return] identifier[_func]
|
def _normalize_instancemethod(instance_method):
"""
wraps(instancemethod) returns a function, not an instancemethod so its repr() is all messed up;
we want the original repr to show up in the logs, therefore we do this trick
"""
if not hasattr(instance_method, 'im_self'):
return instance_method # depends on [control=['if'], data=[]]
def _func(*args, **kwargs):
return instance_method(*args, **kwargs)
_func.__name__ = repr(instance_method)
return _func
|
def get_team(self, id):
"""
:calls: `GET /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param id: integer
:rtype: :class:`github.Team.Team`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/teams/" + str(id)
)
return github.Team.Team(self._requester, headers, data, completed=True)
|
def function[get_team, parameter[self, id]]:
constant[
:calls: `GET /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param id: integer
:rtype: :class:`github.Team.Team`
]
assert[call[name[isinstance], parameter[name[id], tuple[[<ast.Name object at 0x7da20c7caf50>, <ast.Name object at 0x7da20c7c8820>]]]]]
<ast.Tuple object at 0x7da20c7cba00> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[GET], binary_operation[constant[/teams/] + call[name[str], parameter[name[id]]]]]]
return[call[name[github].Team.Team, parameter[name[self]._requester, name[headers], name[data]]]]
|
keyword[def] identifier[get_team] ( identifier[self] , identifier[id] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[id] ,( identifier[int] , identifier[long] )), identifier[id]
identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] (
literal[string] ,
literal[string] + identifier[str] ( identifier[id] )
)
keyword[return] identifier[github] . identifier[Team] . identifier[Team] ( identifier[self] . identifier[_requester] , identifier[headers] , identifier[data] , identifier[completed] = keyword[True] )
|
def get_team(self, id):
"""
:calls: `GET /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param id: integer
:rtype: :class:`github.Team.Team`
"""
assert isinstance(id, (int, long)), id
(headers, data) = self._requester.requestJsonAndCheck('GET', '/teams/' + str(id))
return github.Team.Team(self._requester, headers, data, completed=True)
|
def update(self, E=None, **F):
'''flatten nested dictionaries to update pathwise
>>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}})
{'foo': {'bar': 'glork', 'blub': 'bla'}
In contrast to:
>>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}})
{'foo: {'blub': 'bla'}'}
'''
def _update(D):
for k,v in D.items():
if super(ConfigDict, self).__contains__(k):
if isinstance(self[k], ConfigDict):
self[k].update(v)
else:
self[k] = self.assimilate(v)
else:
self[k] = self.assimilate(v)
if E is not None:
if not hasattr(E, 'keys'):
E = self.assimilate(dict(E))
_update(E)
_update(F)
return self
|
def function[update, parameter[self, E]]:
constant[flatten nested dictionaries to update pathwise
>>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}})
{'foo': {'bar': 'glork', 'blub': 'bla'}
In contrast to:
>>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}})
{'foo: {'blub': 'bla'}'}
]
def function[_update, parameter[D]]:
for taget[tuple[[<ast.Name object at 0x7da2041da050>, <ast.Name object at 0x7da2041d9e40>]]] in starred[call[name[D].items, parameter[]]] begin[:]
if call[call[name[super], parameter[name[ConfigDict], name[self]]].__contains__, parameter[name[k]]] begin[:]
if call[name[isinstance], parameter[call[name[self]][name[k]], name[ConfigDict]]] begin[:]
call[call[name[self]][name[k]].update, parameter[name[v]]]
if compare[name[E] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da2041d9b10> begin[:]
variable[E] assign[=] call[name[self].assimilate, parameter[call[name[dict], parameter[name[E]]]]]
call[name[_update], parameter[name[E]]]
call[name[_update], parameter[name[F]]]
return[name[self]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[E] = keyword[None] ,** identifier[F] ):
literal[string]
keyword[def] identifier[_update] ( identifier[D] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[D] . identifier[items] ():
keyword[if] identifier[super] ( identifier[ConfigDict] , identifier[self] ). identifier[__contains__] ( identifier[k] ):
keyword[if] identifier[isinstance] ( identifier[self] [ identifier[k] ], identifier[ConfigDict] ):
identifier[self] [ identifier[k] ]. identifier[update] ( identifier[v] )
keyword[else] :
identifier[self] [ identifier[k] ]= identifier[self] . identifier[assimilate] ( identifier[v] )
keyword[else] :
identifier[self] [ identifier[k] ]= identifier[self] . identifier[assimilate] ( identifier[v] )
keyword[if] identifier[E] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[E] , literal[string] ):
identifier[E] = identifier[self] . identifier[assimilate] ( identifier[dict] ( identifier[E] ))
identifier[_update] ( identifier[E] )
identifier[_update] ( identifier[F] )
keyword[return] identifier[self]
|
def update(self, E=None, **F):
"""flatten nested dictionaries to update pathwise
>>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}})
{'foo': {'bar': 'glork', 'blub': 'bla'}
In contrast to:
>>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}})
{'foo: {'blub': 'bla'}'}
"""
def _update(D):
for (k, v) in D.items():
if super(ConfigDict, self).__contains__(k):
if isinstance(self[k], ConfigDict):
self[k].update(v) # depends on [control=['if'], data=[]]
else:
self[k] = self.assimilate(v) # depends on [control=['if'], data=[]]
else:
self[k] = self.assimilate(v) # depends on [control=['for'], data=[]]
if E is not None:
if not hasattr(E, 'keys'):
E = self.assimilate(dict(E)) # depends on [control=['if'], data=[]]
_update(E) # depends on [control=['if'], data=['E']]
_update(F)
return self
|
def realimag_files(xscript=0, yscript="d[1]+1j*d[2]", eyscript=None, exscript=None, paths=None, g=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's real and imaginary parts versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
return files(xscript, yscript, eyscript, exscript, plotter=realimag_databoxes, paths=paths, g=g, **kwargs)
|
def function[realimag_files, parameter[xscript, yscript, eyscript, exscript, paths, g]]:
constant[
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's real and imaginary parts versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
]
return[call[name[files], parameter[name[xscript], name[yscript], name[eyscript], name[exscript]]]]
|
keyword[def] identifier[realimag_files] ( identifier[xscript] = literal[int] , identifier[yscript] = literal[string] , identifier[eyscript] = keyword[None] , identifier[exscript] = keyword[None] , identifier[paths] = keyword[None] , identifier[g] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[files] ( identifier[xscript] , identifier[yscript] , identifier[eyscript] , identifier[exscript] , identifier[plotter] = identifier[realimag_databoxes] , identifier[paths] = identifier[paths] , identifier[g] = identifier[g] ,** identifier[kwargs] )
|
def realimag_files(xscript=0, yscript='d[1]+1j*d[2]', eyscript=None, exscript=None, paths=None, g=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's real and imaginary parts versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
return files(xscript, yscript, eyscript, exscript, plotter=realimag_databoxes, paths=paths, g=g, **kwargs)
|
def compress(obj, level=6, return_type="bytes"):
"""Compress anything to bytes or string.
:param obj: could be any object, usually it could be binary, string, or
regular python objec.t
:param level:
:param return_type: if bytes, then return bytes; if str, then return
base64.b64encode bytes in utf-8 string.
"""
if isinstance(obj, binary_type):
b = _compress_bytes(obj, level)
elif isinstance(obj, string_types):
b = _compress_str(obj, level)
else:
b = _compress_obj(obj, level)
if return_type == "bytes":
return b
elif return_type == "str":
return base64.b64encode(b).decode("utf-8")
else:
raise ValueError("'return_type' has to be one of 'bytes', 'str'!")
|
def function[compress, parameter[obj, level, return_type]]:
constant[Compress anything to bytes or string.
:param obj: could be any object, usually it could be binary, string, or
regular python objec.t
:param level:
:param return_type: if bytes, then return bytes; if str, then return
base64.b64encode bytes in utf-8 string.
]
if call[name[isinstance], parameter[name[obj], name[binary_type]]] begin[:]
variable[b] assign[=] call[name[_compress_bytes], parameter[name[obj], name[level]]]
if compare[name[return_type] equal[==] constant[bytes]] begin[:]
return[name[b]]
|
keyword[def] identifier[compress] ( identifier[obj] , identifier[level] = literal[int] , identifier[return_type] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[binary_type] ):
identifier[b] = identifier[_compress_bytes] ( identifier[obj] , identifier[level] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[string_types] ):
identifier[b] = identifier[_compress_str] ( identifier[obj] , identifier[level] )
keyword[else] :
identifier[b] = identifier[_compress_obj] ( identifier[obj] , identifier[level] )
keyword[if] identifier[return_type] == literal[string] :
keyword[return] identifier[b]
keyword[elif] identifier[return_type] == literal[string] :
keyword[return] identifier[base64] . identifier[b64encode] ( identifier[b] ). identifier[decode] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def compress(obj, level=6, return_type='bytes'):
"""Compress anything to bytes or string.
:param obj: could be any object, usually it could be binary, string, or
regular python objec.t
:param level:
:param return_type: if bytes, then return bytes; if str, then return
base64.b64encode bytes in utf-8 string.
"""
if isinstance(obj, binary_type):
b = _compress_bytes(obj, level) # depends on [control=['if'], data=[]]
elif isinstance(obj, string_types):
b = _compress_str(obj, level) # depends on [control=['if'], data=[]]
else:
b = _compress_obj(obj, level)
if return_type == 'bytes':
return b # depends on [control=['if'], data=[]]
elif return_type == 'str':
return base64.b64encode(b).decode('utf-8') # depends on [control=['if'], data=[]]
else:
raise ValueError("'return_type' has to be one of 'bytes', 'str'!")
|
def running_window(iterable, size):
"""Generate n-size running window.
Example::
>>> for i in running_windows([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
**中文文档**
简单滑窗函数。
"""
if size > len(iterable):
raise ValueError("size can not be greater than length of iterable.")
fifo = collections.deque(maxlen=size)
for i in iterable:
fifo.append(i)
if len(fifo) == size:
yield list(fifo)
|
def function[running_window, parameter[iterable, size]]:
constant[Generate n-size running window.
Example::
>>> for i in running_windows([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
**中文文档**
简单滑窗函数。
]
if compare[name[size] greater[>] call[name[len], parameter[name[iterable]]]] begin[:]
<ast.Raise object at 0x7da1b2372dd0>
variable[fifo] assign[=] call[name[collections].deque, parameter[]]
for taget[name[i]] in starred[name[iterable]] begin[:]
call[name[fifo].append, parameter[name[i]]]
if compare[call[name[len], parameter[name[fifo]]] equal[==] name[size]] begin[:]
<ast.Yield object at 0x7da1b23726b0>
|
keyword[def] identifier[running_window] ( identifier[iterable] , identifier[size] ):
literal[string]
keyword[if] identifier[size] > identifier[len] ( identifier[iterable] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[fifo] = identifier[collections] . identifier[deque] ( identifier[maxlen] = identifier[size] )
keyword[for] identifier[i] keyword[in] identifier[iterable] :
identifier[fifo] . identifier[append] ( identifier[i] )
keyword[if] identifier[len] ( identifier[fifo] )== identifier[size] :
keyword[yield] identifier[list] ( identifier[fifo] )
|
def running_window(iterable, size):
"""Generate n-size running window.
Example::
>>> for i in running_windows([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
**中文文档**
简单滑窗函数。
"""
if size > len(iterable):
raise ValueError('size can not be greater than length of iterable.') # depends on [control=['if'], data=[]]
fifo = collections.deque(maxlen=size)
for i in iterable:
fifo.append(i)
if len(fifo) == size:
yield list(fifo) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
|
def read_analysis(self, file_handle):
"""Read the ANALYSIS segment of the FCS file and store it in self.analysis.
Warning: This has never been tested with an actual fcs file that contains an
analysis segment.
Args:
file_handle: buffer containing FCS data
"""
start = self.annotation['__header__']['analysis start']
end = self.annotation['__header__']['analysis end']
if start != 0 and end != 0:
file_handle.seek(start, 0)
self._analysis = file_handle.read(end - start)
else:
self._analysis = None
|
def function[read_analysis, parameter[self, file_handle]]:
constant[Read the ANALYSIS segment of the FCS file and store it in self.analysis.
Warning: This has never been tested with an actual fcs file that contains an
analysis segment.
Args:
file_handle: buffer containing FCS data
]
variable[start] assign[=] call[call[name[self].annotation][constant[__header__]]][constant[analysis start]]
variable[end] assign[=] call[call[name[self].annotation][constant[__header__]]][constant[analysis end]]
if <ast.BoolOp object at 0x7da1b04d9ea0> begin[:]
call[name[file_handle].seek, parameter[name[start], constant[0]]]
name[self]._analysis assign[=] call[name[file_handle].read, parameter[binary_operation[name[end] - name[start]]]]
|
keyword[def] identifier[read_analysis] ( identifier[self] , identifier[file_handle] ):
literal[string]
identifier[start] = identifier[self] . identifier[annotation] [ literal[string] ][ literal[string] ]
identifier[end] = identifier[self] . identifier[annotation] [ literal[string] ][ literal[string] ]
keyword[if] identifier[start] != literal[int] keyword[and] identifier[end] != literal[int] :
identifier[file_handle] . identifier[seek] ( identifier[start] , literal[int] )
identifier[self] . identifier[_analysis] = identifier[file_handle] . identifier[read] ( identifier[end] - identifier[start] )
keyword[else] :
identifier[self] . identifier[_analysis] = keyword[None]
|
def read_analysis(self, file_handle):
"""Read the ANALYSIS segment of the FCS file and store it in self.analysis.
Warning: This has never been tested with an actual fcs file that contains an
analysis segment.
Args:
file_handle: buffer containing FCS data
"""
start = self.annotation['__header__']['analysis start']
end = self.annotation['__header__']['analysis end']
if start != 0 and end != 0:
file_handle.seek(start, 0)
self._analysis = file_handle.read(end - start) # depends on [control=['if'], data=[]]
else:
self._analysis = None
|
def to_bytes(self):
'''
Return packed byte representation of the TCP header.
'''
header = self._make_header(self._checksum)
return header + self._options.to_bytes()
|
def function[to_bytes, parameter[self]]:
constant[
Return packed byte representation of the TCP header.
]
variable[header] assign[=] call[name[self]._make_header, parameter[name[self]._checksum]]
return[binary_operation[name[header] + call[name[self]._options.to_bytes, parameter[]]]]
|
keyword[def] identifier[to_bytes] ( identifier[self] ):
literal[string]
identifier[header] = identifier[self] . identifier[_make_header] ( identifier[self] . identifier[_checksum] )
keyword[return] identifier[header] + identifier[self] . identifier[_options] . identifier[to_bytes] ()
|
def to_bytes(self):
"""
Return packed byte representation of the TCP header.
"""
header = self._make_header(self._checksum)
return header + self._options.to_bytes()
|
def update(self):
"""Update |C2| based on :math:`c_2 = 1.-c_1-c_3`.
Examples:
The following examples show the calculated value of |C2| are
clipped when to low or to high:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> derived.c1 = 0.6
>>> derived.c3 = 0.1
>>> derived.c2.update()
>>> derived.c2
c2(0.3)
>>> derived.c1 = 1.6
>>> derived.c2.update()
>>> derived.c2
c2(0.0)
>>> derived.c1 = -1.6
>>> derived.c2.update()
>>> derived.c2
c2(1.0)
"""
der = self.subpars
self(numpy.clip(1. - der.c1 - der.c3, 0., 1.))
|
def function[update, parameter[self]]:
constant[Update |C2| based on :math:`c_2 = 1.-c_1-c_3`.
Examples:
The following examples show the calculated value of |C2| are
clipped when to low or to high:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> derived.c1 = 0.6
>>> derived.c3 = 0.1
>>> derived.c2.update()
>>> derived.c2
c2(0.3)
>>> derived.c1 = 1.6
>>> derived.c2.update()
>>> derived.c2
c2(0.0)
>>> derived.c1 = -1.6
>>> derived.c2.update()
>>> derived.c2
c2(1.0)
]
variable[der] assign[=] name[self].subpars
call[name[self], parameter[call[name[numpy].clip, parameter[binary_operation[binary_operation[constant[1.0] - name[der].c1] - name[der].c3], constant[0.0], constant[1.0]]]]]
|
keyword[def] identifier[update] ( identifier[self] ):
literal[string]
identifier[der] = identifier[self] . identifier[subpars]
identifier[self] ( identifier[numpy] . identifier[clip] ( literal[int] - identifier[der] . identifier[c1] - identifier[der] . identifier[c3] , literal[int] , literal[int] ))
|
def update(self):
"""Update |C2| based on :math:`c_2 = 1.-c_1-c_3`.
Examples:
The following examples show the calculated value of |C2| are
clipped when to low or to high:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> derived.c1 = 0.6
>>> derived.c3 = 0.1
>>> derived.c2.update()
>>> derived.c2
c2(0.3)
>>> derived.c1 = 1.6
>>> derived.c2.update()
>>> derived.c2
c2(0.0)
>>> derived.c1 = -1.6
>>> derived.c2.update()
>>> derived.c2
c2(1.0)
"""
der = self.subpars
self(numpy.clip(1.0 - der.c1 - der.c3, 0.0, 1.0))
|
def get_mechs_available():
"""
Returns a list of auth mechanisms that are available to the local
GSSAPI instance. Because we are interacting with Windows, we only
care if SPNEGO, Kerberos and NTLM are available where NTLM is the
only wildcard that may not be available by default.
The only NTLM implementation that works properly is gss-ntlmssp and
part of this test is to verify the gss-ntlmssp OID
GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required
for SPNEGO and NTLM to work properly.
:return: list - A list of supported mechs available in the installed
version of GSSAPI
"""
ntlm_oid = GSSAPIContext._AUTH_MECHANISMS['ntlm']
ntlm_mech = gssapi.OID.from_int_seq(ntlm_oid)
# GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH
# github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68
reset_mech = gssapi.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3")
try:
# we don't actually care about the account used here so just use
# a random username and password
ntlm_context = GSSAPIContext._get_security_context(
gssapi.NameType.user,
ntlm_mech,
"http@server",
"username",
"password"
)
ntlm_context.step()
set_sec_context_option(reset_mech, context=ntlm_context,
value=b"\x00" * 4)
except gssapi.exceptions.GSSError as exc:
# failed to init NTLM and verify gss-ntlmssp is available, this
# means NTLM is either not available or won't work
# (not gss-ntlmssp) so we return kerberos as the only available
# mechanism for the GSSAPI Context
log.debug("Failed to init test NTLM context with GSSAPI: %s"
% str(exc))
return ['kerberos']
else:
return ['auto', 'kerberos', 'ntlm']
|
def function[get_mechs_available, parameter[]]:
constant[
Returns a list of auth mechanisms that are available to the local
GSSAPI instance. Because we are interacting with Windows, we only
care if SPNEGO, Kerberos and NTLM are available where NTLM is the
only wildcard that may not be available by default.
The only NTLM implementation that works properly is gss-ntlmssp and
part of this test is to verify the gss-ntlmssp OID
GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required
for SPNEGO and NTLM to work properly.
:return: list - A list of supported mechs available in the installed
version of GSSAPI
]
variable[ntlm_oid] assign[=] call[name[GSSAPIContext]._AUTH_MECHANISMS][constant[ntlm]]
variable[ntlm_mech] assign[=] call[name[gssapi].OID.from_int_seq, parameter[name[ntlm_oid]]]
variable[reset_mech] assign[=] call[name[gssapi].OID.from_int_seq, parameter[constant[1.3.6.1.4.1.7165.655.1.3]]]
<ast.Try object at 0x7da1b0624250>
|
keyword[def] identifier[get_mechs_available] ():
literal[string]
identifier[ntlm_oid] = identifier[GSSAPIContext] . identifier[_AUTH_MECHANISMS] [ literal[string] ]
identifier[ntlm_mech] = identifier[gssapi] . identifier[OID] . identifier[from_int_seq] ( identifier[ntlm_oid] )
identifier[reset_mech] = identifier[gssapi] . identifier[OID] . identifier[from_int_seq] ( literal[string] )
keyword[try] :
identifier[ntlm_context] = identifier[GSSAPIContext] . identifier[_get_security_context] (
identifier[gssapi] . identifier[NameType] . identifier[user] ,
identifier[ntlm_mech] ,
literal[string] ,
literal[string] ,
literal[string]
)
identifier[ntlm_context] . identifier[step] ()
identifier[set_sec_context_option] ( identifier[reset_mech] , identifier[context] = identifier[ntlm_context] ,
identifier[value] = literal[string] * literal[int] )
keyword[except] identifier[gssapi] . identifier[exceptions] . identifier[GSSError] keyword[as] identifier[exc] :
identifier[log] . identifier[debug] ( literal[string]
% identifier[str] ( identifier[exc] ))
keyword[return] [ literal[string] ]
keyword[else] :
keyword[return] [ literal[string] , literal[string] , literal[string] ]
|
def get_mechs_available():
"""
Returns a list of auth mechanisms that are available to the local
GSSAPI instance. Because we are interacting with Windows, we only
care if SPNEGO, Kerberos and NTLM are available where NTLM is the
only wildcard that may not be available by default.
The only NTLM implementation that works properly is gss-ntlmssp and
part of this test is to verify the gss-ntlmssp OID
GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required
for SPNEGO and NTLM to work properly.
:return: list - A list of supported mechs available in the installed
version of GSSAPI
"""
ntlm_oid = GSSAPIContext._AUTH_MECHANISMS['ntlm']
ntlm_mech = gssapi.OID.from_int_seq(ntlm_oid)
# GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH
# github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68
reset_mech = gssapi.OID.from_int_seq('1.3.6.1.4.1.7165.655.1.3')
try:
# we don't actually care about the account used here so just use
# a random username and password
ntlm_context = GSSAPIContext._get_security_context(gssapi.NameType.user, ntlm_mech, 'http@server', 'username', 'password')
ntlm_context.step()
set_sec_context_option(reset_mech, context=ntlm_context, value=b'\x00' * 4) # depends on [control=['try'], data=[]]
except gssapi.exceptions.GSSError as exc:
# failed to init NTLM and verify gss-ntlmssp is available, this
# means NTLM is either not available or won't work
# (not gss-ntlmssp) so we return kerberos as the only available
# mechanism for the GSSAPI Context
log.debug('Failed to init test NTLM context with GSSAPI: %s' % str(exc))
return ['kerberos'] # depends on [control=['except'], data=['exc']]
else:
return ['auto', 'kerberos', 'ntlm']
|
def json_2_routing_area(json_obj):
"""
transform JSON obj coming from Ariane to ariane_clip3 object
:param json_obj: the JSON obj coming from Ariane
:return: ariane_clip3 RoutingArea object
"""
LOGGER.debug("RoutingArea.json_2_routing_area")
return RoutingArea(raid=json_obj['routingAreaID'],
name=json_obj['routingAreaName'],
description=json_obj['routingAreaDescription'],
ra_type=json_obj['routingAreaType'],
multicast=json_obj['routingAreaMulticast'],
routing_area_loc_ids=json_obj['routingAreaLocationsID'],
routing_area_subnet_ids=json_obj['routingAreaSubnetsID'])
|
def function[json_2_routing_area, parameter[json_obj]]:
constant[
transform JSON obj coming from Ariane to ariane_clip3 object
:param json_obj: the JSON obj coming from Ariane
:return: ariane_clip3 RoutingArea object
]
call[name[LOGGER].debug, parameter[constant[RoutingArea.json_2_routing_area]]]
return[call[name[RoutingArea], parameter[]]]
|
keyword[def] identifier[json_2_routing_area] ( identifier[json_obj] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[return] identifier[RoutingArea] ( identifier[raid] = identifier[json_obj] [ literal[string] ],
identifier[name] = identifier[json_obj] [ literal[string] ],
identifier[description] = identifier[json_obj] [ literal[string] ],
identifier[ra_type] = identifier[json_obj] [ literal[string] ],
identifier[multicast] = identifier[json_obj] [ literal[string] ],
identifier[routing_area_loc_ids] = identifier[json_obj] [ literal[string] ],
identifier[routing_area_subnet_ids] = identifier[json_obj] [ literal[string] ])
|
def json_2_routing_area(json_obj):
"""
transform JSON obj coming from Ariane to ariane_clip3 object
:param json_obj: the JSON obj coming from Ariane
:return: ariane_clip3 RoutingArea object
"""
LOGGER.debug('RoutingArea.json_2_routing_area')
return RoutingArea(raid=json_obj['routingAreaID'], name=json_obj['routingAreaName'], description=json_obj['routingAreaDescription'], ra_type=json_obj['routingAreaType'], multicast=json_obj['routingAreaMulticast'], routing_area_loc_ids=json_obj['routingAreaLocationsID'], routing_area_subnet_ids=json_obj['routingAreaSubnetsID'])
|
def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.format(model=self.__class__.__name__,
kind=kind))
badge = Badge(kind=kind)
if current_user.is_authenticated:
badge.created_by = current_user.id
self.update(__raw__={
'$push': {
'badges': {
'$each': [badge.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self)
on_badge_added.send(self, kind=kind)
return self.get_badge(kind)
|
def function[add_badge, parameter[self, kind]]:
constant[Perform an atomic prepend for a new badge]
variable[badge] assign[=] call[name[self].get_badge, parameter[name[kind]]]
if name[badge] begin[:]
return[name[badge]]
if compare[name[kind] <ast.NotIn object at 0x7da2590d7190> call[name[getattr], parameter[name[self], constant[__badges__], dictionary[[], []]]]] begin[:]
variable[msg] assign[=] constant[Unknown badge type for {model}: {kind}]
<ast.Raise object at 0x7da18f09d390>
variable[badge] assign[=] call[name[Badge], parameter[]]
if name[current_user].is_authenticated begin[:]
name[badge].created_by assign[=] name[current_user].id
call[name[self].update, parameter[]]
call[name[self].reload, parameter[]]
call[name[post_save].send, parameter[name[self].__class__]]
call[name[on_badge_added].send, parameter[name[self]]]
return[call[name[self].get_badge, parameter[name[kind]]]]
|
keyword[def] identifier[add_badge] ( identifier[self] , identifier[kind] ):
literal[string]
identifier[badge] = identifier[self] . identifier[get_badge] ( identifier[kind] )
keyword[if] identifier[badge] :
keyword[return] identifier[badge]
keyword[if] identifier[kind] keyword[not] keyword[in] identifier[getattr] ( identifier[self] , literal[string] ,{}):
identifier[msg] = literal[string]
keyword[raise] identifier[db] . identifier[ValidationError] ( identifier[msg] . identifier[format] ( identifier[model] = identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[kind] = identifier[kind] ))
identifier[badge] = identifier[Badge] ( identifier[kind] = identifier[kind] )
keyword[if] identifier[current_user] . identifier[is_authenticated] :
identifier[badge] . identifier[created_by] = identifier[current_user] . identifier[id]
identifier[self] . identifier[update] ( identifier[__raw__] ={
literal[string] :{
literal[string] :{
literal[string] :[ identifier[badge] . identifier[to_mongo] ()],
literal[string] : literal[int]
}
}
})
identifier[self] . identifier[reload] ()
identifier[post_save] . identifier[send] ( identifier[self] . identifier[__class__] , identifier[document] = identifier[self] )
identifier[on_badge_added] . identifier[send] ( identifier[self] , identifier[kind] = identifier[kind] )
keyword[return] identifier[self] . identifier[get_badge] ( identifier[kind] )
|
def add_badge(self, kind):
"""Perform an atomic prepend for a new badge"""
badge = self.get_badge(kind)
if badge:
return badge # depends on [control=['if'], data=[]]
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.format(model=self.__class__.__name__, kind=kind)) # depends on [control=['if'], data=['kind']]
badge = Badge(kind=kind)
if current_user.is_authenticated:
badge.created_by = current_user.id # depends on [control=['if'], data=[]]
self.update(__raw__={'$push': {'badges': {'$each': [badge.to_mongo()], '$position': 0}}})
self.reload()
post_save.send(self.__class__, document=self)
on_badge_added.send(self, kind=kind)
return self.get_badge(kind)
|
def get_initial_array(events, slots, seed=None):
"""
Obtain a random initial array.
"""
if seed is not None:
np.random.seed(seed)
m = len(events)
n = len(slots)
X = np.zeros((m, n))
for i, row in enumerate(X):
X[i, i] = 1
np.random.shuffle(X)
return X
|
def function[get_initial_array, parameter[events, slots, seed]]:
constant[
Obtain a random initial array.
]
if compare[name[seed] is_not constant[None]] begin[:]
call[name[np].random.seed, parameter[name[seed]]]
variable[m] assign[=] call[name[len], parameter[name[events]]]
variable[n] assign[=] call[name[len], parameter[name[slots]]]
variable[X] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b0432e60>, <ast.Name object at 0x7da1b04309d0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0431b10>, <ast.Name object at 0x7da1b04334c0>]]] in starred[call[name[enumerate], parameter[name[X]]]] begin[:]
call[name[X]][tuple[[<ast.Name object at 0x7da1b0430af0>, <ast.Name object at 0x7da1b04308b0>]]] assign[=] constant[1]
call[name[np].random.shuffle, parameter[name[X]]]
return[name[X]]
|
keyword[def] identifier[get_initial_array] ( identifier[events] , identifier[slots] , identifier[seed] = keyword[None] ):
literal[string]
keyword[if] identifier[seed] keyword[is] keyword[not] keyword[None] :
identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] )
identifier[m] = identifier[len] ( identifier[events] )
identifier[n] = identifier[len] ( identifier[slots] )
identifier[X] = identifier[np] . identifier[zeros] (( identifier[m] , identifier[n] ))
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[X] ):
identifier[X] [ identifier[i] , identifier[i] ]= literal[int]
identifier[np] . identifier[random] . identifier[shuffle] ( identifier[X] )
keyword[return] identifier[X]
|
def get_initial_array(events, slots, seed=None):
"""
Obtain a random initial array.
"""
if seed is not None:
np.random.seed(seed) # depends on [control=['if'], data=['seed']]
m = len(events)
n = len(slots)
X = np.zeros((m, n))
for (i, row) in enumerate(X):
X[i, i] = 1 # depends on [control=['for'], data=[]]
np.random.shuffle(X)
return X
|
def quantize(
self,
input=None,
qout=False,
cutoff=0,
retrain=False,
epoch=None,
lr=None,
thread=None,
verbose=None,
dsub=2,
qnorm=False
):
"""
Quantize the model reducing the size of the model and
it's memory footprint.
"""
a = self.f.getArgs()
if not epoch:
epoch = a.epoch
if not lr:
lr = a.lr
if not thread:
thread = a.thread
if not verbose:
verbose = a.verbose
if retrain and not input:
raise ValueError("Need input file path if retraining")
if input is None:
input = ""
self.f.quantize(
input, qout, cutoff, retrain, epoch, lr, thread, verbose, dsub,
qnorm
)
|
def function[quantize, parameter[self, input, qout, cutoff, retrain, epoch, lr, thread, verbose, dsub, qnorm]]:
constant[
Quantize the model reducing the size of the model and
it's memory footprint.
]
variable[a] assign[=] call[name[self].f.getArgs, parameter[]]
if <ast.UnaryOp object at 0x7da2047e9600> begin[:]
variable[epoch] assign[=] name[a].epoch
if <ast.UnaryOp object at 0x7da2047e89d0> begin[:]
variable[lr] assign[=] name[a].lr
if <ast.UnaryOp object at 0x7da2047eb520> begin[:]
variable[thread] assign[=] name[a].thread
if <ast.UnaryOp object at 0x7da2047e8490> begin[:]
variable[verbose] assign[=] name[a].verbose
if <ast.BoolOp object at 0x7da2047e8d60> begin[:]
<ast.Raise object at 0x7da2047e8e20>
if compare[name[input] is constant[None]] begin[:]
variable[input] assign[=] constant[]
call[name[self].f.quantize, parameter[name[input], name[qout], name[cutoff], name[retrain], name[epoch], name[lr], name[thread], name[verbose], name[dsub], name[qnorm]]]
|
keyword[def] identifier[quantize] (
identifier[self] ,
identifier[input] = keyword[None] ,
identifier[qout] = keyword[False] ,
identifier[cutoff] = literal[int] ,
identifier[retrain] = keyword[False] ,
identifier[epoch] = keyword[None] ,
identifier[lr] = keyword[None] ,
identifier[thread] = keyword[None] ,
identifier[verbose] = keyword[None] ,
identifier[dsub] = literal[int] ,
identifier[qnorm] = keyword[False]
):
literal[string]
identifier[a] = identifier[self] . identifier[f] . identifier[getArgs] ()
keyword[if] keyword[not] identifier[epoch] :
identifier[epoch] = identifier[a] . identifier[epoch]
keyword[if] keyword[not] identifier[lr] :
identifier[lr] = identifier[a] . identifier[lr]
keyword[if] keyword[not] identifier[thread] :
identifier[thread] = identifier[a] . identifier[thread]
keyword[if] keyword[not] identifier[verbose] :
identifier[verbose] = identifier[a] . identifier[verbose]
keyword[if] identifier[retrain] keyword[and] keyword[not] identifier[input] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[input] keyword[is] keyword[None] :
identifier[input] = literal[string]
identifier[self] . identifier[f] . identifier[quantize] (
identifier[input] , identifier[qout] , identifier[cutoff] , identifier[retrain] , identifier[epoch] , identifier[lr] , identifier[thread] , identifier[verbose] , identifier[dsub] ,
identifier[qnorm]
)
|
def quantize(self, input=None, qout=False, cutoff=0, retrain=False, epoch=None, lr=None, thread=None, verbose=None, dsub=2, qnorm=False):
"""
Quantize the model reducing the size of the model and
it's memory footprint.
"""
a = self.f.getArgs()
if not epoch:
epoch = a.epoch # depends on [control=['if'], data=[]]
if not lr:
lr = a.lr # depends on [control=['if'], data=[]]
if not thread:
thread = a.thread # depends on [control=['if'], data=[]]
if not verbose:
verbose = a.verbose # depends on [control=['if'], data=[]]
if retrain and (not input):
raise ValueError('Need input file path if retraining') # depends on [control=['if'], data=[]]
if input is None:
input = '' # depends on [control=['if'], data=['input']]
self.f.quantize(input, qout, cutoff, retrain, epoch, lr, thread, verbose, dsub, qnorm)
|
def _get_vm_by_id(vmid, allDetails=False):
'''
Retrieve a VM based on the ID.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)):
if six.text_type(vm_details['vmid']) == six.text_type(vmid):
return vm_details
log.info('VM with ID "%s" could not be found.', vmid)
return False
|
def function[_get_vm_by_id, parameter[vmid, allDetails]]:
constant[
Retrieve a VM based on the ID.
]
for taget[tuple[[<ast.Name object at 0x7da18bc722c0>, <ast.Name object at 0x7da18bc73040>]]] in starred[call[name[six].iteritems, parameter[call[name[get_resources_vms], parameter[]]]]] begin[:]
if compare[call[name[six].text_type, parameter[call[name[vm_details]][constant[vmid]]]] equal[==] call[name[six].text_type, parameter[name[vmid]]]] begin[:]
return[name[vm_details]]
call[name[log].info, parameter[constant[VM with ID "%s" could not be found.], name[vmid]]]
return[constant[False]]
|
keyword[def] identifier[_get_vm_by_id] ( identifier[vmid] , identifier[allDetails] = keyword[False] ):
literal[string]
keyword[for] identifier[vm_name] , identifier[vm_details] keyword[in] identifier[six] . identifier[iteritems] ( identifier[get_resources_vms] ( identifier[includeConfig] = identifier[allDetails] )):
keyword[if] identifier[six] . identifier[text_type] ( identifier[vm_details] [ literal[string] ])== identifier[six] . identifier[text_type] ( identifier[vmid] ):
keyword[return] identifier[vm_details]
identifier[log] . identifier[info] ( literal[string] , identifier[vmid] )
keyword[return] keyword[False]
|
def _get_vm_by_id(vmid, allDetails=False):
"""
Retrieve a VM based on the ID.
"""
for (vm_name, vm_details) in six.iteritems(get_resources_vms(includeConfig=allDetails)):
if six.text_type(vm_details['vmid']) == six.text_type(vmid):
return vm_details # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
log.info('VM with ID "%s" could not be found.', vmid)
return False
|
def index(credentials=None):
"""Get list of projects"""
user, oauth_access_token = parsecredentials(credentials)
if not settings.ADMINS or user not in settings.ADMINS:
return flask.make_response('You shall not pass!!! You are not an administrator!',403)
usersprojects = {}
totalsize = {}
for f in glob.glob(settings.ROOT + "projects/*"):
if os.path.isdir(f):
u = os.path.basename(f)
usersprojects[u], totalsize[u] = getprojects(u)
usersprojects[u].sort()
return withheaders(flask.make_response(flask.render_template('admin.html',
version=VERSION,
system_id=settings.SYSTEM_ID,
system_name=settings.SYSTEM_NAME,
system_description=settings.SYSTEM_DESCRIPTION,
system_author=settings.SYSTEM_AUTHOR,
system_version=settings.SYSTEM_VERSION,
system_email=settings.SYSTEM_EMAIL,
user=user,
url=getrooturl(),
usersprojects = sorted(usersprojects.items()),
totalsize=totalsize,
allow_origin=settings.ALLOW_ORIGIN,
oauth_access_token=oauth_encrypt(oauth_access_token)
)), "text/html; charset=UTF-8", {'allow_origin':settings.ALLOW_ORIGIN})
|
def function[index, parameter[credentials]]:
constant[Get list of projects]
<ast.Tuple object at 0x7da20c6e6d40> assign[=] call[name[parsecredentials], parameter[name[credentials]]]
if <ast.BoolOp object at 0x7da1b1800e20> begin[:]
return[call[name[flask].make_response, parameter[constant[You shall not pass!!! You are not an administrator!], constant[403]]]]
variable[usersprojects] assign[=] dictionary[[], []]
variable[totalsize] assign[=] dictionary[[], []]
for taget[name[f]] in starred[call[name[glob].glob, parameter[binary_operation[name[settings].ROOT + constant[projects/*]]]]] begin[:]
if call[name[os].path.isdir, parameter[name[f]]] begin[:]
variable[u] assign[=] call[name[os].path.basename, parameter[name[f]]]
<ast.Tuple object at 0x7da1b1800370> assign[=] call[name[getprojects], parameter[name[u]]]
call[call[name[usersprojects]][name[u]].sort, parameter[]]
return[call[name[withheaders], parameter[call[name[flask].make_response, parameter[call[name[flask].render_template, parameter[constant[admin.html]]]]], constant[text/html; charset=UTF-8], dictionary[[<ast.Constant object at 0x7da20c6e7070>], [<ast.Attribute object at 0x7da20c6e5b40>]]]]]
|
keyword[def] identifier[index] ( identifier[credentials] = keyword[None] ):
literal[string]
identifier[user] , identifier[oauth_access_token] = identifier[parsecredentials] ( identifier[credentials] )
keyword[if] keyword[not] identifier[settings] . identifier[ADMINS] keyword[or] identifier[user] keyword[not] keyword[in] identifier[settings] . identifier[ADMINS] :
keyword[return] identifier[flask] . identifier[make_response] ( literal[string] , literal[int] )
identifier[usersprojects] ={}
identifier[totalsize] ={}
keyword[for] identifier[f] keyword[in] identifier[glob] . identifier[glob] ( identifier[settings] . identifier[ROOT] + literal[string] ):
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[f] ):
identifier[u] = identifier[os] . identifier[path] . identifier[basename] ( identifier[f] )
identifier[usersprojects] [ identifier[u] ], identifier[totalsize] [ identifier[u] ]= identifier[getprojects] ( identifier[u] )
identifier[usersprojects] [ identifier[u] ]. identifier[sort] ()
keyword[return] identifier[withheaders] ( identifier[flask] . identifier[make_response] ( identifier[flask] . identifier[render_template] ( literal[string] ,
identifier[version] = identifier[VERSION] ,
identifier[system_id] = identifier[settings] . identifier[SYSTEM_ID] ,
identifier[system_name] = identifier[settings] . identifier[SYSTEM_NAME] ,
identifier[system_description] = identifier[settings] . identifier[SYSTEM_DESCRIPTION] ,
identifier[system_author] = identifier[settings] . identifier[SYSTEM_AUTHOR] ,
identifier[system_version] = identifier[settings] . identifier[SYSTEM_VERSION] ,
identifier[system_email] = identifier[settings] . identifier[SYSTEM_EMAIL] ,
identifier[user] = identifier[user] ,
identifier[url] = identifier[getrooturl] (),
identifier[usersprojects] = identifier[sorted] ( identifier[usersprojects] . identifier[items] ()),
identifier[totalsize] = identifier[totalsize] ,
identifier[allow_origin] = identifier[settings] . identifier[ALLOW_ORIGIN] ,
identifier[oauth_access_token] = identifier[oauth_encrypt] ( identifier[oauth_access_token] )
)), literal[string] ,{ literal[string] : identifier[settings] . identifier[ALLOW_ORIGIN] })
|
def index(credentials=None):
"""Get list of projects"""
(user, oauth_access_token) = parsecredentials(credentials)
if not settings.ADMINS or user not in settings.ADMINS:
return flask.make_response('You shall not pass!!! You are not an administrator!', 403) # depends on [control=['if'], data=[]]
usersprojects = {}
totalsize = {}
for f in glob.glob(settings.ROOT + 'projects/*'):
if os.path.isdir(f):
u = os.path.basename(f)
(usersprojects[u], totalsize[u]) = getprojects(u)
usersprojects[u].sort() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return withheaders(flask.make_response(flask.render_template('admin.html', version=VERSION, system_id=settings.SYSTEM_ID, system_name=settings.SYSTEM_NAME, system_description=settings.SYSTEM_DESCRIPTION, system_author=settings.SYSTEM_AUTHOR, system_version=settings.SYSTEM_VERSION, system_email=settings.SYSTEM_EMAIL, user=user, url=getrooturl(), usersprojects=sorted(usersprojects.items()), totalsize=totalsize, allow_origin=settings.ALLOW_ORIGIN, oauth_access_token=oauth_encrypt(oauth_access_token))), 'text/html; charset=UTF-8', {'allow_origin': settings.ALLOW_ORIGIN})
|
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
def function[set_attributes, parameter[name, attributes, region, key, keyid, profile]]:
constant[
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
variable[attributes] assign[=] call[name[_preprocess_attributes], parameter[name[attributes]]]
<ast.Try object at 0x7da1b21e1150>
return[dictionary[[<ast.Constant object at 0x7da1b21e3760>], [<ast.Constant object at 0x7da1b21e09a0>]]]
|
keyword[def] identifier[set_attributes] (
identifier[name] ,
identifier[attributes] ,
identifier[region] = keyword[None] ,
identifier[key] = keyword[None] ,
identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ,
):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[attributes] = identifier[_preprocess_attributes] ( identifier[attributes] )
keyword[try] :
identifier[url] = identifier[conn] . identifier[get_queue_url] ( identifier[QueueName] = identifier[name] )[ literal[string] ]
identifier[conn] . identifier[set_queue_attributes] ( identifier[QueueUrl] = identifier[url] , identifier[Attributes] = identifier[attributes] )
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] :
keyword[return] { literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )}
keyword[return] { literal[string] : keyword[True] }
|
def set_attributes(name, attributes, region=None, key=None, keyid=None, profile=None):
"""
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes) # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']]
return {'result': True}
|
def track_request(self, name: str, url: str, success: bool, start_time: str=None,
duration: int=None, response_code: str =None, http_method: str=None,
properties: Dict[str, object]=None, measurements: Dict[str, object]=None,
request_id: str=None):
"""
Sends a single request that was captured for the application.
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
"""
raise NotImplementedError('BotTelemetryClient.track_request(): is not implemented.')
|
def function[track_request, parameter[self, name, url, success, start_time, duration, response_code, http_method, properties, measurements, request_id]]:
constant[
Sends a single request that was captured for the application.
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
]
<ast.Raise object at 0x7da20c992710>
|
keyword[def] identifier[track_request] ( identifier[self] , identifier[name] : identifier[str] , identifier[url] : identifier[str] , identifier[success] : identifier[bool] , identifier[start_time] : identifier[str] = keyword[None] ,
identifier[duration] : identifier[int] = keyword[None] , identifier[response_code] : identifier[str] = keyword[None] , identifier[http_method] : identifier[str] = keyword[None] ,
identifier[properties] : identifier[Dict] [ identifier[str] , identifier[object] ]= keyword[None] , identifier[measurements] : identifier[Dict] [ identifier[str] , identifier[object] ]= keyword[None] ,
identifier[request_id] : identifier[str] = keyword[None] ):
literal[string]
keyword[raise] identifier[NotImplementedError] ( literal[string] )
|
def track_request(self, name: str, url: str, success: bool, start_time: str=None, duration: int=None, response_code: str=None, http_method: str=None, properties: Dict[str, object]=None, measurements: Dict[str, object]=None, request_id: str=None):
"""
Sends a single request that was captured for the application.
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
"""
raise NotImplementedError('BotTelemetryClient.track_request(): is not implemented.')
|
def immutable(members='', name='Immutable', verbose=False):
"""
Produces a class that either can be used standalone or as a base class for persistent classes.
This is a thin wrapper around a named tuple.
Constructing a type and using it to instantiate objects:
>>> Point = immutable('x, y', name='Point')
>>> p = Point(1, 2)
>>> p2 = p.set(x=3)
>>> p
Point(x=1, y=2)
>>> p2
Point(x=3, y=2)
Inheriting from a constructed type. In this case no type name needs to be supplied:
>>> class PositivePoint(immutable('x, y')):
... __slots__ = tuple()
... def __new__(cls, x, y):
... if x > 0 and y > 0:
... return super(PositivePoint, cls).__new__(cls, x, y)
... raise Exception('Coordinates must be positive!')
...
>>> p = PositivePoint(1, 2)
>>> p.set(x=3)
PositivePoint(x=3, y=2)
>>> p.set(y=-3)
Traceback (most recent call last):
Exception: Coordinates must be positive!
The persistent class also supports the notion of frozen members. The value of a frozen member
cannot be updated. For example it could be used to implement an ID that should remain the same
over time. A frozen member is denoted by a trailing underscore.
>>> Point = immutable('x, y, id_', name='Point')
>>> p = Point(1, 2, id_=17)
>>> p.set(x=3)
Point(x=3, y=2, id_=17)
>>> p.set(id_=18)
Traceback (most recent call last):
AttributeError: Cannot set frozen members id_
"""
if isinstance(members, six.string_types):
members = members.replace(',', ' ').split()
def frozen_member_test():
frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
if frozen_members:
return """
frozen_fields = fields_to_modify & set([{frozen_members}])
if frozen_fields:
raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
""".format(frozen_members=', '.join(frozen_members))
return ''
verbose_string = ""
if sys.version_info < (3, 7):
# Verbose is no longer supported in Python 3.7
verbose_string = ", verbose={verbose}".format(verbose=verbose)
quoted_members = ', '.join("'%s'" % m for m in members)
template = """
class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})):
__slots__ = tuple()
def __repr__(self):
return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)
def set(self, **kwargs):
if not kwargs:
return self
fields_to_modify = set(kwargs.keys())
if not fields_to_modify <= {member_set}:
raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
{frozen_member_test}
return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
""".format(quoted_members=quoted_members,
member_set="set([%s])" % quoted_members if quoted_members else 'set()',
frozen_member_test=frozen_member_test(),
verbose_string=verbose_string,
class_name=name)
if verbose:
print(template)
from collections import namedtuple
namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
try:
six.exec_(template, namespace)
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + template)
return namespace[name]
|
def function[immutable, parameter[members, name, verbose]]:
constant[
Produces a class that either can be used standalone or as a base class for persistent classes.
This is a thin wrapper around a named tuple.
Constructing a type and using it to instantiate objects:
>>> Point = immutable('x, y', name='Point')
>>> p = Point(1, 2)
>>> p2 = p.set(x=3)
>>> p
Point(x=1, y=2)
>>> p2
Point(x=3, y=2)
Inheriting from a constructed type. In this case no type name needs to be supplied:
>>> class PositivePoint(immutable('x, y')):
... __slots__ = tuple()
... def __new__(cls, x, y):
... if x > 0 and y > 0:
... return super(PositivePoint, cls).__new__(cls, x, y)
... raise Exception('Coordinates must be positive!')
...
>>> p = PositivePoint(1, 2)
>>> p.set(x=3)
PositivePoint(x=3, y=2)
>>> p.set(y=-3)
Traceback (most recent call last):
Exception: Coordinates must be positive!
The persistent class also supports the notion of frozen members. The value of a frozen member
cannot be updated. For example it could be used to implement an ID that should remain the same
over time. A frozen member is denoted by a trailing underscore.
>>> Point = immutable('x, y, id_', name='Point')
>>> p = Point(1, 2, id_=17)
>>> p.set(x=3)
Point(x=3, y=2, id_=17)
>>> p.set(id_=18)
Traceback (most recent call last):
AttributeError: Cannot set frozen members id_
]
if call[name[isinstance], parameter[name[members], name[six].string_types]] begin[:]
variable[members] assign[=] call[call[name[members].replace, parameter[constant[,], constant[ ]]].split, parameter[]]
def function[frozen_member_test, parameter[]]:
variable[frozen_members] assign[=] <ast.ListComp object at 0x7da18f813d90>
if name[frozen_members] begin[:]
return[call[constant[
frozen_fields = fields_to_modify & set([{frozen_members}])
if frozen_fields:
raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
].format, parameter[]]]
return[constant[]]
variable[verbose_string] assign[=] constant[]
if compare[name[sys].version_info less[<] tuple[[<ast.Constant object at 0x7da1b1e73d90>, <ast.Constant object at 0x7da1b1e73b50>]]] begin[:]
variable[verbose_string] assign[=] call[constant[, verbose={verbose}].format, parameter[]]
variable[quoted_members] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b1e71ea0>]]
variable[template] assign[=] call[constant[
class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})):
__slots__ = tuple()
def __repr__(self):
return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)
def set(self, **kwargs):
if not kwargs:
return self
fields_to_modify = set(kwargs.keys())
if not fields_to_modify <= {member_set}:
raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
{frozen_member_test}
return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
].format, parameter[]]
if name[verbose] begin[:]
call[name[print], parameter[name[template]]]
from relative_module[collections] import module[namedtuple]
variable[namespace] assign[=] call[name[dict], parameter[]]
<ast.Try object at 0x7da1b1e72980>
return[call[name[namespace]][name[name]]]
|
keyword[def] identifier[immutable] ( identifier[members] = literal[string] , identifier[name] = literal[string] , identifier[verbose] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[members] , identifier[six] . identifier[string_types] ):
identifier[members] = identifier[members] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ()
keyword[def] identifier[frozen_member_test] ():
identifier[frozen_members] =[ literal[string] % identifier[f] keyword[for] identifier[f] keyword[in] identifier[members] keyword[if] identifier[f] . identifier[endswith] ( literal[string] )]
keyword[if] identifier[frozen_members] :
keyword[return] literal[string] . identifier[format] ( identifier[frozen_members] = literal[string] . identifier[join] ( identifier[frozen_members] ))
keyword[return] literal[string]
identifier[verbose_string] = literal[string]
keyword[if] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ):
identifier[verbose_string] = literal[string] . identifier[format] ( identifier[verbose] = identifier[verbose] )
identifier[quoted_members] = literal[string] . identifier[join] ( literal[string] % identifier[m] keyword[for] identifier[m] keyword[in] identifier[members] )
identifier[template] = literal[string] . identifier[format] ( identifier[quoted_members] = identifier[quoted_members] ,
identifier[member_set] = literal[string] % identifier[quoted_members] keyword[if] identifier[quoted_members] keyword[else] literal[string] ,
identifier[frozen_member_test] = identifier[frozen_member_test] (),
identifier[verbose_string] = identifier[verbose_string] ,
identifier[class_name] = identifier[name] )
keyword[if] identifier[verbose] :
identifier[print] ( identifier[template] )
keyword[from] identifier[collections] keyword[import] identifier[namedtuple]
identifier[namespace] = identifier[dict] ( identifier[namedtuple] = identifier[namedtuple] , identifier[__name__] = literal[string] )
keyword[try] :
identifier[six] . identifier[exec_] ( identifier[template] , identifier[namespace] )
keyword[except] identifier[SyntaxError] keyword[as] identifier[e] :
keyword[raise] identifier[SyntaxError] ( identifier[e] . identifier[message] + literal[string] + identifier[template] )
keyword[return] identifier[namespace] [ identifier[name] ]
|
def immutable(members='', name='Immutable', verbose=False):
"""
Produces a class that either can be used standalone or as a base class for persistent classes.
This is a thin wrapper around a named tuple.
Constructing a type and using it to instantiate objects:
>>> Point = immutable('x, y', name='Point')
>>> p = Point(1, 2)
>>> p2 = p.set(x=3)
>>> p
Point(x=1, y=2)
>>> p2
Point(x=3, y=2)
Inheriting from a constructed type. In this case no type name needs to be supplied:
>>> class PositivePoint(immutable('x, y')):
... __slots__ = tuple()
... def __new__(cls, x, y):
... if x > 0 and y > 0:
... return super(PositivePoint, cls).__new__(cls, x, y)
... raise Exception('Coordinates must be positive!')
...
>>> p = PositivePoint(1, 2)
>>> p.set(x=3)
PositivePoint(x=3, y=2)
>>> p.set(y=-3)
Traceback (most recent call last):
Exception: Coordinates must be positive!
The persistent class also supports the notion of frozen members. The value of a frozen member
cannot be updated. For example it could be used to implement an ID that should remain the same
over time. A frozen member is denoted by a trailing underscore.
>>> Point = immutable('x, y, id_', name='Point')
>>> p = Point(1, 2, id_=17)
>>> p.set(x=3)
Point(x=3, y=2, id_=17)
>>> p.set(id_=18)
Traceback (most recent call last):
AttributeError: Cannot set frozen members id_
"""
if isinstance(members, six.string_types):
members = members.replace(',', ' ').split() # depends on [control=['if'], data=[]]
def frozen_member_test():
frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
if frozen_members:
return "\n frozen_fields = fields_to_modify & set([{frozen_members}])\n if frozen_fields:\n raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))\n ".format(frozen_members=', '.join(frozen_members)) # depends on [control=['if'], data=[]]
return ''
verbose_string = ''
if sys.version_info < (3, 7):
# Verbose is no longer supported in Python 3.7
verbose_string = ', verbose={verbose}'.format(verbose=verbose) # depends on [control=['if'], data=[]]
quoted_members = ', '.join(("'%s'" % m for m in members))
template = '\nclass {class_name}(namedtuple(\'ImmutableBase\', [{quoted_members}]{verbose_string})):\n __slots__ = tuple()\n\n def __repr__(self):\n return super({class_name}, self).__repr__().replace(\'ImmutableBase\', self.__class__.__name__)\n\n def set(self, **kwargs):\n if not kwargs:\n return self\n\n fields_to_modify = set(kwargs.keys())\n if not fields_to_modify <= {member_set}:\n raise AttributeError("\'%s\' is not a member" % \', \'.join(fields_to_modify - {member_set}))\n\n {frozen_member_test}\n\n return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))\n'.format(quoted_members=quoted_members, member_set='set([%s])' % quoted_members if quoted_members else 'set()', frozen_member_test=frozen_member_test(), verbose_string=verbose_string, class_name=name)
if verbose:
print(template) # depends on [control=['if'], data=[]]
from collections import namedtuple
namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
try:
six.exec_(template, namespace) # depends on [control=['try'], data=[]]
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + template) # depends on [control=['except'], data=['e']]
return namespace[name]
|
def inv_edges(json):
"""Switch obj/sub for a set of edges (makes fixing known inverse edges MUCH easier)"""
for edge in json['edges']:
sub, obj = edge['sub'], edge['obj']
edge['sub'] = obj
edge['obj'] = sub
edge['pred'] += 'INVERTED'
|
def function[inv_edges, parameter[json]]:
constant[Switch obj/sub for a set of edges (makes fixing known inverse edges MUCH easier)]
for taget[name[edge]] in starred[call[name[json]][constant[edges]]] begin[:]
<ast.Tuple object at 0x7da1b1be7a30> assign[=] tuple[[<ast.Subscript object at 0x7da1b1be46a0>, <ast.Subscript object at 0x7da1b1be7c10>]]
call[name[edge]][constant[sub]] assign[=] name[obj]
call[name[edge]][constant[obj]] assign[=] name[sub]
<ast.AugAssign object at 0x7da1b1be5660>
|
keyword[def] identifier[inv_edges] ( identifier[json] ):
literal[string]
keyword[for] identifier[edge] keyword[in] identifier[json] [ literal[string] ]:
identifier[sub] , identifier[obj] = identifier[edge] [ literal[string] ], identifier[edge] [ literal[string] ]
identifier[edge] [ literal[string] ]= identifier[obj]
identifier[edge] [ literal[string] ]= identifier[sub]
identifier[edge] [ literal[string] ]+= literal[string]
|
def inv_edges(json):
"""Switch obj/sub for a set of edges (makes fixing known inverse edges MUCH easier)"""
for edge in json['edges']:
(sub, obj) = (edge['sub'], edge['obj'])
edge['sub'] = obj
edge['obj'] = sub
edge['pred'] += 'INVERTED' # depends on [control=['for'], data=['edge']]
|
def _commit(self):
"""Commits the batch.
This is called by :meth:`commit`.
"""
if self._id is None:
mode = _datastore_pb2.CommitRequest.NON_TRANSACTIONAL
else:
mode = _datastore_pb2.CommitRequest.TRANSACTIONAL
commit_response_pb = self._client._datastore_api.commit(
self.project, mode, self._mutations, transaction=self._id
)
_, updated_keys = _parse_commit_response(commit_response_pb)
# If the back-end returns without error, we are guaranteed that
# ``commit`` will return keys that match (length and
# order) directly ``_partial_key_entities``.
for new_key_pb, entity in zip(updated_keys, self._partial_key_entities):
new_id = new_key_pb.path[-1].id
entity.key = entity.key.completed_key(new_id)
|
def function[_commit, parameter[self]]:
constant[Commits the batch.
This is called by :meth:`commit`.
]
if compare[name[self]._id is constant[None]] begin[:]
variable[mode] assign[=] name[_datastore_pb2].CommitRequest.NON_TRANSACTIONAL
variable[commit_response_pb] assign[=] call[name[self]._client._datastore_api.commit, parameter[name[self].project, name[mode], name[self]._mutations]]
<ast.Tuple object at 0x7da20c6c5c30> assign[=] call[name[_parse_commit_response], parameter[name[commit_response_pb]]]
for taget[tuple[[<ast.Name object at 0x7da20c6c5870>, <ast.Name object at 0x7da20c6c6440>]]] in starred[call[name[zip], parameter[name[updated_keys], name[self]._partial_key_entities]]] begin[:]
variable[new_id] assign[=] call[name[new_key_pb].path][<ast.UnaryOp object at 0x7da18bcc8a60>].id
name[entity].key assign[=] call[name[entity].key.completed_key, parameter[name[new_id]]]
|
keyword[def] identifier[_commit] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_id] keyword[is] keyword[None] :
identifier[mode] = identifier[_datastore_pb2] . identifier[CommitRequest] . identifier[NON_TRANSACTIONAL]
keyword[else] :
identifier[mode] = identifier[_datastore_pb2] . identifier[CommitRequest] . identifier[TRANSACTIONAL]
identifier[commit_response_pb] = identifier[self] . identifier[_client] . identifier[_datastore_api] . identifier[commit] (
identifier[self] . identifier[project] , identifier[mode] , identifier[self] . identifier[_mutations] , identifier[transaction] = identifier[self] . identifier[_id]
)
identifier[_] , identifier[updated_keys] = identifier[_parse_commit_response] ( identifier[commit_response_pb] )
keyword[for] identifier[new_key_pb] , identifier[entity] keyword[in] identifier[zip] ( identifier[updated_keys] , identifier[self] . identifier[_partial_key_entities] ):
identifier[new_id] = identifier[new_key_pb] . identifier[path] [- literal[int] ]. identifier[id]
identifier[entity] . identifier[key] = identifier[entity] . identifier[key] . identifier[completed_key] ( identifier[new_id] )
|
def _commit(self):
"""Commits the batch.
This is called by :meth:`commit`.
"""
if self._id is None:
mode = _datastore_pb2.CommitRequest.NON_TRANSACTIONAL # depends on [control=['if'], data=[]]
else:
mode = _datastore_pb2.CommitRequest.TRANSACTIONAL
commit_response_pb = self._client._datastore_api.commit(self.project, mode, self._mutations, transaction=self._id)
(_, updated_keys) = _parse_commit_response(commit_response_pb)
# If the back-end returns without error, we are guaranteed that
# ``commit`` will return keys that match (length and
# order) directly ``_partial_key_entities``.
for (new_key_pb, entity) in zip(updated_keys, self._partial_key_entities):
new_id = new_key_pb.path[-1].id
entity.key = entity.key.completed_key(new_id) # depends on [control=['for'], data=[]]
|
def build_boolCoeff(self):
''' Compute coefficients for tuple space.
'''
# coefficients for hill functions from boolean update rules
self.boolCoeff = collections.OrderedDict([(s,[]) for s in self.varNames.keys()])
# parents
self.pas = collections.OrderedDict([(s,[]) for s in self.varNames.keys()])
#
for key in self.boolRules.keys():
rule = self.boolRules[key]
self.pas[key] = self.parents_from_boolRule(rule)
pasIndices = [self.varNames[pa] for pa in self.pas[key]]
# check whether there are coupling matrix entries for each parent
for g in range(self.dim):
if g in pasIndices:
if np.abs(self.Coupl[self.varNames[key],g]) < 1e-10:
raise ValueError('specify coupling value for '+str(key)+' <- '+str(g))
else:
if np.abs(self.Coupl[self.varNames[key],g]) > 1e-10:
raise ValueError('there should be no coupling value for '+str(key)+' <- '+str(g))
if self.verbosity > 1:
settings.m(0,'...'+key)
settings.m(0,rule)
settings.m(0,rule_pa)
# now evaluate coefficients
for tuple in list(itertools.product([False,True],repeat=len(self.pas[key]))):
if self.process_rule(rule,self.pas[key],tuple):
self.boolCoeff[key].append(tuple)
#
if self.verbosity > 1:
settings.m(0,self.boolCoeff[key])
|
def function[build_boolCoeff, parameter[self]]:
constant[ Compute coefficients for tuple space.
]
name[self].boolCoeff assign[=] call[name[collections].OrderedDict, parameter[<ast.ListComp object at 0x7da18eb55cf0>]]
name[self].pas assign[=] call[name[collections].OrderedDict, parameter[<ast.ListComp object at 0x7da18eb57340>]]
for taget[name[key]] in starred[call[name[self].boolRules.keys, parameter[]]] begin[:]
variable[rule] assign[=] call[name[self].boolRules][name[key]]
call[name[self].pas][name[key]] assign[=] call[name[self].parents_from_boolRule, parameter[name[rule]]]
variable[pasIndices] assign[=] <ast.ListComp object at 0x7da18eb54820>
for taget[name[g]] in starred[call[name[range], parameter[name[self].dim]]] begin[:]
if compare[name[g] in name[pasIndices]] begin[:]
if compare[call[name[np].abs, parameter[call[name[self].Coupl][tuple[[<ast.Subscript object at 0x7da18eb561a0>, <ast.Name object at 0x7da18eb55ea0>]]]]] less[<] constant[1e-10]] begin[:]
<ast.Raise object at 0x7da18eb56f80>
if compare[name[self].verbosity greater[>] constant[1]] begin[:]
call[name[settings].m, parameter[constant[0], binary_operation[constant[...] + name[key]]]]
call[name[settings].m, parameter[constant[0], name[rule]]]
call[name[settings].m, parameter[constant[0], name[rule_pa]]]
for taget[name[tuple]] in starred[call[name[list], parameter[call[name[itertools].product, parameter[list[[<ast.Constant object at 0x7da204567850>, <ast.Constant object at 0x7da2045654e0>]]]]]]] begin[:]
if call[name[self].process_rule, parameter[name[rule], call[name[self].pas][name[key]], name[tuple]]] begin[:]
call[call[name[self].boolCoeff][name[key]].append, parameter[name[tuple]]]
if compare[name[self].verbosity greater[>] constant[1]] begin[:]
call[name[settings].m, parameter[constant[0], call[name[self].boolCoeff][name[key]]]]
|
keyword[def] identifier[build_boolCoeff] ( identifier[self] ):
literal[string]
identifier[self] . identifier[boolCoeff] = identifier[collections] . identifier[OrderedDict] ([( identifier[s] ,[]) keyword[for] identifier[s] keyword[in] identifier[self] . identifier[varNames] . identifier[keys] ()])
identifier[self] . identifier[pas] = identifier[collections] . identifier[OrderedDict] ([( identifier[s] ,[]) keyword[for] identifier[s] keyword[in] identifier[self] . identifier[varNames] . identifier[keys] ()])
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[boolRules] . identifier[keys] ():
identifier[rule] = identifier[self] . identifier[boolRules] [ identifier[key] ]
identifier[self] . identifier[pas] [ identifier[key] ]= identifier[self] . identifier[parents_from_boolRule] ( identifier[rule] )
identifier[pasIndices] =[ identifier[self] . identifier[varNames] [ identifier[pa] ] keyword[for] identifier[pa] keyword[in] identifier[self] . identifier[pas] [ identifier[key] ]]
keyword[for] identifier[g] keyword[in] identifier[range] ( identifier[self] . identifier[dim] ):
keyword[if] identifier[g] keyword[in] identifier[pasIndices] :
keyword[if] identifier[np] . identifier[abs] ( identifier[self] . identifier[Coupl] [ identifier[self] . identifier[varNames] [ identifier[key] ], identifier[g] ])< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[key] )+ literal[string] + identifier[str] ( identifier[g] ))
keyword[else] :
keyword[if] identifier[np] . identifier[abs] ( identifier[self] . identifier[Coupl] [ identifier[self] . identifier[varNames] [ identifier[key] ], identifier[g] ])> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[key] )+ literal[string] + identifier[str] ( identifier[g] ))
keyword[if] identifier[self] . identifier[verbosity] > literal[int] :
identifier[settings] . identifier[m] ( literal[int] , literal[string] + identifier[key] )
identifier[settings] . identifier[m] ( literal[int] , identifier[rule] )
identifier[settings] . identifier[m] ( literal[int] , identifier[rule_pa] )
keyword[for] identifier[tuple] keyword[in] identifier[list] ( identifier[itertools] . identifier[product] ([ keyword[False] , keyword[True] ], identifier[repeat] = identifier[len] ( identifier[self] . identifier[pas] [ identifier[key] ]))):
keyword[if] identifier[self] . identifier[process_rule] ( identifier[rule] , identifier[self] . identifier[pas] [ identifier[key] ], identifier[tuple] ):
identifier[self] . identifier[boolCoeff] [ identifier[key] ]. identifier[append] ( identifier[tuple] )
keyword[if] identifier[self] . identifier[verbosity] > literal[int] :
identifier[settings] . identifier[m] ( literal[int] , identifier[self] . identifier[boolCoeff] [ identifier[key] ])
|
def build_boolCoeff(self):
""" Compute coefficients for tuple space.
"""
# coefficients for hill functions from boolean update rules
self.boolCoeff = collections.OrderedDict([(s, []) for s in self.varNames.keys()])
# parents
self.pas = collections.OrderedDict([(s, []) for s in self.varNames.keys()])
#
for key in self.boolRules.keys():
rule = self.boolRules[key]
self.pas[key] = self.parents_from_boolRule(rule)
pasIndices = [self.varNames[pa] for pa in self.pas[key]]
# check whether there are coupling matrix entries for each parent
for g in range(self.dim):
if g in pasIndices:
if np.abs(self.Coupl[self.varNames[key], g]) < 1e-10:
raise ValueError('specify coupling value for ' + str(key) + ' <- ' + str(g)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['g']]
elif np.abs(self.Coupl[self.varNames[key], g]) > 1e-10:
raise ValueError('there should be no coupling value for ' + str(key) + ' <- ' + str(g)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['g']]
if self.verbosity > 1:
settings.m(0, '...' + key)
settings.m(0, rule)
settings.m(0, rule_pa) # depends on [control=['if'], data=[]]
# now evaluate coefficients
for tuple in list(itertools.product([False, True], repeat=len(self.pas[key]))):
if self.process_rule(rule, self.pas[key], tuple):
self.boolCoeff[key].append(tuple) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tuple']]
#
if self.verbosity > 1:
settings.m(0, self.boolCoeff[key]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
|
def reload_cache_config(self, call_params):
"""REST Reload Plivo Cache Config helper
"""
path = '/' + self.api_version + '/ReloadCacheConfig/'
method = 'POST'
return self.request(path, method, call_params)
|
def function[reload_cache_config, parameter[self, call_params]]:
constant[REST Reload Plivo Cache Config helper
]
variable[path] assign[=] binary_operation[binary_operation[constant[/] + name[self].api_version] + constant[/ReloadCacheConfig/]]
variable[method] assign[=] constant[POST]
return[call[name[self].request, parameter[name[path], name[method], name[call_params]]]]
|
keyword[def] identifier[reload_cache_config] ( identifier[self] , identifier[call_params] ):
literal[string]
identifier[path] = literal[string] + identifier[self] . identifier[api_version] + literal[string]
identifier[method] = literal[string]
keyword[return] identifier[self] . identifier[request] ( identifier[path] , identifier[method] , identifier[call_params] )
|
def reload_cache_config(self, call_params):
"""REST Reload Plivo Cache Config helper
"""
path = '/' + self.api_version + '/ReloadCacheConfig/'
method = 'POST'
return self.request(path, method, call_params)
|
def version_id(self):
"""Return the version of the community.
:returns: hash which encodes the community id and its las update.
:rtype: str
"""
return hashlib.sha1('{0}__{1}'.format(
self.id, self.updated).encode('utf-8')).hexdigest()
|
def function[version_id, parameter[self]]:
constant[Return the version of the community.
:returns: hash which encodes the community id and its las update.
:rtype: str
]
return[call[call[name[hashlib].sha1, parameter[call[call[constant[{0}__{1}].format, parameter[name[self].id, name[self].updated]].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]]
|
keyword[def] identifier[version_id] ( identifier[self] ):
literal[string]
keyword[return] identifier[hashlib] . identifier[sha1] ( literal[string] . identifier[format] (
identifier[self] . identifier[id] , identifier[self] . identifier[updated] ). identifier[encode] ( literal[string] )). identifier[hexdigest] ()
|
def version_id(self):
"""Return the version of the community.
:returns: hash which encodes the community id and its las update.
:rtype: str
"""
return hashlib.sha1('{0}__{1}'.format(self.id, self.updated).encode('utf-8')).hexdigest()
|
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
output = mkl_fft.irfft_numpy(a, n=n, axis=axis)
if _unitary(norm):
output *= sqrt(output.shape[axis])
return output
|
def function[irfft, parameter[a, n, axis, norm]]:
constant[
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
]
variable[output] assign[=] call[name[mkl_fft].irfft_numpy, parameter[name[a]]]
if call[name[_unitary], parameter[name[norm]]] begin[:]
<ast.AugAssign object at 0x7da20c991750>
return[name[output]]
|
keyword[def] identifier[irfft] ( identifier[a] , identifier[n] = keyword[None] , identifier[axis] =- literal[int] , identifier[norm] = keyword[None] ):
literal[string]
identifier[output] = identifier[mkl_fft] . identifier[irfft_numpy] ( identifier[a] , identifier[n] = identifier[n] , identifier[axis] = identifier[axis] )
keyword[if] identifier[_unitary] ( identifier[norm] ):
identifier[output] *= identifier[sqrt] ( identifier[output] . identifier[shape] [ identifier[axis] ])
keyword[return] identifier[output]
|
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
output = mkl_fft.irfft_numpy(a, n=n, axis=axis)
if _unitary(norm):
output *= sqrt(output.shape[axis]) # depends on [control=['if'], data=[]]
return output
|
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {'dims': self.dims,
'attrs': decode_numpy_dict_values(self.attrs)}
if data:
item['data'] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({'dtype': str(self.dtype), 'shape': self.shape})
return item
|
def function[to_dict, parameter[self, data]]:
constant[Dictionary representation of variable.]
variable[item] assign[=] dictionary[[<ast.Constant object at 0x7da18eb542e0>, <ast.Constant object at 0x7da18eb55b70>], [<ast.Attribute object at 0x7da18eb57190>, <ast.Call object at 0x7da18eb54af0>]]
if name[data] begin[:]
call[name[item]][constant[data]] assign[=] call[call[name[ensure_us_time_resolution], parameter[name[self].values]].tolist, parameter[]]
return[name[item]]
|
keyword[def] identifier[to_dict] ( identifier[self] , identifier[data] = keyword[True] ):
literal[string]
identifier[item] ={ literal[string] : identifier[self] . identifier[dims] ,
literal[string] : identifier[decode_numpy_dict_values] ( identifier[self] . identifier[attrs] )}
keyword[if] identifier[data] :
identifier[item] [ literal[string] ]= identifier[ensure_us_time_resolution] ( identifier[self] . identifier[values] ). identifier[tolist] ()
keyword[else] :
identifier[item] . identifier[update] ({ literal[string] : identifier[str] ( identifier[self] . identifier[dtype] ), literal[string] : identifier[self] . identifier[shape] })
keyword[return] identifier[item]
|
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {'dims': self.dims, 'attrs': decode_numpy_dict_values(self.attrs)}
if data:
item['data'] = ensure_us_time_resolution(self.values).tolist() # depends on [control=['if'], data=[]]
else:
item.update({'dtype': str(self.dtype), 'shape': self.shape})
return item
|
def unwrap(value):
"""
Unwraps the given Document or DocumentList as applicable.
"""
if isinstance(value, Document):
return value.to_dict()
elif isinstance(value, DocumentList):
return value.to_list()
else:
return value
|
def function[unwrap, parameter[value]]:
constant[
Unwraps the given Document or DocumentList as applicable.
]
if call[name[isinstance], parameter[name[value], name[Document]]] begin[:]
return[call[name[value].to_dict, parameter[]]]
|
keyword[def] identifier[unwrap] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Document] ):
keyword[return] identifier[value] . identifier[to_dict] ()
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[DocumentList] ):
keyword[return] identifier[value] . identifier[to_list] ()
keyword[else] :
keyword[return] identifier[value]
|
def unwrap(value):
"""
Unwraps the given Document or DocumentList as applicable.
"""
if isinstance(value, Document):
return value.to_dict() # depends on [control=['if'], data=[]]
elif isinstance(value, DocumentList):
return value.to_list() # depends on [control=['if'], data=[]]
else:
return value
|
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName')
|
def function[read_content_types, parameter[archive]]:
constant[Read content types.]
variable[xml_source] assign[=] call[name[archive].read, parameter[name[ARC_CONTENT_TYPES]]]
variable[root] assign[=] call[name[fromstring], parameter[name[xml_source]]]
variable[contents_root] assign[=] call[name[root].findall, parameter[binary_operation[constant[{%s}Override] <ast.Mod object at 0x7da2590d6920> name[CONTYPES_NS]]]]
for taget[name[type]] in starred[name[contents_root]] begin[:]
<ast.Yield object at 0x7da1b0744670>
|
keyword[def] identifier[read_content_types] ( identifier[archive] ):
literal[string]
identifier[xml_source] = identifier[archive] . identifier[read] ( identifier[ARC_CONTENT_TYPES] )
identifier[root] = identifier[fromstring] ( identifier[xml_source] )
identifier[contents_root] = identifier[root] . identifier[findall] ( literal[string] % identifier[CONTYPES_NS] )
keyword[for] identifier[type] keyword[in] identifier[contents_root] :
keyword[yield] identifier[type] . identifier[get] ( literal[string] ), identifier[type] . identifier[get] ( literal[string] )
|
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield (type.get('ContentType'), type.get('PartName')) # depends on [control=['for'], data=['type']]
|
def submit(self, func, *args, **kwargs):
"""Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
with self._shutdown_lock:
if PY3 and self._broken:
raise BrokenProcessPool(
"A child process terminated "
"abruptly, the process pool is not usable anymore"
)
if self._shutdown_thread:
raise RuntimeError("cannot schedule new futures after shutdown")
callback = kwargs.pop("callback", self.default_callback)
future = NewFuture(
self._timeout,
args,
kwargs,
callback=callback,
catch_exception=self.catch_exception,
)
w = _WorkItem(future, func, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
self._result_queue.put(None)
self._start_queue_management_thread()
if PY2:
self._adjust_process_count()
self._all_futures.add(future)
return future
|
def function[submit, parameter[self, func]]:
constant[Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`]
with name[self]._shutdown_lock begin[:]
if <ast.BoolOp object at 0x7da20e9b3a90> begin[:]
<ast.Raise object at 0x7da20e9b1720>
if name[self]._shutdown_thread begin[:]
<ast.Raise object at 0x7da2043456f0>
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self].default_callback]]
variable[future] assign[=] call[name[NewFuture], parameter[name[self]._timeout, name[args], name[kwargs]]]
variable[w] assign[=] call[name[_WorkItem], parameter[name[future], name[func], name[args], name[kwargs]]]
call[name[self]._pending_work_items][name[self]._queue_count] assign[=] name[w]
call[name[self]._work_ids.put, parameter[name[self]._queue_count]]
<ast.AugAssign object at 0x7da20e957f40>
call[name[self]._result_queue.put, parameter[constant[None]]]
call[name[self]._start_queue_management_thread, parameter[]]
if name[PY2] begin[:]
call[name[self]._adjust_process_count, parameter[]]
call[name[self]._all_futures.add, parameter[name[future]]]
return[name[future]]
|
keyword[def] identifier[submit] ( identifier[self] , identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[self] . identifier[_shutdown_lock] :
keyword[if] identifier[PY3] keyword[and] identifier[self] . identifier[_broken] :
keyword[raise] identifier[BrokenProcessPool] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[_shutdown_thread] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[default_callback] )
identifier[future] = identifier[NewFuture] (
identifier[self] . identifier[_timeout] ,
identifier[args] ,
identifier[kwargs] ,
identifier[callback] = identifier[callback] ,
identifier[catch_exception] = identifier[self] . identifier[catch_exception] ,
)
identifier[w] = identifier[_WorkItem] ( identifier[future] , identifier[func] , identifier[args] , identifier[kwargs] )
identifier[self] . identifier[_pending_work_items] [ identifier[self] . identifier[_queue_count] ]= identifier[w]
identifier[self] . identifier[_work_ids] . identifier[put] ( identifier[self] . identifier[_queue_count] )
identifier[self] . identifier[_queue_count] += literal[int]
identifier[self] . identifier[_result_queue] . identifier[put] ( keyword[None] )
identifier[self] . identifier[_start_queue_management_thread] ()
keyword[if] identifier[PY2] :
identifier[self] . identifier[_adjust_process_count] ()
identifier[self] . identifier[_all_futures] . identifier[add] ( identifier[future] )
keyword[return] identifier[future]
|
def submit(self, func, *args, **kwargs):
"""Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
with self._shutdown_lock:
if PY3 and self._broken:
raise BrokenProcessPool('A child process terminated abruptly, the process pool is not usable anymore') # depends on [control=['if'], data=[]]
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown') # depends on [control=['if'], data=[]]
callback = kwargs.pop('callback', self.default_callback)
future = NewFuture(self._timeout, args, kwargs, callback=callback, catch_exception=self.catch_exception)
w = _WorkItem(future, func, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
self._result_queue.put(None)
self._start_queue_management_thread()
if PY2:
self._adjust_process_count() # depends on [control=['if'], data=[]]
self._all_futures.add(future)
return future # depends on [control=['with'], data=[]]
|
def genkeyhex():
'''
Generate new random Bitcoin private key, using os.urandom and
double-sha256. Hex format.
'''
while True:
key = hash256(
hexlify(os.urandom(40) + str(datetime.datetime.now())
.encode("utf-8")))
# 40 bytes used instead of 32, as a buffer for any slight
# lack of entropy in urandom
# Double-sha256 used instead of single hash, for entropy
# reasons as well.
# I know, it's nit-picking, but better safe than sorry.
if int(key,16) > 1 and int(key,16) < N:
break
return key
|
def function[genkeyhex, parameter[]]:
constant[
Generate new random Bitcoin private key, using os.urandom and
double-sha256. Hex format.
]
while constant[True] begin[:]
variable[key] assign[=] call[name[hash256], parameter[call[name[hexlify], parameter[binary_operation[call[name[os].urandom, parameter[constant[40]]] + call[call[name[str], parameter[call[name[datetime].datetime.now, parameter[]]]].encode, parameter[constant[utf-8]]]]]]]]
if <ast.BoolOp object at 0x7da204960df0> begin[:]
break
return[name[key]]
|
keyword[def] identifier[genkeyhex] ():
literal[string]
keyword[while] keyword[True] :
identifier[key] = identifier[hash256] (
identifier[hexlify] ( identifier[os] . identifier[urandom] ( literal[int] )+ identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())
. identifier[encode] ( literal[string] )))
keyword[if] identifier[int] ( identifier[key] , literal[int] )> literal[int] keyword[and] identifier[int] ( identifier[key] , literal[int] )< identifier[N] :
keyword[break]
keyword[return] identifier[key]
|
def genkeyhex():
"""
Generate new random Bitcoin private key, using os.urandom and
double-sha256. Hex format.
"""
while True:
key = hash256(hexlify(os.urandom(40) + str(datetime.datetime.now()).encode('utf-8')))
# 40 bytes used instead of 32, as a buffer for any slight
# lack of entropy in urandom
# Double-sha256 used instead of single hash, for entropy
# reasons as well.
# I know, it's nit-picking, but better safe than sorry.
if int(key, 16) > 1 and int(key, 16) < N:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return key
|
def resize(self, flavor):
"""Set the size of this instance to a different flavor."""
# We need the flavorRef, not the flavor or size.
flavorRef = self.manager.api._get_flavor_ref(flavor)
body = {"flavorRef": flavorRef}
self.manager.action(self, "resize", body=body)
|
def function[resize, parameter[self, flavor]]:
constant[Set the size of this instance to a different flavor.]
variable[flavorRef] assign[=] call[name[self].manager.api._get_flavor_ref, parameter[name[flavor]]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b055aef0>], [<ast.Name object at 0x7da1b0559a50>]]
call[name[self].manager.action, parameter[name[self], constant[resize]]]
|
keyword[def] identifier[resize] ( identifier[self] , identifier[flavor] ):
literal[string]
identifier[flavorRef] = identifier[self] . identifier[manager] . identifier[api] . identifier[_get_flavor_ref] ( identifier[flavor] )
identifier[body] ={ literal[string] : identifier[flavorRef] }
identifier[self] . identifier[manager] . identifier[action] ( identifier[self] , literal[string] , identifier[body] = identifier[body] )
|
def resize(self, flavor):
"""Set the size of this instance to a different flavor."""
# We need the flavorRef, not the flavor or size.
flavorRef = self.manager.api._get_flavor_ref(flavor)
body = {'flavorRef': flavorRef}
self.manager.action(self, 'resize', body=body)
|
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn."""
_, cron_str, cron_job_name, user, _ = self.urn.Split(5)
if cron_str != "cron":
raise access_control.UnauthorizedAccess(
"Approval object has invalid urn %s." % self.urn,
requested_access=self.token.requested_access)
return (user, aff4.ROOT_URN.Add("cron").Add(cron_job_name))
|
def function[InferUserAndSubjectFromUrn, parameter[self]]:
constant[Infers user name and subject urn from self.urn.]
<ast.Tuple object at 0x7da1b1b44a00> assign[=] call[name[self].urn.Split, parameter[constant[5]]]
if compare[name[cron_str] not_equal[!=] constant[cron]] begin[:]
<ast.Raise object at 0x7da1b1b45f30>
return[tuple[[<ast.Name object at 0x7da1b1b454e0>, <ast.Call object at 0x7da1b1b44e80>]]]
|
keyword[def] identifier[InferUserAndSubjectFromUrn] ( identifier[self] ):
literal[string]
identifier[_] , identifier[cron_str] , identifier[cron_job_name] , identifier[user] , identifier[_] = identifier[self] . identifier[urn] . identifier[Split] ( literal[int] )
keyword[if] identifier[cron_str] != literal[string] :
keyword[raise] identifier[access_control] . identifier[UnauthorizedAccess] (
literal[string] % identifier[self] . identifier[urn] ,
identifier[requested_access] = identifier[self] . identifier[token] . identifier[requested_access] )
keyword[return] ( identifier[user] , identifier[aff4] . identifier[ROOT_URN] . identifier[Add] ( literal[string] ). identifier[Add] ( identifier[cron_job_name] ))
|
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn."""
(_, cron_str, cron_job_name, user, _) = self.urn.Split(5)
if cron_str != 'cron':
raise access_control.UnauthorizedAccess('Approval object has invalid urn %s.' % self.urn, requested_access=self.token.requested_access) # depends on [control=['if'], data=[]]
return (user, aff4.ROOT_URN.Add('cron').Add(cron_job_name))
|
def write_config(filename, config, mode="w"):
'''use configparser to write a config object to filename
'''
with open(filename, mode) as filey:
config.write(filey)
return filename
|
def function[write_config, parameter[filename, config, mode]]:
constant[use configparser to write a config object to filename
]
with call[name[open], parameter[name[filename], name[mode]]] begin[:]
call[name[config].write, parameter[name[filey]]]
return[name[filename]]
|
keyword[def] identifier[write_config] ( identifier[filename] , identifier[config] , identifier[mode] = literal[string] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , identifier[mode] ) keyword[as] identifier[filey] :
identifier[config] . identifier[write] ( identifier[filey] )
keyword[return] identifier[filename]
|
def write_config(filename, config, mode='w'):
"""use configparser to write a config object to filename
"""
with open(filename, mode) as filey:
config.write(filey) # depends on [control=['with'], data=['filey']]
return filename
|
def cjkFragSplit(frags, maxWidths, calcBounds, encoding='utf8'):
"""
This attempts to be wordSplit for frags using the dumb algorithm
"""
from reportlab.rl_config import _FUZZ
U = [] # get a list of single glyphs with their widths etc etc
for f in frags:
text = f.text
if not isinstance(text, unicode):
text = text.decode(encoding)
if text:
U.extend([cjkU(t, f, encoding) for t in text])
else:
U.append(cjkU(text, f, encoding))
lines = []
widthUsed = lineStartPos = 0
maxWidth = maxWidths[0]
for i, u in enumerate(U):
w = u.width
widthUsed += w
lineBreak = hasattr(u.frag, 'lineBreak')
endLine = (widthUsed > maxWidth + _FUZZ and widthUsed > 0) or lineBreak
if endLine:
if lineBreak: continue
extraSpace = maxWidth - widthUsed + w
#This is the most important of the Japanese typography rules.
#if next character cannot start a line, wrap it up to this line so it hangs
#in the right margin. We won't do two or more though - that's unlikely and
#would result in growing ugliness.
nextChar = U[i]
if nextChar in ALL_CANNOT_START:
extraSpace -= w
i += 1
lines.append(makeCJKParaLine(U[lineStartPos:i], extraSpace, calcBounds))
try:
maxWidth = maxWidths[len(lines)]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
lineStartPos = i
widthUsed = w
i -= 1
#any characters left?
if widthUsed > 0:
lines.append(makeCJKParaLine(U[lineStartPos:], maxWidth - widthUsed, calcBounds))
return ParaLines(kind=1, lines=lines)
|
def function[cjkFragSplit, parameter[frags, maxWidths, calcBounds, encoding]]:
constant[
This attempts to be wordSplit for frags using the dumb algorithm
]
from relative_module[reportlab.rl_config] import module[_FUZZ]
variable[U] assign[=] list[[]]
for taget[name[f]] in starred[name[frags]] begin[:]
variable[text] assign[=] name[f].text
if <ast.UnaryOp object at 0x7da20c76e560> begin[:]
variable[text] assign[=] call[name[text].decode, parameter[name[encoding]]]
if name[text] begin[:]
call[name[U].extend, parameter[<ast.ListComp object at 0x7da20c6ab640>]]
variable[lines] assign[=] list[[]]
variable[widthUsed] assign[=] constant[0]
variable[maxWidth] assign[=] call[name[maxWidths]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da1b12c9720>, <ast.Name object at 0x7da1b12cabf0>]]] in starred[call[name[enumerate], parameter[name[U]]]] begin[:]
variable[w] assign[=] name[u].width
<ast.AugAssign object at 0x7da1b11f5420>
variable[lineBreak] assign[=] call[name[hasattr], parameter[name[u].frag, constant[lineBreak]]]
variable[endLine] assign[=] <ast.BoolOp object at 0x7da18f09fac0>
if name[endLine] begin[:]
if name[lineBreak] begin[:]
continue
variable[extraSpace] assign[=] binary_operation[binary_operation[name[maxWidth] - name[widthUsed]] + name[w]]
variable[nextChar] assign[=] call[name[U]][name[i]]
if compare[name[nextChar] in name[ALL_CANNOT_START]] begin[:]
<ast.AugAssign object at 0x7da18f09d690>
<ast.AugAssign object at 0x7da18f09cd60>
call[name[lines].append, parameter[call[name[makeCJKParaLine], parameter[call[name[U]][<ast.Slice object at 0x7da18f09c130>], name[extraSpace], name[calcBounds]]]]]
<ast.Try object at 0x7da18f09c880>
variable[lineStartPos] assign[=] name[i]
variable[widthUsed] assign[=] name[w]
<ast.AugAssign object at 0x7da18f09c6a0>
if compare[name[widthUsed] greater[>] constant[0]] begin[:]
call[name[lines].append, parameter[call[name[makeCJKParaLine], parameter[call[name[U]][<ast.Slice object at 0x7da18f09d630>], binary_operation[name[maxWidth] - name[widthUsed]], name[calcBounds]]]]]
return[call[name[ParaLines], parameter[]]]
|
keyword[def] identifier[cjkFragSplit] ( identifier[frags] , identifier[maxWidths] , identifier[calcBounds] , identifier[encoding] = literal[string] ):
literal[string]
keyword[from] identifier[reportlab] . identifier[rl_config] keyword[import] identifier[_FUZZ]
identifier[U] =[]
keyword[for] identifier[f] keyword[in] identifier[frags] :
identifier[text] = identifier[f] . identifier[text]
keyword[if] keyword[not] identifier[isinstance] ( identifier[text] , identifier[unicode] ):
identifier[text] = identifier[text] . identifier[decode] ( identifier[encoding] )
keyword[if] identifier[text] :
identifier[U] . identifier[extend] ([ identifier[cjkU] ( identifier[t] , identifier[f] , identifier[encoding] ) keyword[for] identifier[t] keyword[in] identifier[text] ])
keyword[else] :
identifier[U] . identifier[append] ( identifier[cjkU] ( identifier[text] , identifier[f] , identifier[encoding] ))
identifier[lines] =[]
identifier[widthUsed] = identifier[lineStartPos] = literal[int]
identifier[maxWidth] = identifier[maxWidths] [ literal[int] ]
keyword[for] identifier[i] , identifier[u] keyword[in] identifier[enumerate] ( identifier[U] ):
identifier[w] = identifier[u] . identifier[width]
identifier[widthUsed] += identifier[w]
identifier[lineBreak] = identifier[hasattr] ( identifier[u] . identifier[frag] , literal[string] )
identifier[endLine] =( identifier[widthUsed] > identifier[maxWidth] + identifier[_FUZZ] keyword[and] identifier[widthUsed] > literal[int] ) keyword[or] identifier[lineBreak]
keyword[if] identifier[endLine] :
keyword[if] identifier[lineBreak] : keyword[continue]
identifier[extraSpace] = identifier[maxWidth] - identifier[widthUsed] + identifier[w]
identifier[nextChar] = identifier[U] [ identifier[i] ]
keyword[if] identifier[nextChar] keyword[in] identifier[ALL_CANNOT_START] :
identifier[extraSpace] -= identifier[w]
identifier[i] += literal[int]
identifier[lines] . identifier[append] ( identifier[makeCJKParaLine] ( identifier[U] [ identifier[lineStartPos] : identifier[i] ], identifier[extraSpace] , identifier[calcBounds] ))
keyword[try] :
identifier[maxWidth] = identifier[maxWidths] [ identifier[len] ( identifier[lines] )]
keyword[except] identifier[IndexError] :
identifier[maxWidth] = identifier[maxWidths] [- literal[int] ]
identifier[lineStartPos] = identifier[i]
identifier[widthUsed] = identifier[w]
identifier[i] -= literal[int]
keyword[if] identifier[widthUsed] > literal[int] :
identifier[lines] . identifier[append] ( identifier[makeCJKParaLine] ( identifier[U] [ identifier[lineStartPos] :], identifier[maxWidth] - identifier[widthUsed] , identifier[calcBounds] ))
keyword[return] identifier[ParaLines] ( identifier[kind] = literal[int] , identifier[lines] = identifier[lines] )
|
def cjkFragSplit(frags, maxWidths, calcBounds, encoding='utf8'):
"""
This attempts to be wordSplit for frags using the dumb algorithm
"""
from reportlab.rl_config import _FUZZ
U = [] # get a list of single glyphs with their widths etc etc
for f in frags:
text = f.text
if not isinstance(text, unicode):
text = text.decode(encoding) # depends on [control=['if'], data=[]]
if text:
U.extend([cjkU(t, f, encoding) for t in text]) # depends on [control=['if'], data=[]]
else:
U.append(cjkU(text, f, encoding)) # depends on [control=['for'], data=['f']]
lines = []
widthUsed = lineStartPos = 0
maxWidth = maxWidths[0]
for (i, u) in enumerate(U):
w = u.width
widthUsed += w
lineBreak = hasattr(u.frag, 'lineBreak')
endLine = widthUsed > maxWidth + _FUZZ and widthUsed > 0 or lineBreak
if endLine:
if lineBreak:
continue # depends on [control=['if'], data=[]]
extraSpace = maxWidth - widthUsed + w
#This is the most important of the Japanese typography rules.
#if next character cannot start a line, wrap it up to this line so it hangs
#in the right margin. We won't do two or more though - that's unlikely and
#would result in growing ugliness.
nextChar = U[i]
if nextChar in ALL_CANNOT_START:
extraSpace -= w
i += 1 # depends on [control=['if'], data=[]]
lines.append(makeCJKParaLine(U[lineStartPos:i], extraSpace, calcBounds))
try:
maxWidth = maxWidths[len(lines)] # depends on [control=['try'], data=[]]
except IndexError:
maxWidth = maxWidths[-1] # use the last one # depends on [control=['except'], data=[]]
lineStartPos = i
widthUsed = w
i -= 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
#any characters left?
if widthUsed > 0:
lines.append(makeCJKParaLine(U[lineStartPos:], maxWidth - widthUsed, calcBounds)) # depends on [control=['if'], data=['widthUsed']]
return ParaLines(kind=1, lines=lines)
|
def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START])
return starts
|
def function[starts, parameter[self, layer]]:
constant[Retrieve start positions of elements if given layer.]
variable[starts] assign[=] list[[]]
for taget[name[data]] in starred[call[name[self]][name[layer]]] begin[:]
call[name[starts].append, parameter[call[name[data]][name[START]]]]
return[name[starts]]
|
keyword[def] identifier[starts] ( identifier[self] , identifier[layer] ):
literal[string]
identifier[starts] =[]
keyword[for] identifier[data] keyword[in] identifier[self] [ identifier[layer] ]:
identifier[starts] . identifier[append] ( identifier[data] [ identifier[START] ])
keyword[return] identifier[starts]
|
def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START]) # depends on [control=['for'], data=['data']]
return starts
|
def find_function(self, context, funname):
"""Find a function in the given context by name.
This function will first search the list of builtins and if the
desired function is not a builtin, it will continue to search
the given context.
Args:
context (object): A dict or class that is a typedargs context
funname (str): The name of the function to find
Returns:
callable: The found function.
"""
if funname in self.builtins:
return self.builtins[funname]
func = None
if isinstance(context, dict):
if funname in context:
func = context[funname]
#Allowed lazy loading of functions
if isinstance(func, str):
func = self._deferred_add(func)
context[funname] = func
elif hasattr(context, funname):
func = getattr(context, funname)
if func is None:
raise NotFoundError("Function not found", function=funname)
return func
|
def function[find_function, parameter[self, context, funname]]:
constant[Find a function in the given context by name.
This function will first search the list of builtins and if the
desired function is not a builtin, it will continue to search
the given context.
Args:
context (object): A dict or class that is a typedargs context
funname (str): The name of the function to find
Returns:
callable: The found function.
]
if compare[name[funname] in name[self].builtins] begin[:]
return[call[name[self].builtins][name[funname]]]
variable[func] assign[=] constant[None]
if call[name[isinstance], parameter[name[context], name[dict]]] begin[:]
if compare[name[funname] in name[context]] begin[:]
variable[func] assign[=] call[name[context]][name[funname]]
if call[name[isinstance], parameter[name[func], name[str]]] begin[:]
variable[func] assign[=] call[name[self]._deferred_add, parameter[name[func]]]
call[name[context]][name[funname]] assign[=] name[func]
if compare[name[func] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b026c400>
return[name[func]]
|
keyword[def] identifier[find_function] ( identifier[self] , identifier[context] , identifier[funname] ):
literal[string]
keyword[if] identifier[funname] keyword[in] identifier[self] . identifier[builtins] :
keyword[return] identifier[self] . identifier[builtins] [ identifier[funname] ]
identifier[func] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[context] , identifier[dict] ):
keyword[if] identifier[funname] keyword[in] identifier[context] :
identifier[func] = identifier[context] [ identifier[funname] ]
keyword[if] identifier[isinstance] ( identifier[func] , identifier[str] ):
identifier[func] = identifier[self] . identifier[_deferred_add] ( identifier[func] )
identifier[context] [ identifier[funname] ]= identifier[func]
keyword[elif] identifier[hasattr] ( identifier[context] , identifier[funname] ):
identifier[func] = identifier[getattr] ( identifier[context] , identifier[funname] )
keyword[if] identifier[func] keyword[is] keyword[None] :
keyword[raise] identifier[NotFoundError] ( literal[string] , identifier[function] = identifier[funname] )
keyword[return] identifier[func]
|
def find_function(self, context, funname):
"""Find a function in the given context by name.
This function will first search the list of builtins and if the
desired function is not a builtin, it will continue to search
the given context.
Args:
context (object): A dict or class that is a typedargs context
funname (str): The name of the function to find
Returns:
callable: The found function.
"""
if funname in self.builtins:
return self.builtins[funname] # depends on [control=['if'], data=['funname']]
func = None
if isinstance(context, dict):
if funname in context:
func = context[funname]
#Allowed lazy loading of functions
if isinstance(func, str):
func = self._deferred_add(func)
context[funname] = func # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['funname', 'context']] # depends on [control=['if'], data=[]]
elif hasattr(context, funname):
func = getattr(context, funname) # depends on [control=['if'], data=[]]
if func is None:
raise NotFoundError('Function not found', function=funname) # depends on [control=['if'], data=[]]
return func
|
def __get_idxs(self, words):
"""Returns indexes to appropriate words."""
if self.bow:
return list(itertools.chain.from_iterable(
[self.positions[z] for z in words]))
else:
return self.positions[words]
|
def function[__get_idxs, parameter[self, words]]:
constant[Returns indexes to appropriate words.]
if name[self].bow begin[:]
return[call[name[list], parameter[call[name[itertools].chain.from_iterable, parameter[<ast.ListComp object at 0x7da1b208e020>]]]]]
|
keyword[def] identifier[__get_idxs] ( identifier[self] , identifier[words] ):
literal[string]
keyword[if] identifier[self] . identifier[bow] :
keyword[return] identifier[list] ( identifier[itertools] . identifier[chain] . identifier[from_iterable] (
[ identifier[self] . identifier[positions] [ identifier[z] ] keyword[for] identifier[z] keyword[in] identifier[words] ]))
keyword[else] :
keyword[return] identifier[self] . identifier[positions] [ identifier[words] ]
|
def __get_idxs(self, words):
"""Returns indexes to appropriate words."""
if self.bow:
return list(itertools.chain.from_iterable([self.positions[z] for z in words])) # depends on [control=['if'], data=[]]
else:
return self.positions[words]
|
def load(self, coll):
"""Load and receive the metadata associated with a collection.
If the metadata for the collection is not cached yet its metadata file is read in and stored.
If the cache has seen the collection before the mtime of the metadata file is checked and if it is more recent
than the cached time, the cache is updated and returned otherwise the cached version is returned.
:param str coll: Name of a collection
:return: The cached metadata for a collection
:rtype: dict
"""
path = self.template_str.format(coll=coll)
try:
mtime = os.path.getmtime(path)
obj = self.cache.get(path)
except:
return {}
if not obj:
return self.store_new(coll, path, mtime)
cached_mtime, data = obj
if mtime == cached_mtime == mtime:
return obj
return self.store_new(coll, path, mtime)
|
def function[load, parameter[self, coll]]:
constant[Load and receive the metadata associated with a collection.
If the metadata for the collection is not cached yet its metadata file is read in and stored.
If the cache has seen the collection before the mtime of the metadata file is checked and if it is more recent
than the cached time, the cache is updated and returned otherwise the cached version is returned.
:param str coll: Name of a collection
:return: The cached metadata for a collection
:rtype: dict
]
variable[path] assign[=] call[name[self].template_str.format, parameter[]]
<ast.Try object at 0x7da204567e80>
if <ast.UnaryOp object at 0x7da2045663e0> begin[:]
return[call[name[self].store_new, parameter[name[coll], name[path], name[mtime]]]]
<ast.Tuple object at 0x7da204564820> assign[=] name[obj]
if compare[name[mtime] equal[==] name[cached_mtime]] begin[:]
return[name[obj]]
return[call[name[self].store_new, parameter[name[coll], name[path], name[mtime]]]]
|
keyword[def] identifier[load] ( identifier[self] , identifier[coll] ):
literal[string]
identifier[path] = identifier[self] . identifier[template_str] . identifier[format] ( identifier[coll] = identifier[coll] )
keyword[try] :
identifier[mtime] = identifier[os] . identifier[path] . identifier[getmtime] ( identifier[path] )
identifier[obj] = identifier[self] . identifier[cache] . identifier[get] ( identifier[path] )
keyword[except] :
keyword[return] {}
keyword[if] keyword[not] identifier[obj] :
keyword[return] identifier[self] . identifier[store_new] ( identifier[coll] , identifier[path] , identifier[mtime] )
identifier[cached_mtime] , identifier[data] = identifier[obj]
keyword[if] identifier[mtime] == identifier[cached_mtime] == identifier[mtime] :
keyword[return] identifier[obj]
keyword[return] identifier[self] . identifier[store_new] ( identifier[coll] , identifier[path] , identifier[mtime] )
|
def load(self, coll):
"""Load and receive the metadata associated with a collection.
If the metadata for the collection is not cached yet its metadata file is read in and stored.
If the cache has seen the collection before the mtime of the metadata file is checked and if it is more recent
than the cached time, the cache is updated and returned otherwise the cached version is returned.
:param str coll: Name of a collection
:return: The cached metadata for a collection
:rtype: dict
"""
path = self.template_str.format(coll=coll)
try:
mtime = os.path.getmtime(path)
obj = self.cache.get(path) # depends on [control=['try'], data=[]]
except:
return {} # depends on [control=['except'], data=[]]
if not obj:
return self.store_new(coll, path, mtime) # depends on [control=['if'], data=[]]
(cached_mtime, data) = obj
if mtime == cached_mtime == mtime:
return obj # depends on [control=['if'], data=[]]
return self.store_new(coll, path, mtime)
|
def scrape(cls, start, end, output):
"""
Scrape a MLBAM Data
:param start: Start Day(YYYYMMDD)
:param end: End Day(YYYYMMDD)
:param output: Output directory
"""
# Logger setting
logging.basicConfig(
level=logging.INFO,
format="time:%(asctime)s.%(msecs)03d" + "\tmessage:%(message)s",
datefmt="%Y-%m-%d %H:%M:%S"
)
# validate
for param_day in ({'name': 'Start Day', 'value': start}, {'name': 'End Day', 'value': end}):
try:
cls._validate_datetime(param_day['value'])
except (validators.Invalid, MlbAmException) as e:
raise MlbAmException('{msg} a {name}.'.format(name=param_day['name'], msg=e.msg))
cls._validate_datetime_from_to(start, end)
# Download
logging.info('->- MLBAM dataset download start')
mlb = MlbAm(os.path.dirname(os.path.abspath(__file__)), output, cls._days(start, end))
mlb.download()
logging.info('-<- MLBAM dataset download end')
|
def function[scrape, parameter[cls, start, end, output]]:
constant[
Scrape a MLBAM Data
:param start: Start Day(YYYYMMDD)
:param end: End Day(YYYYMMDD)
:param output: Output directory
]
call[name[logging].basicConfig, parameter[]]
for taget[name[param_day]] in starred[tuple[[<ast.Dict object at 0x7da20c7944c0>, <ast.Dict object at 0x7da20c7960b0>]]] begin[:]
<ast.Try object at 0x7da20c7957e0>
call[name[cls]._validate_datetime_from_to, parameter[name[start], name[end]]]
call[name[logging].info, parameter[constant[->- MLBAM dataset download start]]]
variable[mlb] assign[=] call[name[MlbAm], parameter[call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[__file__]]]]], name[output], call[name[cls]._days, parameter[name[start], name[end]]]]]
call[name[mlb].download, parameter[]]
call[name[logging].info, parameter[constant[-<- MLBAM dataset download end]]]
|
keyword[def] identifier[scrape] ( identifier[cls] , identifier[start] , identifier[end] , identifier[output] ):
literal[string]
identifier[logging] . identifier[basicConfig] (
identifier[level] = identifier[logging] . identifier[INFO] ,
identifier[format] = literal[string] + literal[string] ,
identifier[datefmt] = literal[string]
)
keyword[for] identifier[param_day] keyword[in] ({ literal[string] : literal[string] , literal[string] : identifier[start] },{ literal[string] : literal[string] , literal[string] : identifier[end] }):
keyword[try] :
identifier[cls] . identifier[_validate_datetime] ( identifier[param_day] [ literal[string] ])
keyword[except] ( identifier[validators] . identifier[Invalid] , identifier[MlbAmException] ) keyword[as] identifier[e] :
keyword[raise] identifier[MlbAmException] ( literal[string] . identifier[format] ( identifier[name] = identifier[param_day] [ literal[string] ], identifier[msg] = identifier[e] . identifier[msg] ))
identifier[cls] . identifier[_validate_datetime_from_to] ( identifier[start] , identifier[end] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[mlb] = identifier[MlbAm] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )), identifier[output] , identifier[cls] . identifier[_days] ( identifier[start] , identifier[end] ))
identifier[mlb] . identifier[download] ()
identifier[logging] . identifier[info] ( literal[string] )
|
def scrape(cls, start, end, output):
"""
Scrape a MLBAM Data
:param start: Start Day(YYYYMMDD)
:param end: End Day(YYYYMMDD)
:param output: Output directory
"""
# Logger setting
logging.basicConfig(level=logging.INFO, format='time:%(asctime)s.%(msecs)03d' + '\tmessage:%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# validate
for param_day in ({'name': 'Start Day', 'value': start}, {'name': 'End Day', 'value': end}):
try:
cls._validate_datetime(param_day['value']) # depends on [control=['try'], data=[]]
except (validators.Invalid, MlbAmException) as e:
raise MlbAmException('{msg} a {name}.'.format(name=param_day['name'], msg=e.msg)) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['param_day']]
cls._validate_datetime_from_to(start, end)
# Download
logging.info('->- MLBAM dataset download start')
mlb = MlbAm(os.path.dirname(os.path.abspath(__file__)), output, cls._days(start, end))
mlb.download()
logging.info('-<- MLBAM dataset download end')
|
def chunks(self, include_inactive=False):
"""
@return A generator that yields the chunks of the log file
starting with the first chunk, which is always found directly
after the FileHeader.
If `include_inactive` is set to true, enumerate chunks beyond those
declared in the file header (and may therefore be corrupt).
"""
if include_inactive:
chunk_count = sys.maxsize
else:
chunk_count = self.chunk_count()
i = 0
ofs = self._offset + self.header_chunk_size()
while ofs + 0x10000 <= len(self._buf) and i < chunk_count:
yield ChunkHeader(self._buf, ofs)
ofs += 0x10000
i += 1
|
def function[chunks, parameter[self, include_inactive]]:
constant[
@return A generator that yields the chunks of the log file
starting with the first chunk, which is always found directly
after the FileHeader.
If `include_inactive` is set to true, enumerate chunks beyond those
declared in the file header (and may therefore be corrupt).
]
if name[include_inactive] begin[:]
variable[chunk_count] assign[=] name[sys].maxsize
variable[i] assign[=] constant[0]
variable[ofs] assign[=] binary_operation[name[self]._offset + call[name[self].header_chunk_size, parameter[]]]
while <ast.BoolOp object at 0x7da1b20f8310> begin[:]
<ast.Yield object at 0x7da1b20f9ed0>
<ast.AugAssign object at 0x7da1b20fb9d0>
<ast.AugAssign object at 0x7da1b20fba60>
|
keyword[def] identifier[chunks] ( identifier[self] , identifier[include_inactive] = keyword[False] ):
literal[string]
keyword[if] identifier[include_inactive] :
identifier[chunk_count] = identifier[sys] . identifier[maxsize]
keyword[else] :
identifier[chunk_count] = identifier[self] . identifier[chunk_count] ()
identifier[i] = literal[int]
identifier[ofs] = identifier[self] . identifier[_offset] + identifier[self] . identifier[header_chunk_size] ()
keyword[while] identifier[ofs] + literal[int] <= identifier[len] ( identifier[self] . identifier[_buf] ) keyword[and] identifier[i] < identifier[chunk_count] :
keyword[yield] identifier[ChunkHeader] ( identifier[self] . identifier[_buf] , identifier[ofs] )
identifier[ofs] += literal[int]
identifier[i] += literal[int]
|
def chunks(self, include_inactive=False):
"""
@return A generator that yields the chunks of the log file
starting with the first chunk, which is always found directly
after the FileHeader.
If `include_inactive` is set to true, enumerate chunks beyond those
declared in the file header (and may therefore be corrupt).
"""
if include_inactive:
chunk_count = sys.maxsize # depends on [control=['if'], data=[]]
else:
chunk_count = self.chunk_count()
i = 0
ofs = self._offset + self.header_chunk_size()
while ofs + 65536 <= len(self._buf) and i < chunk_count:
yield ChunkHeader(self._buf, ofs)
ofs += 65536
i += 1 # depends on [control=['while'], data=[]]
|
def get_n_resources_for_iteration(self, iteration, bracket_iteration):
"""Return the number of iterations to run for this barcket_i
This is just util function around `get_n_resources`
"""
bracket = self.get_bracket(iteration=iteration)
n_resources = self.get_resources(bracket=bracket)
return self.get_n_resources(n_resources=n_resources, bracket_iteration=bracket_iteration)
|
def function[get_n_resources_for_iteration, parameter[self, iteration, bracket_iteration]]:
constant[Return the number of iterations to run for this barcket_i
This is just util function around `get_n_resources`
]
variable[bracket] assign[=] call[name[self].get_bracket, parameter[]]
variable[n_resources] assign[=] call[name[self].get_resources, parameter[]]
return[call[name[self].get_n_resources, parameter[]]]
|
keyword[def] identifier[get_n_resources_for_iteration] ( identifier[self] , identifier[iteration] , identifier[bracket_iteration] ):
literal[string]
identifier[bracket] = identifier[self] . identifier[get_bracket] ( identifier[iteration] = identifier[iteration] )
identifier[n_resources] = identifier[self] . identifier[get_resources] ( identifier[bracket] = identifier[bracket] )
keyword[return] identifier[self] . identifier[get_n_resources] ( identifier[n_resources] = identifier[n_resources] , identifier[bracket_iteration] = identifier[bracket_iteration] )
|
def get_n_resources_for_iteration(self, iteration, bracket_iteration):
"""Return the number of iterations to run for this barcket_i
This is just util function around `get_n_resources`
"""
bracket = self.get_bracket(iteration=iteration)
n_resources = self.get_resources(bracket=bracket)
return self.get_n_resources(n_resources=n_resources, bracket_iteration=bracket_iteration)
|
def v_reference_leaf_leafref(ctx, stmt):
"""Verify that all leafrefs in a leaf or leaf-list have correct path"""
if (hasattr(stmt, 'i_leafref') and
stmt.i_leafref is not None and
stmt.i_leafref_expanded is False):
path_type_spec = stmt.i_leafref
not_req_inst = not(path_type_spec.require_instance)
x = validate_leafref_path(ctx, stmt,
path_type_spec.path_spec,
path_type_spec.path_,
accept_non_config_target=not_req_inst
)
if x is None:
return
ptr, expanded_path, path_list = x
path_type_spec.i_target_node = ptr
path_type_spec.i_expanded_path = expanded_path
path_type_spec.i_path_list = path_list
stmt.i_leafref_expanded = True
if ptr is not None:
chk_status(ctx, stmt, ptr)
stmt.i_leafref_ptr = (ptr, path_type_spec.pos)
|
def function[v_reference_leaf_leafref, parameter[ctx, stmt]]:
constant[Verify that all leafrefs in a leaf or leaf-list have correct path]
if <ast.BoolOp object at 0x7da18bc73490> begin[:]
variable[path_type_spec] assign[=] name[stmt].i_leafref
variable[not_req_inst] assign[=] <ast.UnaryOp object at 0x7da18bc70cd0>
variable[x] assign[=] call[name[validate_leafref_path], parameter[name[ctx], name[stmt], name[path_type_spec].path_spec, name[path_type_spec].path_]]
if compare[name[x] is constant[None]] begin[:]
return[None]
<ast.Tuple object at 0x7da18bc730a0> assign[=] name[x]
name[path_type_spec].i_target_node assign[=] name[ptr]
name[path_type_spec].i_expanded_path assign[=] name[expanded_path]
name[path_type_spec].i_path_list assign[=] name[path_list]
name[stmt].i_leafref_expanded assign[=] constant[True]
if compare[name[ptr] is_not constant[None]] begin[:]
call[name[chk_status], parameter[name[ctx], name[stmt], name[ptr]]]
name[stmt].i_leafref_ptr assign[=] tuple[[<ast.Name object at 0x7da20c7c9420>, <ast.Attribute object at 0x7da20c7cab30>]]
|
keyword[def] identifier[v_reference_leaf_leafref] ( identifier[ctx] , identifier[stmt] ):
literal[string]
keyword[if] ( identifier[hasattr] ( identifier[stmt] , literal[string] ) keyword[and]
identifier[stmt] . identifier[i_leafref] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[stmt] . identifier[i_leafref_expanded] keyword[is] keyword[False] ):
identifier[path_type_spec] = identifier[stmt] . identifier[i_leafref]
identifier[not_req_inst] = keyword[not] ( identifier[path_type_spec] . identifier[require_instance] )
identifier[x] = identifier[validate_leafref_path] ( identifier[ctx] , identifier[stmt] ,
identifier[path_type_spec] . identifier[path_spec] ,
identifier[path_type_spec] . identifier[path_] ,
identifier[accept_non_config_target] = identifier[not_req_inst]
)
keyword[if] identifier[x] keyword[is] keyword[None] :
keyword[return]
identifier[ptr] , identifier[expanded_path] , identifier[path_list] = identifier[x]
identifier[path_type_spec] . identifier[i_target_node] = identifier[ptr]
identifier[path_type_spec] . identifier[i_expanded_path] = identifier[expanded_path]
identifier[path_type_spec] . identifier[i_path_list] = identifier[path_list]
identifier[stmt] . identifier[i_leafref_expanded] = keyword[True]
keyword[if] identifier[ptr] keyword[is] keyword[not] keyword[None] :
identifier[chk_status] ( identifier[ctx] , identifier[stmt] , identifier[ptr] )
identifier[stmt] . identifier[i_leafref_ptr] =( identifier[ptr] , identifier[path_type_spec] . identifier[pos] )
|
def v_reference_leaf_leafref(ctx, stmt):
"""Verify that all leafrefs in a leaf or leaf-list have correct path"""
if hasattr(stmt, 'i_leafref') and stmt.i_leafref is not None and (stmt.i_leafref_expanded is False):
path_type_spec = stmt.i_leafref
not_req_inst = not path_type_spec.require_instance
x = validate_leafref_path(ctx, stmt, path_type_spec.path_spec, path_type_spec.path_, accept_non_config_target=not_req_inst)
if x is None:
return # depends on [control=['if'], data=[]]
(ptr, expanded_path, path_list) = x
path_type_spec.i_target_node = ptr
path_type_spec.i_expanded_path = expanded_path
path_type_spec.i_path_list = path_list
stmt.i_leafref_expanded = True
if ptr is not None:
chk_status(ctx, stmt, ptr)
stmt.i_leafref_ptr = (ptr, path_type_spec.pos) # depends on [control=['if'], data=['ptr']] # depends on [control=['if'], data=[]]
|
def determinize(self):
"""
Transforms a Non Deterministic DFA into a Deterministic
Args:
None
Returns:
DFA: The resulting DFA
Creating an equivalent DFA is done using the standard algorithm.
A nice description can be found in the book:
Harry R. Lewis and Christos H. Papadimitriou. 1998.
E
print target_dfa_statelements of the Theory of Computation.
"""
# Compute the \epsilon-closure for all states and save it in a diagram
epsilon_closure = {}
for state in self.states:
sid = state.stateid
epsilon_closure[sid] = self._epsilon_closure(state)
# Get a transition diagram to speed up computations
trans_table = {}
for state in self.states:
trans_table[state.stateid] = defaultdict(set)
for arc in state:
char = self.isyms.find(arc.ilabel)
trans_table[state.stateid][char].add(arc.nextstate)
# is_final function:
# Given a set of nfa states representing a dfa_state return 1 if the
# corresponding DFA state is a final state, i.e. if any of the
# corresponding NFA states are final.
is_final = lambda nfa_states, dfa_state: True \
if sum([ int(nfa_states[x].final) for x in dfa_state ]) >= 1 \
else False
# Precomputation is over, start executing the conversion algorithm
state_idx = 1
nfa_states = copy.deepcopy(self.states)
self.states = []
# Initialize the new DFA state list
self.add_state()
new_initial = epsilon_closure[nfa_states[0].stateid]
self.states[0].final = is_final(nfa_states, new_initial)
dfa_state_idx_map = { frozenset(new_initial) : 0 }
stack = [new_initial]
while True:
# Iterate until all added DFA states are processed.
if not stack:
break
# This is a set of states from the NFA
src_dfa_state = stack.pop()
src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)]
for char in self.alphabet:
# Compute the set of target states
target_dfa_state = set([])
for nfa_state in src_dfa_state:
next_states = \
set([y for x in trans_table[nfa_state][char] \
for y in epsilon_closure[x] ])
target_dfa_state.update(next_states)
# If the computed state set is not part of our new DFA add it,
# along with the transition for the current character.
if frozenset(target_dfa_state) not in dfa_state_idx_map:
self.add_state()
dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx
self.states[state_idx].final = is_final(nfa_states,
target_dfa_state)
state_idx += 1
stack.append(target_dfa_state)
dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)]
self.add_arc(src_dfa_state_idx, dst_state_idx, char)
return self
|
def function[determinize, parameter[self]]:
constant[
Transforms a Non Deterministic DFA into a Deterministic
Args:
None
Returns:
DFA: The resulting DFA
Creating an equivalent DFA is done using the standard algorithm.
A nice description can be found in the book:
Harry R. Lewis and Christos H. Papadimitriou. 1998.
E
print target_dfa_statelements of the Theory of Computation.
]
variable[epsilon_closure] assign[=] dictionary[[], []]
for taget[name[state]] in starred[name[self].states] begin[:]
variable[sid] assign[=] name[state].stateid
call[name[epsilon_closure]][name[sid]] assign[=] call[name[self]._epsilon_closure, parameter[name[state]]]
variable[trans_table] assign[=] dictionary[[], []]
for taget[name[state]] in starred[name[self].states] begin[:]
call[name[trans_table]][name[state].stateid] assign[=] call[name[defaultdict], parameter[name[set]]]
for taget[name[arc]] in starred[name[state]] begin[:]
variable[char] assign[=] call[name[self].isyms.find, parameter[name[arc].ilabel]]
call[call[call[name[trans_table]][name[state].stateid]][name[char]].add, parameter[name[arc].nextstate]]
variable[is_final] assign[=] <ast.Lambda object at 0x7da18fe92260>
variable[state_idx] assign[=] constant[1]
variable[nfa_states] assign[=] call[name[copy].deepcopy, parameter[name[self].states]]
name[self].states assign[=] list[[]]
call[name[self].add_state, parameter[]]
variable[new_initial] assign[=] call[name[epsilon_closure]][call[name[nfa_states]][constant[0]].stateid]
call[name[self].states][constant[0]].final assign[=] call[name[is_final], parameter[name[nfa_states], name[new_initial]]]
variable[dfa_state_idx_map] assign[=] dictionary[[<ast.Call object at 0x7da20cabf370>], [<ast.Constant object at 0x7da20cabdff0>]]
variable[stack] assign[=] list[[<ast.Name object at 0x7da20cabe0b0>]]
while constant[True] begin[:]
if <ast.UnaryOp object at 0x7da20cabfeb0> begin[:]
break
variable[src_dfa_state] assign[=] call[name[stack].pop, parameter[]]
variable[src_dfa_state_idx] assign[=] call[name[dfa_state_idx_map]][call[name[frozenset], parameter[name[src_dfa_state]]]]
for taget[name[char]] in starred[name[self].alphabet] begin[:]
variable[target_dfa_state] assign[=] call[name[set], parameter[list[[]]]]
for taget[name[nfa_state]] in starred[name[src_dfa_state]] begin[:]
variable[next_states] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da20cabe590>]]
call[name[target_dfa_state].update, parameter[name[next_states]]]
if compare[call[name[frozenset], parameter[name[target_dfa_state]]] <ast.NotIn object at 0x7da2590d7190> name[dfa_state_idx_map]] begin[:]
call[name[self].add_state, parameter[]]
call[name[dfa_state_idx_map]][call[name[frozenset], parameter[name[target_dfa_state]]]] assign[=] name[state_idx]
call[name[self].states][name[state_idx]].final assign[=] call[name[is_final], parameter[name[nfa_states], name[target_dfa_state]]]
<ast.AugAssign object at 0x7da18dc9ac50>
call[name[stack].append, parameter[name[target_dfa_state]]]
variable[dst_state_idx] assign[=] call[name[dfa_state_idx_map]][call[name[frozenset], parameter[name[target_dfa_state]]]]
call[name[self].add_arc, parameter[name[src_dfa_state_idx], name[dst_state_idx], name[char]]]
return[name[self]]
|
keyword[def] identifier[determinize] ( identifier[self] ):
literal[string]
identifier[epsilon_closure] ={}
keyword[for] identifier[state] keyword[in] identifier[self] . identifier[states] :
identifier[sid] = identifier[state] . identifier[stateid]
identifier[epsilon_closure] [ identifier[sid] ]= identifier[self] . identifier[_epsilon_closure] ( identifier[state] )
identifier[trans_table] ={}
keyword[for] identifier[state] keyword[in] identifier[self] . identifier[states] :
identifier[trans_table] [ identifier[state] . identifier[stateid] ]= identifier[defaultdict] ( identifier[set] )
keyword[for] identifier[arc] keyword[in] identifier[state] :
identifier[char] = identifier[self] . identifier[isyms] . identifier[find] ( identifier[arc] . identifier[ilabel] )
identifier[trans_table] [ identifier[state] . identifier[stateid] ][ identifier[char] ]. identifier[add] ( identifier[arc] . identifier[nextstate] )
identifier[is_final] = keyword[lambda] identifier[nfa_states] , identifier[dfa_state] : keyword[True] keyword[if] identifier[sum] ([ identifier[int] ( identifier[nfa_states] [ identifier[x] ]. identifier[final] ) keyword[for] identifier[x] keyword[in] identifier[dfa_state] ])>= literal[int] keyword[else] keyword[False]
identifier[state_idx] = literal[int]
identifier[nfa_states] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[states] )
identifier[self] . identifier[states] =[]
identifier[self] . identifier[add_state] ()
identifier[new_initial] = identifier[epsilon_closure] [ identifier[nfa_states] [ literal[int] ]. identifier[stateid] ]
identifier[self] . identifier[states] [ literal[int] ]. identifier[final] = identifier[is_final] ( identifier[nfa_states] , identifier[new_initial] )
identifier[dfa_state_idx_map] ={ identifier[frozenset] ( identifier[new_initial] ): literal[int] }
identifier[stack] =[ identifier[new_initial] ]
keyword[while] keyword[True] :
keyword[if] keyword[not] identifier[stack] :
keyword[break]
identifier[src_dfa_state] = identifier[stack] . identifier[pop] ()
identifier[src_dfa_state_idx] = identifier[dfa_state_idx_map] [ identifier[frozenset] ( identifier[src_dfa_state] )]
keyword[for] identifier[char] keyword[in] identifier[self] . identifier[alphabet] :
identifier[target_dfa_state] = identifier[set] ([])
keyword[for] identifier[nfa_state] keyword[in] identifier[src_dfa_state] :
identifier[next_states] = identifier[set] ([ identifier[y] keyword[for] identifier[x] keyword[in] identifier[trans_table] [ identifier[nfa_state] ][ identifier[char] ] keyword[for] identifier[y] keyword[in] identifier[epsilon_closure] [ identifier[x] ]])
identifier[target_dfa_state] . identifier[update] ( identifier[next_states] )
keyword[if] identifier[frozenset] ( identifier[target_dfa_state] ) keyword[not] keyword[in] identifier[dfa_state_idx_map] :
identifier[self] . identifier[add_state] ()
identifier[dfa_state_idx_map] [ identifier[frozenset] ( identifier[target_dfa_state] )]= identifier[state_idx]
identifier[self] . identifier[states] [ identifier[state_idx] ]. identifier[final] = identifier[is_final] ( identifier[nfa_states] ,
identifier[target_dfa_state] )
identifier[state_idx] += literal[int]
identifier[stack] . identifier[append] ( identifier[target_dfa_state] )
identifier[dst_state_idx] = identifier[dfa_state_idx_map] [ identifier[frozenset] ( identifier[target_dfa_state] )]
identifier[self] . identifier[add_arc] ( identifier[src_dfa_state_idx] , identifier[dst_state_idx] , identifier[char] )
keyword[return] identifier[self]
|
def determinize(self):
"""
Transforms a Non Deterministic DFA into a Deterministic
Args:
None
Returns:
DFA: The resulting DFA
Creating an equivalent DFA is done using the standard algorithm.
A nice description can be found in the book:
Harry R. Lewis and Christos H. Papadimitriou. 1998.
E
print target_dfa_statelements of the Theory of Computation.
"""
# Compute the \epsilon-closure for all states and save it in a diagram
epsilon_closure = {}
for state in self.states:
sid = state.stateid
epsilon_closure[sid] = self._epsilon_closure(state) # depends on [control=['for'], data=['state']]
# Get a transition diagram to speed up computations
trans_table = {}
for state in self.states:
trans_table[state.stateid] = defaultdict(set)
for arc in state:
char = self.isyms.find(arc.ilabel)
trans_table[state.stateid][char].add(arc.nextstate) # depends on [control=['for'], data=['arc']] # depends on [control=['for'], data=['state']]
# is_final function:
# Given a set of nfa states representing a dfa_state return 1 if the
# corresponding DFA state is a final state, i.e. if any of the
# corresponding NFA states are final.
is_final = lambda nfa_states, dfa_state: True if sum([int(nfa_states[x].final) for x in dfa_state]) >= 1 else False
# Precomputation is over, start executing the conversion algorithm
state_idx = 1
nfa_states = copy.deepcopy(self.states)
self.states = []
# Initialize the new DFA state list
self.add_state()
new_initial = epsilon_closure[nfa_states[0].stateid]
self.states[0].final = is_final(nfa_states, new_initial)
dfa_state_idx_map = {frozenset(new_initial): 0}
stack = [new_initial]
while True:
# Iterate until all added DFA states are processed.
if not stack:
break # depends on [control=['if'], data=[]]
# This is a set of states from the NFA
src_dfa_state = stack.pop()
src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)]
for char in self.alphabet:
# Compute the set of target states
target_dfa_state = set([])
for nfa_state in src_dfa_state:
next_states = set([y for x in trans_table[nfa_state][char] for y in epsilon_closure[x]])
target_dfa_state.update(next_states) # depends on [control=['for'], data=['nfa_state']]
# If the computed state set is not part of our new DFA add it,
# along with the transition for the current character.
if frozenset(target_dfa_state) not in dfa_state_idx_map:
self.add_state()
dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx
self.states[state_idx].final = is_final(nfa_states, target_dfa_state)
state_idx += 1
stack.append(target_dfa_state) # depends on [control=['if'], data=['dfa_state_idx_map']]
dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)]
self.add_arc(src_dfa_state_idx, dst_state_idx, char) # depends on [control=['for'], data=['char']] # depends on [control=['while'], data=[]]
return self
|
def _check_cython_sources(self, extension):
"""
Where relevant, make sure that the .c files associated with .pyx
modules are present (if building without Cython installed).
"""
# Determine the compiler we'll be using
if self.compiler is None:
compiler = get_default_compiler()
else:
compiler = self.compiler
# Replace .pyx with C-equivalents, unless c files are missing
for jdx, src in enumerate(extension.sources):
base, ext = os.path.splitext(src)
pyxfn = base + '.pyx'
cfn = base + '.c'
cppfn = base + '.cpp'
if not os.path.isfile(pyxfn):
continue
if self._uses_cython:
extension.sources[jdx] = pyxfn
else:
if os.path.isfile(cfn):
extension.sources[jdx] = cfn
elif os.path.isfile(cppfn):
extension.sources[jdx] = cppfn
else:
msg = (
'Could not find C/C++ file {0}.(c/cpp) for Cython '
'file {1} when building extension {2}. Cython '
'must be installed to build from a git '
'checkout.'.format(base, pyxfn, extension.name))
raise IOError(errno.ENOENT, msg, cfn)
# Cython (at least as of 0.29.2) uses deprecated Numpy API features
# the use of which produces a few warnings when compiling.
# These additional flags should squelch those warnings.
# TODO: Feel free to remove this if/when a Cython update
# removes use of the deprecated Numpy API
if compiler == 'unix':
extension.extra_compile_args.extend([
'-Wp,-w', '-Wno-unused-function'])
|
def function[_check_cython_sources, parameter[self, extension]]:
constant[
Where relevant, make sure that the .c files associated with .pyx
modules are present (if building without Cython installed).
]
if compare[name[self].compiler is constant[None]] begin[:]
variable[compiler] assign[=] call[name[get_default_compiler], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6ab580>, <ast.Name object at 0x7da20c6aa4d0>]]] in starred[call[name[enumerate], parameter[name[extension].sources]]] begin[:]
<ast.Tuple object at 0x7da20c6a82b0> assign[=] call[name[os].path.splitext, parameter[name[src]]]
variable[pyxfn] assign[=] binary_operation[name[base] + constant[.pyx]]
variable[cfn] assign[=] binary_operation[name[base] + constant[.c]]
variable[cppfn] assign[=] binary_operation[name[base] + constant[.cpp]]
if <ast.UnaryOp object at 0x7da20c6abf70> begin[:]
continue
if name[self]._uses_cython begin[:]
call[name[extension].sources][name[jdx]] assign[=] name[pyxfn]
if compare[name[compiler] equal[==] constant[unix]] begin[:]
call[name[extension].extra_compile_args.extend, parameter[list[[<ast.Constant object at 0x7da18c4ce2c0>, <ast.Constant object at 0x7da18c4cefb0>]]]]
|
keyword[def] identifier[_check_cython_sources] ( identifier[self] , identifier[extension] ):
literal[string]
keyword[if] identifier[self] . identifier[compiler] keyword[is] keyword[None] :
identifier[compiler] = identifier[get_default_compiler] ()
keyword[else] :
identifier[compiler] = identifier[self] . identifier[compiler]
keyword[for] identifier[jdx] , identifier[src] keyword[in] identifier[enumerate] ( identifier[extension] . identifier[sources] ):
identifier[base] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[src] )
identifier[pyxfn] = identifier[base] + literal[string]
identifier[cfn] = identifier[base] + literal[string]
identifier[cppfn] = identifier[base] + literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[pyxfn] ):
keyword[continue]
keyword[if] identifier[self] . identifier[_uses_cython] :
identifier[extension] . identifier[sources] [ identifier[jdx] ]= identifier[pyxfn]
keyword[else] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[cfn] ):
identifier[extension] . identifier[sources] [ identifier[jdx] ]= identifier[cfn]
keyword[elif] identifier[os] . identifier[path] . identifier[isfile] ( identifier[cppfn] ):
identifier[extension] . identifier[sources] [ identifier[jdx] ]= identifier[cppfn]
keyword[else] :
identifier[msg] =(
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[base] , identifier[pyxfn] , identifier[extension] . identifier[name] ))
keyword[raise] identifier[IOError] ( identifier[errno] . identifier[ENOENT] , identifier[msg] , identifier[cfn] )
keyword[if] identifier[compiler] == literal[string] :
identifier[extension] . identifier[extra_compile_args] . identifier[extend] ([
literal[string] , literal[string] ])
|
def _check_cython_sources(self, extension):
"""
Where relevant, make sure that the .c files associated with .pyx
modules are present (if building without Cython installed).
"""
# Determine the compiler we'll be using
if self.compiler is None:
compiler = get_default_compiler() # depends on [control=['if'], data=[]]
else:
compiler = self.compiler
# Replace .pyx with C-equivalents, unless c files are missing
for (jdx, src) in enumerate(extension.sources):
(base, ext) = os.path.splitext(src)
pyxfn = base + '.pyx'
cfn = base + '.c'
cppfn = base + '.cpp'
if not os.path.isfile(pyxfn):
continue # depends on [control=['if'], data=[]]
if self._uses_cython:
extension.sources[jdx] = pyxfn # depends on [control=['if'], data=[]]
elif os.path.isfile(cfn):
extension.sources[jdx] = cfn # depends on [control=['if'], data=[]]
elif os.path.isfile(cppfn):
extension.sources[jdx] = cppfn # depends on [control=['if'], data=[]]
else:
msg = 'Could not find C/C++ file {0}.(c/cpp) for Cython file {1} when building extension {2}. Cython must be installed to build from a git checkout.'.format(base, pyxfn, extension.name)
raise IOError(errno.ENOENT, msg, cfn)
# Cython (at least as of 0.29.2) uses deprecated Numpy API features
# the use of which produces a few warnings when compiling.
# These additional flags should squelch those warnings.
# TODO: Feel free to remove this if/when a Cython update
# removes use of the deprecated Numpy API
if compiler == 'unix':
extension.extra_compile_args.extend(['-Wp,-w', '-Wno-unused-function']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def topil(self):
"""Returns a PIL.Image version of this Pix"""
from PIL import Image
# Leptonica manages data in words, so it implicitly does an endian
# swap. Tell Pillow about this when it reads the data.
pix = self
if sys.byteorder == 'little':
if self.mode == 'RGB':
raw_mode = 'XBGR'
elif self.mode == 'RGBA':
raw_mode = 'ABGR'
elif self.mode == '1':
raw_mode = '1;I'
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata))
else:
raw_mode = self.mode
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata))
else:
raw_mode = self.mode # no endian swap needed
size = (pix._cdata.w, pix._cdata.h)
bytecount = pix._cdata.wpl * 4 * pix._cdata.h
buf = ffi.buffer(pix._cdata.data, bytecount)
stride = pix._cdata.wpl * 4
im = Image.frombytes(self.mode, size, buf, 'raw', raw_mode, stride)
return im
|
def function[topil, parameter[self]]:
constant[Returns a PIL.Image version of this Pix]
from relative_module[PIL] import module[Image]
variable[pix] assign[=] name[self]
if compare[name[sys].byteorder equal[==] constant[little]] begin[:]
if compare[name[self].mode equal[==] constant[RGB]] begin[:]
variable[raw_mode] assign[=] constant[XBGR]
variable[size] assign[=] tuple[[<ast.Attribute object at 0x7da1b1bd2e60>, <ast.Attribute object at 0x7da1b1bd3fd0>]]
variable[bytecount] assign[=] binary_operation[binary_operation[name[pix]._cdata.wpl * constant[4]] * name[pix]._cdata.h]
variable[buf] assign[=] call[name[ffi].buffer, parameter[name[pix]._cdata.data, name[bytecount]]]
variable[stride] assign[=] binary_operation[name[pix]._cdata.wpl * constant[4]]
variable[im] assign[=] call[name[Image].frombytes, parameter[name[self].mode, name[size], name[buf], constant[raw], name[raw_mode], name[stride]]]
return[name[im]]
|
keyword[def] identifier[topil] ( identifier[self] ):
literal[string]
keyword[from] identifier[PIL] keyword[import] identifier[Image]
identifier[pix] = identifier[self]
keyword[if] identifier[sys] . identifier[byteorder] == literal[string] :
keyword[if] identifier[self] . identifier[mode] == literal[string] :
identifier[raw_mode] = literal[string]
keyword[elif] identifier[self] . identifier[mode] == literal[string] :
identifier[raw_mode] = literal[string]
keyword[elif] identifier[self] . identifier[mode] == literal[string] :
identifier[raw_mode] = literal[string]
identifier[pix] = identifier[Pix] ( identifier[lept] . identifier[pixEndianByteSwapNew] ( identifier[pix] . identifier[_cdata] ))
keyword[else] :
identifier[raw_mode] = identifier[self] . identifier[mode]
identifier[pix] = identifier[Pix] ( identifier[lept] . identifier[pixEndianByteSwapNew] ( identifier[pix] . identifier[_cdata] ))
keyword[else] :
identifier[raw_mode] = identifier[self] . identifier[mode]
identifier[size] =( identifier[pix] . identifier[_cdata] . identifier[w] , identifier[pix] . identifier[_cdata] . identifier[h] )
identifier[bytecount] = identifier[pix] . identifier[_cdata] . identifier[wpl] * literal[int] * identifier[pix] . identifier[_cdata] . identifier[h]
identifier[buf] = identifier[ffi] . identifier[buffer] ( identifier[pix] . identifier[_cdata] . identifier[data] , identifier[bytecount] )
identifier[stride] = identifier[pix] . identifier[_cdata] . identifier[wpl] * literal[int]
identifier[im] = identifier[Image] . identifier[frombytes] ( identifier[self] . identifier[mode] , identifier[size] , identifier[buf] , literal[string] , identifier[raw_mode] , identifier[stride] )
keyword[return] identifier[im]
|
def topil(self):
"""Returns a PIL.Image version of this Pix"""
from PIL import Image
# Leptonica manages data in words, so it implicitly does an endian
# swap. Tell Pillow about this when it reads the data.
pix = self
if sys.byteorder == 'little':
if self.mode == 'RGB':
raw_mode = 'XBGR' # depends on [control=['if'], data=[]]
elif self.mode == 'RGBA':
raw_mode = 'ABGR' # depends on [control=['if'], data=[]]
elif self.mode == '1':
raw_mode = '1;I'
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata)) # depends on [control=['if'], data=[]]
else:
raw_mode = self.mode
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata)) # depends on [control=['if'], data=[]]
else:
raw_mode = self.mode # no endian swap needed
size = (pix._cdata.w, pix._cdata.h)
bytecount = pix._cdata.wpl * 4 * pix._cdata.h
buf = ffi.buffer(pix._cdata.data, bytecount)
stride = pix._cdata.wpl * 4
im = Image.frombytes(self.mode, size, buf, 'raw', raw_mode, stride)
return im
|
def atomic_to_cim_xml(obj):
"""
Convert an "atomic" scalar value to a CIM-XML string and return that
string.
The returned CIM-XML string is ready for use as the text of a CIM-XML
'VALUE' element.
Parameters:
obj (:term:`CIM data type`, :term:`number`, :class:`py:datetime`):
The "atomic" input value. May be `None`.
Must not be an array/list/tuple. Must not be a :ref:`CIM object`.
Returns:
A :term:`unicode string` object in CIM-XML value format representing
the input value. `None`, if the input value is `None`.
Raises:
TypeError
"""
if obj is None: # pylint: disable=no-else-return
return obj
elif isinstance(obj, six.text_type):
return obj
elif isinstance(obj, six.binary_type):
return _to_unicode(obj)
elif isinstance(obj, bool):
return u'TRUE' if obj else u'FALSE'
elif isinstance(obj, (CIMInt, six.integer_types, CIMDateTime)):
return six.text_type(obj)
elif isinstance(obj, datetime):
return six.text_type(CIMDateTime(obj))
elif isinstance(obj, Real32):
# DSP0201 requirements for representing real32:
# The significand must be represented with at least 11 digits.
# The special values must have the case: INF, -INF, NaN.
s = u'{0:.11G}'.format(obj)
if s == 'NAN':
s = u'NaN'
elif s in ('INF', '-INF'):
pass
elif '.' not in s:
parts = s.split('E')
parts[0] = parts[0] + '.0'
s = 'E'.join(parts)
return s
elif isinstance(obj, (Real64, float)):
# DSP0201 requirements for representing real64:
# The significand must be represented with at least 17 digits.
# The special values must have the case: INF, -INF, NaN.
s = u'{0:.17G}'.format(obj)
if s == 'NAN':
s = u'NaN'
elif s in ('INF', '-INF'):
pass
elif '.' not in s:
parts = s.split('E')
parts[0] = parts[0] + '.0'
s = 'E'.join(parts)
return s
else:
raise TypeError(
_format("Value {0!A} has invalid type {1} for conversion to a "
"CIM-XML string", obj, type(obj)))
|
def function[atomic_to_cim_xml, parameter[obj]]:
constant[
Convert an "atomic" scalar value to a CIM-XML string and return that
string.
The returned CIM-XML string is ready for use as the text of a CIM-XML
'VALUE' element.
Parameters:
obj (:term:`CIM data type`, :term:`number`, :class:`py:datetime`):
The "atomic" input value. May be `None`.
Must not be an array/list/tuple. Must not be a :ref:`CIM object`.
Returns:
A :term:`unicode string` object in CIM-XML value format representing
the input value. `None`, if the input value is `None`.
Raises:
TypeError
]
if compare[name[obj] is constant[None]] begin[:]
return[name[obj]]
|
keyword[def] identifier[atomic_to_cim_xml] ( identifier[obj] ):
literal[string]
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[return] identifier[obj]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[text_type] ):
keyword[return] identifier[obj]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[binary_type] ):
keyword[return] identifier[_to_unicode] ( identifier[obj] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[bool] ):
keyword[return] literal[string] keyword[if] identifier[obj] keyword[else] literal[string]
keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[CIMInt] , identifier[six] . identifier[integer_types] , identifier[CIMDateTime] )):
keyword[return] identifier[six] . identifier[text_type] ( identifier[obj] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[datetime] ):
keyword[return] identifier[six] . identifier[text_type] ( identifier[CIMDateTime] ( identifier[obj] ))
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[Real32] ):
identifier[s] = literal[string] . identifier[format] ( identifier[obj] )
keyword[if] identifier[s] == literal[string] :
identifier[s] = literal[string]
keyword[elif] identifier[s] keyword[in] ( literal[string] , literal[string] ):
keyword[pass]
keyword[elif] literal[string] keyword[not] keyword[in] identifier[s] :
identifier[parts] = identifier[s] . identifier[split] ( literal[string] )
identifier[parts] [ literal[int] ]= identifier[parts] [ literal[int] ]+ literal[string]
identifier[s] = literal[string] . identifier[join] ( identifier[parts] )
keyword[return] identifier[s]
keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[Real64] , identifier[float] )):
identifier[s] = literal[string] . identifier[format] ( identifier[obj] )
keyword[if] identifier[s] == literal[string] :
identifier[s] = literal[string]
keyword[elif] identifier[s] keyword[in] ( literal[string] , literal[string] ):
keyword[pass]
keyword[elif] literal[string] keyword[not] keyword[in] identifier[s] :
identifier[parts] = identifier[s] . identifier[split] ( literal[string] )
identifier[parts] [ literal[int] ]= identifier[parts] [ literal[int] ]+ literal[string]
identifier[s] = literal[string] . identifier[join] ( identifier[parts] )
keyword[return] identifier[s]
keyword[else] :
keyword[raise] identifier[TypeError] (
identifier[_format] ( literal[string]
literal[string] , identifier[obj] , identifier[type] ( identifier[obj] )))
|
def atomic_to_cim_xml(obj):
"""
Convert an "atomic" scalar value to a CIM-XML string and return that
string.
The returned CIM-XML string is ready for use as the text of a CIM-XML
'VALUE' element.
Parameters:
obj (:term:`CIM data type`, :term:`number`, :class:`py:datetime`):
The "atomic" input value. May be `None`.
Must not be an array/list/tuple. Must not be a :ref:`CIM object`.
Returns:
A :term:`unicode string` object in CIM-XML value format representing
the input value. `None`, if the input value is `None`.
Raises:
TypeError
"""
if obj is None: # pylint: disable=no-else-return
return obj # depends on [control=['if'], data=['obj']]
elif isinstance(obj, six.text_type):
return obj # depends on [control=['if'], data=[]]
elif isinstance(obj, six.binary_type):
return _to_unicode(obj) # depends on [control=['if'], data=[]]
elif isinstance(obj, bool):
return u'TRUE' if obj else u'FALSE' # depends on [control=['if'], data=[]]
elif isinstance(obj, (CIMInt, six.integer_types, CIMDateTime)):
return six.text_type(obj) # depends on [control=['if'], data=[]]
elif isinstance(obj, datetime):
return six.text_type(CIMDateTime(obj)) # depends on [control=['if'], data=[]]
elif isinstance(obj, Real32):
# DSP0201 requirements for representing real32:
# The significand must be represented with at least 11 digits.
# The special values must have the case: INF, -INF, NaN.
s = u'{0:.11G}'.format(obj)
if s == 'NAN':
s = u'NaN' # depends on [control=['if'], data=['s']]
elif s in ('INF', '-INF'):
pass # depends on [control=['if'], data=[]]
elif '.' not in s:
parts = s.split('E')
parts[0] = parts[0] + '.0'
s = 'E'.join(parts) # depends on [control=['if'], data=['s']]
return s # depends on [control=['if'], data=[]]
elif isinstance(obj, (Real64, float)):
# DSP0201 requirements for representing real64:
# The significand must be represented with at least 17 digits.
# The special values must have the case: INF, -INF, NaN.
s = u'{0:.17G}'.format(obj)
if s == 'NAN':
s = u'NaN' # depends on [control=['if'], data=['s']]
elif s in ('INF', '-INF'):
pass # depends on [control=['if'], data=[]]
elif '.' not in s:
parts = s.split('E')
parts[0] = parts[0] + '.0'
s = 'E'.join(parts) # depends on [control=['if'], data=['s']]
return s # depends on [control=['if'], data=[]]
else:
raise TypeError(_format('Value {0!A} has invalid type {1} for conversion to a CIM-XML string', obj, type(obj)))
|
def _import_module(name, package=None):
"""
根据模块名载入模块
:param name:
:param package:
:return:
"""
if name.startswith('.'):
name = '{package}.{module}'.format(package=package, module=str(name).strip('.'))
__import__(name)
return sys.modules[name]
|
def function[_import_module, parameter[name, package]]:
constant[
根据模块名载入模块
:param name:
:param package:
:return:
]
if call[name[name].startswith, parameter[constant[.]]] begin[:]
variable[name] assign[=] call[constant[{package}.{module}].format, parameter[]]
call[name[__import__], parameter[name[name]]]
return[call[name[sys].modules][name[name]]]
|
keyword[def] identifier[_import_module] ( identifier[name] , identifier[package] = keyword[None] ):
literal[string]
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ):
identifier[name] = literal[string] . identifier[format] ( identifier[package] = identifier[package] , identifier[module] = identifier[str] ( identifier[name] ). identifier[strip] ( literal[string] ))
identifier[__import__] ( identifier[name] )
keyword[return] identifier[sys] . identifier[modules] [ identifier[name] ]
|
def _import_module(name, package=None):
"""
根据模块名载入模块
:param name:
:param package:
:return:
"""
if name.startswith('.'):
name = '{package}.{module}'.format(package=package, module=str(name).strip('.')) # depends on [control=['if'], data=[]]
__import__(name)
return sys.modules[name]
|
def main(pyc_file, asm_path):
"""
Create Python bytecode from a Python assembly file.
ASM_PATH gives the input Python assembly file. We suggest ending the
file in .pyc
If --pyc-file is given, that indicates the path to write the
Python bytecode. The path should end in '.pyc'.
See https://github.com/rocky/python-xasm/blob/master/HOW-TO-USE.rst
for how to write a Python assembler file.
"""
if os.stat(asm_path).st_size == 0:
print("Size of assembly file %s is zero" % asm_path)
sys.exit(1)
asm = asm_file(asm_path)
if not pyc_file and asm_path.endswith('.pyasm'):
pyc_file = asm_path[:-len('.pyasm')] + '.pyc'
write_pycfile(pyc_file, asm)
|
def function[main, parameter[pyc_file, asm_path]]:
constant[
Create Python bytecode from a Python assembly file.
ASM_PATH gives the input Python assembly file. We suggest ending the
file in .pyc
If --pyc-file is given, that indicates the path to write the
Python bytecode. The path should end in '.pyc'.
See https://github.com/rocky/python-xasm/blob/master/HOW-TO-USE.rst
for how to write a Python assembler file.
]
if compare[call[name[os].stat, parameter[name[asm_path]]].st_size equal[==] constant[0]] begin[:]
call[name[print], parameter[binary_operation[constant[Size of assembly file %s is zero] <ast.Mod object at 0x7da2590d6920> name[asm_path]]]]
call[name[sys].exit, parameter[constant[1]]]
variable[asm] assign[=] call[name[asm_file], parameter[name[asm_path]]]
if <ast.BoolOp object at 0x7da1b251ad40> begin[:]
variable[pyc_file] assign[=] binary_operation[call[name[asm_path]][<ast.Slice object at 0x7da1b251aaa0>] + constant[.pyc]]
call[name[write_pycfile], parameter[name[pyc_file], name[asm]]]
|
keyword[def] identifier[main] ( identifier[pyc_file] , identifier[asm_path] ):
literal[string]
keyword[if] identifier[os] . identifier[stat] ( identifier[asm_path] ). identifier[st_size] == literal[int] :
identifier[print] ( literal[string] % identifier[asm_path] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[asm] = identifier[asm_file] ( identifier[asm_path] )
keyword[if] keyword[not] identifier[pyc_file] keyword[and] identifier[asm_path] . identifier[endswith] ( literal[string] ):
identifier[pyc_file] = identifier[asm_path] [:- identifier[len] ( literal[string] )]+ literal[string]
identifier[write_pycfile] ( identifier[pyc_file] , identifier[asm] )
|
def main(pyc_file, asm_path):
"""
Create Python bytecode from a Python assembly file.
ASM_PATH gives the input Python assembly file. We suggest ending the
file in .pyc
If --pyc-file is given, that indicates the path to write the
Python bytecode. The path should end in '.pyc'.
See https://github.com/rocky/python-xasm/blob/master/HOW-TO-USE.rst
for how to write a Python assembler file.
"""
if os.stat(asm_path).st_size == 0:
print('Size of assembly file %s is zero' % asm_path)
sys.exit(1) # depends on [control=['if'], data=[]]
asm = asm_file(asm_path)
if not pyc_file and asm_path.endswith('.pyasm'):
pyc_file = asm_path[:-len('.pyasm')] + '.pyc' # depends on [control=['if'], data=[]]
write_pycfile(pyc_file, asm)
|
def _lock(self, name, client_id, request_id):
"""Handles locking
Locking time is stored to determine time out.
If a lock is timed out it can be acquired by a different client.
"""
if name in self._locks:
other_client_id, other_request_id, lock_time = self._locks[name]
if other_client_id == client_id:
response = (self.LOCK_ERROR + self.DELIMITER +
'Re-request of lock `%s` (old request id `%s`) by `%s` '
'(request id `%s`)' % (name, client_id, other_request_id, request_id))
self._logger.warning(response)
return response
else:
current_time = time.time()
if current_time - lock_time < self._timeout:
return self.WAIT
else:
response = (self.GO + self.DELIMITER + 'Lock `%s` by `%s` (old request id `%s) '
'timed out' % (name,
other_client_id,
other_request_id))
self._logger.info(response)
self._locks[name] = (client_id, request_id, time.time())
self._timeout_locks[(name, other_client_id)] = (request_id, lock_time)
return response
else:
self._locks[name] = (client_id, request_id, time.time())
return self.GO
|
def function[_lock, parameter[self, name, client_id, request_id]]:
constant[Handles locking
Locking time is stored to determine time out.
If a lock is timed out it can be acquired by a different client.
]
if compare[name[name] in name[self]._locks] begin[:]
<ast.Tuple object at 0x7da1b26ad3c0> assign[=] call[name[self]._locks][name[name]]
if compare[name[other_client_id] equal[==] name[client_id]] begin[:]
variable[response] assign[=] binary_operation[binary_operation[name[self].LOCK_ERROR + name[self].DELIMITER] + binary_operation[constant[Re-request of lock `%s` (old request id `%s`) by `%s` (request id `%s`)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26afcd0>, <ast.Name object at 0x7da1b26ae500>, <ast.Name object at 0x7da1b26ac670>, <ast.Name object at 0x7da1b26afa30>]]]]
call[name[self]._logger.warning, parameter[name[response]]]
return[name[response]]
|
keyword[def] identifier[_lock] ( identifier[self] , identifier[name] , identifier[client_id] , identifier[request_id] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[_locks] :
identifier[other_client_id] , identifier[other_request_id] , identifier[lock_time] = identifier[self] . identifier[_locks] [ identifier[name] ]
keyword[if] identifier[other_client_id] == identifier[client_id] :
identifier[response] =( identifier[self] . identifier[LOCK_ERROR] + identifier[self] . identifier[DELIMITER] +
literal[string]
literal[string] %( identifier[name] , identifier[client_id] , identifier[other_request_id] , identifier[request_id] ))
identifier[self] . identifier[_logger] . identifier[warning] ( identifier[response] )
keyword[return] identifier[response]
keyword[else] :
identifier[current_time] = identifier[time] . identifier[time] ()
keyword[if] identifier[current_time] - identifier[lock_time] < identifier[self] . identifier[_timeout] :
keyword[return] identifier[self] . identifier[WAIT]
keyword[else] :
identifier[response] =( identifier[self] . identifier[GO] + identifier[self] . identifier[DELIMITER] + literal[string]
literal[string] %( identifier[name] ,
identifier[other_client_id] ,
identifier[other_request_id] ))
identifier[self] . identifier[_logger] . identifier[info] ( identifier[response] )
identifier[self] . identifier[_locks] [ identifier[name] ]=( identifier[client_id] , identifier[request_id] , identifier[time] . identifier[time] ())
identifier[self] . identifier[_timeout_locks] [( identifier[name] , identifier[other_client_id] )]=( identifier[request_id] , identifier[lock_time] )
keyword[return] identifier[response]
keyword[else] :
identifier[self] . identifier[_locks] [ identifier[name] ]=( identifier[client_id] , identifier[request_id] , identifier[time] . identifier[time] ())
keyword[return] identifier[self] . identifier[GO]
|
def _lock(self, name, client_id, request_id):
"""Handles locking
Locking time is stored to determine time out.
If a lock is timed out it can be acquired by a different client.
"""
if name in self._locks:
(other_client_id, other_request_id, lock_time) = self._locks[name]
if other_client_id == client_id:
response = self.LOCK_ERROR + self.DELIMITER + 'Re-request of lock `%s` (old request id `%s`) by `%s` (request id `%s`)' % (name, client_id, other_request_id, request_id)
self._logger.warning(response)
return response # depends on [control=['if'], data=['client_id']]
else:
current_time = time.time()
if current_time - lock_time < self._timeout:
return self.WAIT # depends on [control=['if'], data=[]]
else:
response = self.GO + self.DELIMITER + 'Lock `%s` by `%s` (old request id `%s) timed out' % (name, other_client_id, other_request_id)
self._logger.info(response)
self._locks[name] = (client_id, request_id, time.time())
self._timeout_locks[name, other_client_id] = (request_id, lock_time)
return response # depends on [control=['if'], data=['name']]
else:
self._locks[name] = (client_id, request_id, time.time())
return self.GO
|
def reduce(self, sum1, sum2, *args):
"""
The internal reduce function that sums the results from various
processors
"""
self.sum1g[...] += sum1
if not self.pts_only: self.sum2g[...] += sum2
if self.compute_mean_coords:
N, centers_sum = args
self.N[...] += N
for i in range(self.bins.Ndim):
self.centers[i][...] += centers_sum[i]
|
def function[reduce, parameter[self, sum1, sum2]]:
constant[
The internal reduce function that sums the results from various
processors
]
<ast.AugAssign object at 0x7da1b18bac20>
if <ast.UnaryOp object at 0x7da1b18ba650> begin[:]
<ast.AugAssign object at 0x7da1b18bb910>
if name[self].compute_mean_coords begin[:]
<ast.Tuple object at 0x7da18f811870> assign[=] name[args]
<ast.AugAssign object at 0x7da18f813940>
for taget[name[i]] in starred[call[name[range], parameter[name[self].bins.Ndim]]] begin[:]
<ast.AugAssign object at 0x7da18f811510>
|
keyword[def] identifier[reduce] ( identifier[self] , identifier[sum1] , identifier[sum2] ,* identifier[args] ):
literal[string]
identifier[self] . identifier[sum1g] [...]+= identifier[sum1]
keyword[if] keyword[not] identifier[self] . identifier[pts_only] : identifier[self] . identifier[sum2g] [...]+= identifier[sum2]
keyword[if] identifier[self] . identifier[compute_mean_coords] :
identifier[N] , identifier[centers_sum] = identifier[args]
identifier[self] . identifier[N] [...]+= identifier[N]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[bins] . identifier[Ndim] ):
identifier[self] . identifier[centers] [ identifier[i] ][...]+= identifier[centers_sum] [ identifier[i] ]
|
def reduce(self, sum1, sum2, *args):
"""
The internal reduce function that sums the results from various
processors
"""
self.sum1g[...] += sum1
if not self.pts_only:
self.sum2g[...] += sum2 # depends on [control=['if'], data=[]]
if self.compute_mean_coords:
(N, centers_sum) = args
self.N[...] += N
for i in range(self.bins.Ndim):
self.centers[i][...] += centers_sum[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
|
def zremrangebyrank(self, name, rank_start, rank_end):
"""
Remove the elements of the zset which have rank in the range
[rank_start,rank_end].
.. note:: The range is [``rank_start``, ``rank_end``]
:param string name: the zset name
:param int rank_start: zero or positive,the start position
:param int rank_end: zero or positive,the end position
:return: the number of deleted elements
:rtype: int
>>> ssdb.zremrangebyrank('zset_1', 0, 2)
3
>>> ssdb.zremrangebyrank('zset_1', 1, 4)
5
>>> ssdb.zremrangebyrank('zset_1', 0, 0)
1
"""
rank_start = get_nonnegative_integer('rank_start', rank_start)
rank_end = get_nonnegative_integer('rank_end', rank_end)
return self.execute_command('zremrangebyrank', name, rank_start,
rank_end)
|
def function[zremrangebyrank, parameter[self, name, rank_start, rank_end]]:
constant[
Remove the elements of the zset which have rank in the range
[rank_start,rank_end].
.. note:: The range is [``rank_start``, ``rank_end``]
:param string name: the zset name
:param int rank_start: zero or positive,the start position
:param int rank_end: zero or positive,the end position
:return: the number of deleted elements
:rtype: int
>>> ssdb.zremrangebyrank('zset_1', 0, 2)
3
>>> ssdb.zremrangebyrank('zset_1', 1, 4)
5
>>> ssdb.zremrangebyrank('zset_1', 0, 0)
1
]
variable[rank_start] assign[=] call[name[get_nonnegative_integer], parameter[constant[rank_start], name[rank_start]]]
variable[rank_end] assign[=] call[name[get_nonnegative_integer], parameter[constant[rank_end], name[rank_end]]]
return[call[name[self].execute_command, parameter[constant[zremrangebyrank], name[name], name[rank_start], name[rank_end]]]]
|
keyword[def] identifier[zremrangebyrank] ( identifier[self] , identifier[name] , identifier[rank_start] , identifier[rank_end] ):
literal[string]
identifier[rank_start] = identifier[get_nonnegative_integer] ( literal[string] , identifier[rank_start] )
identifier[rank_end] = identifier[get_nonnegative_integer] ( literal[string] , identifier[rank_end] )
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[rank_start] ,
identifier[rank_end] )
|
def zremrangebyrank(self, name, rank_start, rank_end):
"""
Remove the elements of the zset which have rank in the range
[rank_start,rank_end].
.. note:: The range is [``rank_start``, ``rank_end``]
:param string name: the zset name
:param int rank_start: zero or positive,the start position
:param int rank_end: zero or positive,the end position
:return: the number of deleted elements
:rtype: int
>>> ssdb.zremrangebyrank('zset_1', 0, 2)
3
>>> ssdb.zremrangebyrank('zset_1', 1, 4)
5
>>> ssdb.zremrangebyrank('zset_1', 0, 0)
1
"""
rank_start = get_nonnegative_integer('rank_start', rank_start)
rank_end = get_nonnegative_integer('rank_end', rank_end)
return self.execute_command('zremrangebyrank', name, rank_start, rank_end)
|
def list_instances(self, tags=None, cpus=None, memory=None, hostname=None,
disk=None, datacenter=None, **kwargs):
"""Retrieve a list of all dedicated hosts on the account
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string disk: filter based on disk
:param string datacenter: filter based on datacenter
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching dedicated host.
"""
if 'mask' not in kwargs:
items = [
'id',
'name',
'cpuCount',
'diskCapacity',
'memoryCapacity',
'datacenter',
'guestCount',
]
kwargs['mask'] = "mask[%s]" % ','.join(items)
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['dedicatedHosts']['tagReferences']['tag']['name'] = {
'operation': 'in',
'options': [{'name': 'data', 'value': tags}],
}
if hostname:
_filter['dedicatedHosts']['name'] = (
utils.query_filter(hostname)
)
if cpus:
_filter['dedicatedHosts']['cpuCount'] = utils.query_filter(cpus)
if disk:
_filter['dedicatedHosts']['diskCapacity'] = (
utils.query_filter(disk))
if memory:
_filter['dedicatedHosts']['memoryCapacity'] = (
utils.query_filter(memory))
if datacenter:
_filter['dedicatedHosts']['datacenter']['name'] = (
utils.query_filter(datacenter))
kwargs['filter'] = _filter.to_dict()
return self.account.getDedicatedHosts(**kwargs)
|
def function[list_instances, parameter[self, tags, cpus, memory, hostname, disk, datacenter]]:
constant[Retrieve a list of all dedicated hosts on the account
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string disk: filter based on disk
:param string datacenter: filter based on datacenter
:param dict \*\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching dedicated host.
]
if compare[constant[mask] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
variable[items] assign[=] list[[<ast.Constant object at 0x7da18f721900>, <ast.Constant object at 0x7da18f723010>, <ast.Constant object at 0x7da18f721ab0>, <ast.Constant object at 0x7da18f7239d0>, <ast.Constant object at 0x7da18f7236d0>, <ast.Constant object at 0x7da18f7205e0>, <ast.Constant object at 0x7da18f7204f0>]]
call[name[kwargs]][constant[mask]] assign[=] binary_operation[constant[mask[%s]] <ast.Mod object at 0x7da2590d6920> call[constant[,].join, parameter[name[items]]]]
variable[_filter] assign[=] call[name[utils].NestedDict, parameter[<ast.BoolOp object at 0x7da18f7228c0>]]
if name[tags] begin[:]
call[call[call[call[name[_filter]][constant[dedicatedHosts]]][constant[tagReferences]]][constant[tag]]][constant[name]] assign[=] dictionary[[<ast.Constant object at 0x7da18f7209d0>, <ast.Constant object at 0x7da18f720940>], [<ast.Constant object at 0x7da18f721a50>, <ast.List object at 0x7da18f723ee0>]]
if name[hostname] begin[:]
call[call[name[_filter]][constant[dedicatedHosts]]][constant[name]] assign[=] call[name[utils].query_filter, parameter[name[hostname]]]
if name[cpus] begin[:]
call[call[name[_filter]][constant[dedicatedHosts]]][constant[cpuCount]] assign[=] call[name[utils].query_filter, parameter[name[cpus]]]
if name[disk] begin[:]
call[call[name[_filter]][constant[dedicatedHosts]]][constant[diskCapacity]] assign[=] call[name[utils].query_filter, parameter[name[disk]]]
if name[memory] begin[:]
call[call[name[_filter]][constant[dedicatedHosts]]][constant[memoryCapacity]] assign[=] call[name[utils].query_filter, parameter[name[memory]]]
if name[datacenter] begin[:]
call[call[call[name[_filter]][constant[dedicatedHosts]]][constant[datacenter]]][constant[name]] assign[=] call[name[utils].query_filter, parameter[name[datacenter]]]
call[name[kwargs]][constant[filter]] assign[=] call[name[_filter].to_dict, parameter[]]
return[call[name[self].account.getDedicatedHosts, parameter[]]]
|
keyword[def] identifier[list_instances] ( identifier[self] , identifier[tags] = keyword[None] , identifier[cpus] = keyword[None] , identifier[memory] = keyword[None] , identifier[hostname] = keyword[None] ,
identifier[disk] = keyword[None] , identifier[datacenter] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[items] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[kwargs] [ literal[string] ]= literal[string] % literal[string] . identifier[join] ( identifier[items] )
identifier[_filter] = identifier[utils] . identifier[NestedDict] ( identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] {})
keyword[if] identifier[tags] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]={
literal[string] : literal[string] ,
literal[string] :[{ literal[string] : literal[string] , literal[string] : identifier[tags] }],
}
keyword[if] identifier[hostname] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[hostname] )
)
keyword[if] identifier[cpus] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[cpus] )
keyword[if] identifier[disk] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[disk] ))
keyword[if] identifier[memory] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[memory] ))
keyword[if] identifier[datacenter] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[datacenter] ))
identifier[kwargs] [ literal[string] ]= identifier[_filter] . identifier[to_dict] ()
keyword[return] identifier[self] . identifier[account] . identifier[getDedicatedHosts] (** identifier[kwargs] )
|
def list_instances(self, tags=None, cpus=None, memory=None, hostname=None, disk=None, datacenter=None, **kwargs):
"""Retrieve a list of all dedicated hosts on the account
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string disk: filter based on disk
:param string datacenter: filter based on datacenter
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching dedicated host.
"""
if 'mask' not in kwargs:
items = ['id', 'name', 'cpuCount', 'diskCapacity', 'memoryCapacity', 'datacenter', 'guestCount']
kwargs['mask'] = 'mask[%s]' % ','.join(items) # depends on [control=['if'], data=['kwargs']]
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['dedicatedHosts']['tagReferences']['tag']['name'] = {'operation': 'in', 'options': [{'name': 'data', 'value': tags}]} # depends on [control=['if'], data=[]]
if hostname:
_filter['dedicatedHosts']['name'] = utils.query_filter(hostname) # depends on [control=['if'], data=[]]
if cpus:
_filter['dedicatedHosts']['cpuCount'] = utils.query_filter(cpus) # depends on [control=['if'], data=[]]
if disk:
_filter['dedicatedHosts']['diskCapacity'] = utils.query_filter(disk) # depends on [control=['if'], data=[]]
if memory:
_filter['dedicatedHosts']['memoryCapacity'] = utils.query_filter(memory) # depends on [control=['if'], data=[]]
if datacenter:
_filter['dedicatedHosts']['datacenter']['name'] = utils.query_filter(datacenter) # depends on [control=['if'], data=[]]
kwargs['filter'] = _filter.to_dict()
return self.account.getDedicatedHosts(**kwargs)
|
def ensure_on():
"""
Start the DbServer if it is off
"""
if get_status() == 'not-running':
if config.dbserver.multi_user:
sys.exit('Please start the DbServer: '
'see the documentation for details')
# otherwise start the DbServer automatically; NB: I tried to use
# multiprocessing.Process(target=run_server).start() and apparently
# it works, but then run-demos.sh hangs after the end of the first
# calculation, but only if the DbServer is started by oq engine (!?)
subprocess.Popen([sys.executable, '-m', 'openquake.server.dbserver',
'-l', 'INFO'])
# wait for the dbserver to start
waiting_seconds = 30
while get_status() == 'not-running':
if waiting_seconds == 0:
sys.exit('The DbServer cannot be started after 30 seconds. '
'Please check the configuration')
time.sleep(1)
waiting_seconds -= 1
|
def function[ensure_on, parameter[]]:
constant[
Start the DbServer if it is off
]
if compare[call[name[get_status], parameter[]] equal[==] constant[not-running]] begin[:]
if name[config].dbserver.multi_user begin[:]
call[name[sys].exit, parameter[constant[Please start the DbServer: see the documentation for details]]]
call[name[subprocess].Popen, parameter[list[[<ast.Attribute object at 0x7da1b0781e40>, <ast.Constant object at 0x7da1b0781450>, <ast.Constant object at 0x7da1b0783d90>, <ast.Constant object at 0x7da1b0780f40>, <ast.Constant object at 0x7da1b0781150>]]]]
variable[waiting_seconds] assign[=] constant[30]
while compare[call[name[get_status], parameter[]] equal[==] constant[not-running]] begin[:]
if compare[name[waiting_seconds] equal[==] constant[0]] begin[:]
call[name[sys].exit, parameter[constant[The DbServer cannot be started after 30 seconds. Please check the configuration]]]
call[name[time].sleep, parameter[constant[1]]]
<ast.AugAssign object at 0x7da1b0783340>
|
keyword[def] identifier[ensure_on] ():
literal[string]
keyword[if] identifier[get_status] ()== literal[string] :
keyword[if] identifier[config] . identifier[dbserver] . identifier[multi_user] :
identifier[sys] . identifier[exit] ( literal[string]
literal[string] )
identifier[subprocess] . identifier[Popen] ([ identifier[sys] . identifier[executable] , literal[string] , literal[string] ,
literal[string] , literal[string] ])
identifier[waiting_seconds] = literal[int]
keyword[while] identifier[get_status] ()== literal[string] :
keyword[if] identifier[waiting_seconds] == literal[int] :
identifier[sys] . identifier[exit] ( literal[string]
literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
identifier[waiting_seconds] -= literal[int]
|
def ensure_on():
"""
Start the DbServer if it is off
"""
if get_status() == 'not-running':
if config.dbserver.multi_user:
sys.exit('Please start the DbServer: see the documentation for details') # depends on [control=['if'], data=[]]
# otherwise start the DbServer automatically; NB: I tried to use
# multiprocessing.Process(target=run_server).start() and apparently
# it works, but then run-demos.sh hangs after the end of the first
# calculation, but only if the DbServer is started by oq engine (!?)
subprocess.Popen([sys.executable, '-m', 'openquake.server.dbserver', '-l', 'INFO'])
# wait for the dbserver to start
waiting_seconds = 30
while get_status() == 'not-running':
if waiting_seconds == 0:
sys.exit('The DbServer cannot be started after 30 seconds. Please check the configuration') # depends on [control=['if'], data=[]]
time.sleep(1)
waiting_seconds -= 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.