Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
372,400
|
def get_settings_json(self):
    """Convert generator settings to JSON.

    Returns
    -------
    `dict`
        JSON data; each entry is the sub-object's ``save()`` output, or
        ``None`` when that sub-object is not set.
    """
    # NOTE(review): the dictionary keys are missing — string literals
    # appear to have been stripped from this snippet. Restore the key
    # names (presumably naming the scanner/parser entries) before use.
    return {
        : None if self.scanner is None else self.scanner.save(),
        : None if self.parser is None else self.parser.save()
    }
|
Convert generator settings to JSON.
Returns
-------
`dict`
JSON data.
|
372,401
|
def isTagEqual(self, other):
    """Compare whether another tag has the same tag name and attributes
    as this one, i.e. whether everything between the < and > parts of
    the two tags is the same.

    Does NOT compare children, and does NOT check whether the two objects
    are the same exact tag in the html (use the regular == operator for
    that). For example::

        tag1 = document.getElementById('something')
        tag2 = copy.copy(tag1)
        tag1 == tag2            # False
        tag1.isTagEqual(tag2)   # True

    @param other - object to compare against; non-tag objects compare
        unequal instead of raising.
    @return bool - True if tags have the same name and attributes,
        otherwise False.
    """
    # Bug fix: removed a stray bare name (`something`) left over from a
    # mangled docstring, which raised NameError on every call.
    try:
        if self.tagName != other.tagName:
            return False
        myAttributes = self._attributes
        otherAttributes = other._attributes
        attributeKeysSelf = list(myAttributes.keys())
        attributeKeysOther = list(otherAttributes.keys())
    except AttributeError:
        # `other` is not tag-like (missing tagName/_attributes): not equal.
        # Narrowed from a bare `except:` so real errors are not swallowed.
        return False
    if set(attributeKeysSelf) != set(attributeKeysOther):
        return False
    for key in attributeKeysSelf:
        if myAttributes.get(key) != otherAttributes.get(key):
            return False
    return True
|
isTagEqual - Compare if a tag contains the same tag name and attributes as another tag,
i.e. if everything between < and > parts of this tag are the same.
Does NOT compare children, etc. Does NOT compare if these are the same exact tag in the html (use regular == operator for that)
So for example:
tag1 = document.getElementById('something')
tag2 = copy.copy(tag1)
tag1 == tag2 # This is False
tag1.isTagEqual(tag2) # This is True
@return bool - True if tags have the same name and attributes, otherwise False
|
372,402
|
def ackermann_naive(m: int, n: int) -> int:
    """Compute the Ackermann number A(m, n) by direct recursion.

    Grows explosively in m; only tiny inputs (m <= 3) are practical.

    :param m: first argument, a non-negative integer.
    :param n: second argument, a non-negative integer.
    :return: the Ackermann number A(m, n).
    """
    if m == 0:
        return n + 1
    elif n == 0:
        # Bug fix: the original recursed into an undefined name
        # `ackermann`; the recursion must target this function itself.
        return ackermann_naive(m - 1, 1)
    else:
        return ackermann_naive(m - 1, ackermann_naive(m, n - 1))
|
Ackermann number.
|
372,403
|
def hsvToRGB(h, s, v):
    """Convert an HSV color to its RGB representation.

    **Parameters**
        **h** : float -- hue, a number in [0, 360].
        **s** : float -- saturation, a number in [0, 1].
        **v** : float -- value, a number in [0, 1].

    **Returns**
        ``(r, g, b)`` tuple of floats, each in [0, 1].
    """
    # Which 60-degree sector of the hue wheel we are in, and how far into it.
    sector = math.floor(h / 60.0) % 6
    frac = (h / 60.0) - math.floor(h / 60.0)
    p = v * (1.0 - s)
    q = v * (1.0 - (frac * s))
    t = v * (1.0 - ((1.0 - frac) * s))
    if sector == 0:
        return (v, t, p)
    if sector == 1:
        return (q, v, p)
    if sector == 2:
        return (p, v, t)
    if sector == 3:
        return (p, q, v)
    if sector == 4:
        return (t, p, v)
    return (v, p, q)
|
Convert HSV (hue, saturation, value) color space to RGB (red, green, blue)
color space.
**Parameters**
**h** : float
Hue, a number in [0, 360].
**s** : float
Saturation, a number in [0, 1].
**v** : float
Value, a number in [0, 1].
**Returns**
**r** : float
Red, a number in [0, 1].
**g** : float
Green, a number in [0, 1].
**b** : float
Blue, a number in [0, 1].
|
372,404
|
def blockType(self, kind):
    """Read block type switch descriptor for given kind of blockType.

    Reads the number of block types for *kind*; when there are at least
    two, also reads the block-type and block-count prefix codes plus an
    initial block count, otherwise the count defaults to 1<<24.
    """
    # NOTE(review): string literals have been stripped from this snippet —
    # `+kind[0].upper()` and `description=.format(kind)` are missing their
    # left-hand string operands. Restore them before use.
    NBLTYPES = self.verboseRead(TypeCountAlphabet(
        +kind[0].upper(),
        description=.format(kind),
        ))
    self.numberOfBlockTypes[kind] = NBLTYPES
    if NBLTYPES>=2:
        # Multiple block types: read the prefix codes and initial count.
        self.blockTypeCodes[kind] = self.readPrefixCode(
            BlockTypeAlphabet(+kind[0].upper(), NBLTYPES))
        self.blockCountCodes[kind] = self.readPrefixCode(
            BlockCountAlphabet(+kind[0].upper()))
        blockCount = self.verboseRead(self.blockCountCodes[kind])
    else:
        # Single block type: one effectively-unbounded block.
        blockCount = 1<<24
    self.currentBlockCounts[kind] = blockCount
|
Read block type switch descriptor for given kind of blockType.
|
372,405
|
def _initAsteriskVersion(self):
    """Query the Asterisk Manager Interface for the Asterisk version to
    configure the system for compatibility with multiple versions.

    CLI Command - core show version
    """
    # Newer AMI versions renamed the CLI command; pick the right form.
    if self._ami_version > util.SoftwareVersion():
        cmd = "core show version"
    else:
        cmd = "show version"
    cmdresp = self.executeCommand(cmd)
    # NOTE(review): the regex pattern, the SoftwareVersion() threshold
    # argument and the Exception message are missing — string literals
    # appear to have been stripped from this snippet. Restore them.
    mobj = re.match(, cmdresp)
    if mobj:
        self._asterisk_version = util.SoftwareVersion(mobj.group(2))
    else:
        raise Exception()
|
Query Asterisk Manager Interface for Asterisk Version to configure
system for compatibility with multiple versions
.
CLI Command - core show version
|
372,406
|
def set_back(self, x: int, y: int, r: int, g: int, b: int) -> None:
    """Set the background color of a single cell.

    Args:
        x (int): X position of the cell to change.
        y (int): Y position of the cell to change.
        r (int): Red background component, 0 to 255.
        g (int): Green background component, 0 to 255.
        b (int): Blue background component, 0 to 255.
    """
    # Cells are stored row-major in flat per-channel arrays.
    cell = x + y * self.width
    self.back_r[cell] = r
    self.back_g[cell] = g
    self.back_b[cell] = b
|
Set the background color of one cell.
Args:
x (int): X position to change.
y (int): Y position to change.
r (int): Red background color, from 0 to 255.
g (int): Green background color, from 0 to 255.
b (int): Blue background color, from 0 to 255.
|
372,407
|
def _save_np(obj, pathfileext, compressed=False):
    """Save a tofu object to a numpy .npz archive, dispatching on the
    object's class (obj.Id.Cls) to decide which fields get persisted.

    :param obj: the tofu object to save; must expose an Id with a Cls.
    :param pathfileext: target file path (with extension) for np.savez.
    :param compressed: use np.savez_compressed instead of np.savez.
    """
    # NOTE(review): class-name literals, dict keys and filename suffixes
    # have been stripped from this snippet (e.g. `obj.Id.Cls==:`,
    # `dsave[]`, `pathN+`). Restore them before use.
    func = np.savez_compressed if compressed else np.savez
    dId = obj.Id._todict()
    if obj.Id.Cls==:
        func(pathfileext, Id=dId, arrayorder=obj._arrayorder, Clock=obj._Clock,
             Poly=obj.Poly, Lim=obj.Lim, Sino_RefPt=obj.sino[],
             Sino_NP=obj.sino[])
    elif obj.Id.Cls==:
        func(pathfileext, Id=dId, arrayorder=obj._arrayorder, Clock=obj._Clock,
             Poly=obj.Poly, Lim=obj.Lim, mobile=obj._mobile)
    elif obj.Id.Cls in [,,,]:
        func(pathfileext, Id=dId, extra=obj._extra,
             geom=obj.geom, sino=obj.sino, dchans=obj.dchans)
    elif obj.Id.Cls in [,,]:
        dsave = obj._todict()
        # Replace embedded camera objects by their on-disk save paths.
        if dsave[] is not None and dsave[][] is not None:
            LCam = []
            for cc in dsave[][]:
                pathS = cc[][]
                pathN = cc[][]
                LCam.append(os.path.join(pathS,pathN+))
            dsave[] = LCam
        elif dsave[] is not None:
            # Replace embedded geometry (vessel + structures) by paths.
            geom = []
            if dsave[][] is not None:
                pathS = dsave[][][][]
                pathN = dsave[][][][]
                Ves = os.path.join(pathS,pathN+)
                geom += [Ves]
            if dsave[][] is not None:
                for ss in dsave[][]:
                    sf = os.path.join(ss[][],
                                      ss[][]+)
                    geom += [sf]
            dsave[] = geom
        func(pathfileext, **dsave)
|
elif obj.Id.Cls=='GLOS':
LIdLOS = [ll.Id.todict() for ll in obj.LLOS]
LDs, Lus = np.array([ll.D for ll in obj.LLOS]).T, np.array([ll.u for ll in obj.LLOS]).T
func(pathfileext, Idsave=Idsave, LIdLOS=LIdLOS, LDs=LDs, Lus=Lus, Sino_RefPt=obj.Sino_RefPt, arrayorder=obj._arrayorder, Clock=obj._Clock)
elif obj.Id.Cls=='Lens':
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, O=obj.O, nIn=obj.nIn, Rad=[obj.Rad], F1=[obj.F1], F2=[obj.F2], R1=[obj.R1], R2=[obj.R2], dd=[obj.dd])
elif obj.Id.Cls=='Apert':
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, Poly=obj.Poly)
elif obj.Id.Cls=='Detect':
LOSprops, Sino, Span, Cone, SAng, SynthDiag, Res, Optics = _convert_Detect2Ldict(obj)
VesCalc = {'SavePath':None} if (not hasattr(obj,'_VesCalc') or obj._VesCalc is None) else {'SavePath':obj._VesCalc.Id.SavePath, 'SaveName':obj._VesCalc.Id.SaveName}
func(pathfileext, Idsave=Idsave, Poly=obj.Poly, Rad=obj.Rad, BaryS=obj.BaryS, nIn=obj.nIn, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt, LOSNP=[obj._LOS_NP],
LOSprops=[LOSprops], Sino=[Sino], Span=[Span], Cone=[Cone], SAng=[SAng], SynthDiag=[SynthDiag], Res=[Res], Optics=[Optics], VesCalc=[VesCalc])
elif obj.Id.Cls=='GDetect':
LDetsave, LDetSynthRes = [], []
for ii in range(0,obj.nDetect):
ddIdsave = obj.LDetect[ii].Id.todict()
LOSprops, Sino, Span, Cone, SAng, SynthDiag, Res, Optics = _convert_Detect2Ldict(obj.LDetect[ii])
VesCalc = {'SavePath':None} if (not hasattr(obj.LDetect[ii],'_VesCalc') or obj.LDetect[ii]._VesCalc is None) else {'SavePath':obj.LDetect[ii]._VesCalc.Id.SavePath, 'SaveName':obj.LDetect[ii]._VesCalc.Id.SaveName}
dd = dict(Idsave=ddIdsave, Poly=obj.LDetect[ii].Poly, Rad=obj.LDetect[ii].Rad, BaryS=obj.LDetect[ii].BaryS, nIn=obj.LDetect[ii].nIn, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt,
LOSNP=[obj.LDetect[ii]._LOS_NP], LOSprops=[LOSprops], Sino=[Sino], Span=[Span], Cone=[Cone], SAng=[SAng], Optics=[Optics], VesCalc=[VesCalc])
LDetsave.append(dd)
LDetSynthRes.append({'SynthDiag':[SynthDiag],'Res':[Res]})
Res, lAttr = {}, dir(obj)
for pp in lAttr:
if not inspect.ismethod(getattr(obj,pp)) and '_Res' in pp:
Res[pp] = getattr(obj,pp)
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt, LOSRef=obj._LOSRef, Res=[Res], LDetsave=LDetsave, LDetSynthRes=LDetSynthRes)
# tofu.Eq
elif obj.Id.Cls=='Eq2D':
np.savez(pathfileext, Idsave=Idsave, **obj._Tab)
# tofu.mesh
elif obj.Id.Cls=='Mesh1D':
func(pathfileext, Idsave=Idsave, Knots=obj.Knots)
elif obj.Id.Cls=='Mesh2D':
SubMinds = [{'Name':kk, 'ind':obj._SubMesh[kk]['ind']} for kk in obj._SubMesh.keys()]
func(pathfileext, Idsave=Idsave, Knots=[obj.MeshX1.Knots,obj.MeshX2.Knots], SubMinds=SubMinds, IndBg=obj._get_CentBckg()[1])
elif obj.Id.Cls=='BF2D':
Id = np.array(['BF2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
IdMesh = np.array(['Mesh2D',obj.Mesh.Id.Name,obj.Mesh.Id.SaveName,obj.Mesh.Id.SavePath,obj.Mesh.Id._dtFormat],dtype=str)
dtime, dtimeMesh = np.array([obj.Id._dtime],dtype=object), np.array([obj.Mesh.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
func(pathfileext, Id=Id, IdMesh=IdMesh, dtime=dtime, IdUSR=USR, dtimeMesh=dtimeMesh, KnotsR=obj.Mesh.MeshR.Knots, KnotsZ=obj.Mesh.MeshZ.Knots, Deg=np.array([obj.Deg],dtype=int), Ind=obj.Mesh._get_CentBckg()[1])
# tofu.matcomp
elif obj.Id.Cls=='GMat2D':
Id = np.array(['GMat2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
dtime = np.array([obj.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
IdObj, IdObjUSR = save_np_IdObj(obj.Id)
CompParamVal = np.array([obj._Mat_epsrel, obj._Mat_SubP, obj._Mat_SubTheta, obj._indMat_SubP, obj._MatLOS_epsrel, obj._MatLOS_SubP, int(obj._Mat_Fast)])
CompParamStr = np.array([obj._Mat_Mode, obj._Mat_SubMode, obj._Mat_SubThetaMode, obj._MatLOS_Mode, obj._MatLOS_SubMode])
func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, Ves=IdObj[2], VesUSR=IdObjUSR[2], LDetect=IdObj[1], BF2=IdObj[0], BF2USR=IdObjUSR[0], LDetectUSR=IdObjUSR[1], CompParamVal=CompParamVal,
CompParamStr=CompParamStr, indMat=obj._indMat, Matdata=obj._Mat_csr.data, Matind=obj._Mat_csr.indices, Matindpr=obj._Mat_csr.indptr, Matshape=obj._Mat_csr.shape,
MatLOSdata=obj._MatLOS_csr.data, MatLOSind=obj._MatLOS_csr.indices, MatLOSindpr=obj._MatLOS_csr.indptr, MatLOSshape=obj._MatLOS_csr.shape,
BF2Par=np.array([obj._BF2_Deg,obj._BF2_NFunc,obj._BF2_NCents]), LD_nD=obj._LD_nDetect)
# tofu.treat
elif obj.Id.Cls=='PreData':
Init, Update = _convert_PreData2Ldict(obj)
func(pathfileext, Idsave=Idsave, Init=[Init], Update=[Update])
#Id = np.array(['PreData',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
#dtime = np.array([obj.Id._dtime],dtype=object)
#USR = np.asarray(obj.Id.USRdict)
#IdObj, IdObjUSR = save_np_IdObj(obj.Id)
#StrPar = np.asarray([obj._Exp, obj._interpkind])
#func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, LDetect=IdObj[0], LDetectUSR=IdObjUSR[0],
# DLPar=obj._DLPar, shot=obj._shot, StrPar=StrPar, Dt=obj._Dt, DtMarg=obj._DtMargin, MovMeanfreq=obj._MovMeanfreq, Resamp=obj._Resamp,
# indOut=obj._indOut, indCorr=obj._indCorr, PhysNoise=obj._PhysNoise, NoiseMod=obj._NoiseModel, interp_lt=obj._interp_lt, interp_lN=obj._interp_lNames)
# tofu.inv
elif obj.Id.Cls=='Sol2D':
Id = np.array(['Sol2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
dtime = np.array([obj.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
IdObj, IdObjUSR = save_np_IdObj(obj.Id)
try:
timing = obj._timing
except Exception:
timing = obj._t2
func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, PreData=IdObj[2], PreDataUSR=IdObjUSR[2], GMat2D=IdObj[1], GMatUSR=IdObjUSR[1], BF2D=IdObj[0], BF2DUSR=IdObjUSR[0],
InvParam=obj.InvParam, shot=obj.shot, LNames=obj._LNames, Run=obj._run,
LOS=obj._LOS, data=obj._data, t=obj._t, Coefs=obj._Coefs, sigma=obj._sigma, Mu=obj._Mu, Chi2N=obj._Chi2N, R = obj._R, Nit=obj._Nit, Spec=obj._Spec, t2=timing, PostTreat=obj._PostTreat)
|
372,408
|
def check_authorization(self):
    """Check for the presence of a basic auth Authorization header and
    whether the credentials contained within it are valid.

    :return: Whether or not the credentials are valid.
    :rtype: bool
    """
    # NOTE(review): string literals (header name, the 'Basic' scheme
    # token, split separators and password-store dict keys) have been
    # stripped from this snippet. Restore them before use.
    try:
        store = self.__config.get()
        if store is None:
            # No credential store configured: allow all requests.
            return True
        auth_info = self.headers.get()
        if not auth_info:
            return False
        auth_info = auth_info.split()
        if len(auth_info) != 2 or auth_info[0] != :
            return False
        auth_info = base64.b64decode(auth_info[1]).decode(sys.getdefaultencoding())
        username = auth_info.split()[0]
        password = .join(auth_info.split()[1:])
        password_bytes = password.encode(sys.getdefaultencoding())
        if hasattr(self, ):
            # Delegate to a user-supplied authentication hook when present.
            if self.custom_authentication(username, password):
                self.basic_auth_user = username
                return True
            return False
        if not username in store:
            self.server.logger.warning( + username)
            return False
        password_data = store[username]
        if password_data[] == :
            # Plain-text password comparison branch.
            if password == password_data[]:
                self.basic_auth_user = username
                return True
        elif hashlib.new(password_data[], password_bytes).digest() == password_data[]:
            # Hashed password comparison branch.
            self.basic_auth_user = username
            return True
        self.server.logger.warning( + username)
    except Exception:
        # Any parsing/lookup failure is treated as invalid credentials.
        pass
    return False
|
Check for the presence of a basic auth Authorization header and
if the credentials contained within it are valid.
:return: Whether or not the credentials are valid.
:rtype: bool
|
372,409
|
def surface_nodes(self):
    """Serialize this surface's mesh points into a node tree.

    :returns: a list with a single Node (of kind 'griddedSurface' per the
        accompanying docs) wrapping the flattened coordinate list.
    """
    # Flatten the mesh into a lon, lat, depth triple per point.
    line = []
    for point in self.mesh:
        line.append(point.longitude)
        line.append(point.latitude)
        line.append(point.depth)
    # NOTE(review): the Node tag-name string literals are missing —
    # stripped from this snippet. Restore them before use.
    return [Node(, nodes=[Node(, {}, line)])]
|
:param points: a list of Point objects
:returns: a Node of kind 'griddedSurface'
|
372,410
|
def _post_deactivate_injection(self):
self.active = False
self.app.signals.send("plugin_deactivate_post", self)
self.signals.deactivate_plugin_signals()
|
Injects functions after the deactivation routine of child classes got called
:return: None
|
372,411
|
def omegac(self,R):
    """NAME:
        omegac
    PURPOSE:
        calculate the circular angular speed at R in this potential
    INPUT:
        R - Galactocentric radius (can be Quantity)
    OUTPUT:
        circular angular speed
    HISTORY:
        2011-10-09 - Written - Bovy (IAS)
    """
    radial_force = self.Rforce(R,use_physical=False)
    return nu.sqrt(-radial_force / R)
|
NAME:
omegac
PURPOSE:
calculate the circular angular speed at R in potential Pot
INPUT:
Pot - Potential instance or list of such instances
R - Galactocentric radius (can be Quantity)
OUTPUT:
circular angular speed
HISTORY:
2011-10-09 - Written - Bovy (IAS)
|
372,412
|
def log(self, level, prefix = ):
    """Write the contents of the Extension to the logging system.

    :param level: logging level, passed straight through to logging.log.
    :param prefix: string prepended to each emitted line.
    """
    # NOTE(review): the default value of `prefix` (presumably an empty
    # string) was stripped from this snippet. Restore it before use.
    logging.log(level, "%sname: %s", prefix, self.__name)
    logging.log(level, "%soptions: %s", prefix, self.__options)
|
Writes the contents of the Extension to the logging system.
|
372,413
|
def _parse_sid_response(res):
    """Parse the response format for a request for a new channel SID.

    Example format (after parsing JS)::

        [ [0,["c","SID_HERE","",8]],
          [1,[{"gsid":"GSESSIONID_HERE"}]]]

    Returns (SID, gsessionid) tuple.
    """
    # First chunk of the response holds the JSON payload.
    res = json.loads(list(ChunkParser().get_chunks(res))[0])
    sid = res[0][1][1]
    # NOTE(review): the dict key (presumably 'gsid', per the example
    # above) was stripped from this snippet. Restore it before use.
    gsessionid = res[1][1][0][]
    return (sid, gsessionid)
|
Parse response format for request for new channel SID.
Example format (after parsing JS):
[ [0,["c","SID_HERE","",8]],
[1,[{"gsid":"GSESSIONID_HERE"}]]]
Returns (SID, gsessionid) tuple.
|
372,414
|
def gettext(ui_file_path):
    """Rewrite a Qt .ui file in place so gettext can be used for i18n
    instead of the Qt translation tools.
    """
    # NOTE(review): the open() mode strings, the regex pattern and
    # replacement, and the replace() arguments were stripped from this
    # snippet. Restore them before use.
    with open(ui_file_path, ) as fin:
        content = fin.read()
    content = re.sub(r, , content)
    content = content.replace(
        , )
    # Write the transformed markup back over the original file.
    with open(ui_file_path, ) as fout:
        fout.write(content)
|
Lets you use gettext instead of the Qt tools for i18n
|
372,415
|
def get_perceel_by_id_and_sectie(self, id, sectie):
    """Get a `perceel`.

    :param id: An id for a `perceel`.
    :param sectie: The :class:`Sectie` that contains the perceel.
    :rtype: :class:`Perceel`
    """
    # NOTE(review): the URL template, request parameter keys/values,
    # response field names, cache name and cache-key format were
    # stripped from this snippet. Restore them before use.
    sid = sectie.id
    aid = sectie.afdeling.id
    gid = sectie.afdeling.gemeente.id
    # Detach the sectie from its gateway before it is captured/cached.
    sectie.clear_gateway()
    def creator():
        # Fetch the perceel from the REST service and map it to a Perceel.
        url = self.base_url + % (
            gid, aid, sid, id)
        h = self.base_headers
        p = {
            : ,
            : ,
            :
        }
        res = capakey_rest_gateway_request(url, h, p).json()
        return Perceel(
            res[],
            sectie,
            res[],
            Perceel.get_percid_from_capakey(res[]),
            None,
            None,
            self._parse_centroid(res[][]),
            self._parse_bounding_box(res[][]),
            res[][]
        )
    if self.caches[].is_configured:
        # Serve from cache when configured, building on a miss.
        key = % (id, sectie.id, sectie.afdeling.id)
        perceel = self.caches[].get_or_create(key, creator)
    else:
        perceel = creator()
    perceel.set_gateway(self)
    return perceel
|
Get a `perceel`.
:param id: An id for a `perceel`.
:param sectie: The :class:`Sectie` that contains the perceel.
:rtype: :class:`Perceel`
|
372,416
|
def download_wiki():
    """Download Wikipedia pages of ambiguous units.

    Collects every unit / derived entity that maps to more than one
    object, downloads the content of each corresponding Wikipedia page
    and dumps the results as JSON under l.TOPDIR.
    """
    # NOTE(review): dict keys, replace() arguments, print format strings
    # and the output filename were stripped from this snippet; it also
    # uses Python 2 `print` statements. Restore/port before use.
    ambiguous = [i for i in l.UNITS.items() if len(i[1]) > 1]
    ambiguous += [i for i in l.DERIVED_ENT.items() if len(i[1]) > 1]
    # Deduplicate pages by (name, uri).
    pages = set([(j.name, j.uri) for i in ambiguous for j in i[1]])
    print
    objs = []
    for num, page in enumerate(pages):
        obj = {: page[1]}
        obj[] = obj[].replace(, )
        obj[] = obj[].replace(, )
        print % \
            (obj[], num + 1, len(pages))
        obj[] = wikipedia.page(obj[]).content
        obj[] = page[0]
        objs.append(obj)
    path = os.path.join(l.TOPDIR, )
    # Remove any previous dump before rewriting it.
    os.remove(path)
    json.dump(objs, open(path, ), indent=4, sort_keys=True)
    print
|
Download WikiPedia pages of ambiguous units.
|
372,417
|
def from_tree(cls, repo, *treeish, **kwargs):
    """Merge the given treeish revisions into a new index which is
    returned; the original index remains unaltered.

    :param repo: The repository the treeish are located in.
    :param treeish: One, two or three Tree objects, Commits or 40-byte
        hexshas: 1 = plain read, 2 = two-way merge (tree 1 'current',
        tree 2 'other'), 3 = three-way merge with tree 1 as the common
        ancestor of trees 2 and 3.
    :param kwargs: Additional arguments passed to git-read-tree.
    :return: New IndexFile instance pointing at a temporary index
        location which no longer exists afterwards.
    """
    if len(treeish) == 0 or len(treeish) > 3:
        raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish))
    arg_list = []
    if len(treeish) > 1:
        # Multi-tree: merge aggressively into a fresh (reset) index.
        arg_list.append("--reset")
        arg_list.append("--aggressive")
    # NOTE(review): the mktemp() suffix/prefix strings and the index
    # file name passed to join_path_native were stripped from this
    # snippet. Restore them before use.
    tmp_index = tempfile.mktemp(, , repo.git_dir)
    arg_list.append("--index-output=%s" % tmp_index)
    arg_list.extend(treeish)
    # Move the current index out of the way so git-read-tree is not
    # influenced by it; the swap object restores it when deleted.
    index_handler = TemporaryFileSwap(join_path_native(repo.git_dir, ))
    try:
        repo.git.read_tree(*arg_list, **kwargs)
        index = cls(repo, tmp_index)
        index.entries  # force entries to load before the file is removed
        del(index_handler)  # release early: restores the original index
    finally:
        if osp.exists(tmp_index):
            os.remove(tmp_index)
    return index
|
Merge the given treeish revisions into a new index which is returned.
The original index will remain unaltered
:param repo:
The repository treeish are located in.
:param treeish:
One, two or three Tree Objects, Commits or 40 byte hexshas. The result
changes according to the amount of trees.
If 1 Tree is given, it will just be read into a new index
If 2 Trees are given, they will be merged into a new index using a
two way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other'
one. It behaves like a fast-forward.
If 3 Trees are given, a 3-way merge will be performed with the first tree
being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' tree,
tree 3 is the 'other' one
:param kwargs:
Additional arguments passed to git-read-tree
:return:
New IndexFile instance. It will point to a temporary index location which
does not exist anymore. If you intend to write such a merged Index, supply
an alternate file_path to its 'write' method.
:note:
In the three-way merge case, --aggressive will be specified to automatically
resolve more cases in a commonly correct manner. Specify trivial=True as kwarg
to override that.
As the underlying git-read-tree command takes into account the current index,
it will be temporarily moved out of the way to assure there are no unsuspected
interferences.
|
372,418
|
def get_chat_ids(self):
    """Return unique chat IDs from `/start` command messages sent to our
    bot by users; those chat IDs can be used to send messages to chats.

    :rtype: list
    """
    # NOTE(review): the update/message dict keys and the '/start' text
    # literal were stripped from this snippet. Restore them before use.
    updates = self.get_updates()
    chat_ids = []
    if updates:
        for update in updates:
            message = update[]
            if message[] == :
                chat_ids.append(message[][])
    # Deduplicate before returning a plain list.
    return list(set(chat_ids))
|
Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list
|
372,419
|
def fetch_ensembl_exons(build=):
    """Fetch the ensembl exons via a biomart query.

    Args:
        build(str): ['37', '38']
    """
    # NOTE(review): the build default, biomart host URLs, dataset name,
    # attribute names and filter keys were stripped from this snippet.
    # Restore them before use.
    LOG.info("Fetching ensembl exons build %s ...", build)
    if build == :
        url =
    else:
        url =
    dataset_name =
    dataset = pybiomart.Dataset(name=dataset_name, host=url)
    attributes = [
        ,
        ,
        ,
        ,
        ,
        ,
        ,
        ,
        ,
        ,
        ,
    ]
    # Restrict the query to the configured chromosomes.
    filters = {
        : CHROMOSOMES,
    }
    result = dataset.query(
        attributes = attributes,
        filters = filters
    )
    return result
|
Fetch the ensembl genes
Args:
build(str): ['37', '38']
|
372,420
|
def make(world_name, gl_version=GL_VERSION.OPENGL4, window_res=None, cam_res=None, verbose=False):
    """Create a holodeck environment using the supplied world name.

    Args:
        world_name (str): The name of the world to load as an environment.
            Must match the name of a world in an installed package.
        gl_version (int, optional): The OpenGL version to use (Linux only).
            Defaults to GL_VERSION.OPENGL4.
        window_res ((int, int), optional): Resolution of the game window.
        cam_res ((int, int), optional): Resolution of the pixel camera sensors.
        verbose (bool): Whether to run in verbose mode. Defaults to False.

    Returns:
        HolodeckEnvironment: environment instantiated with all settings
        needed for the specified world plus the supplied arguments.
    """
    worlds = _get_worlds_map()
    if world_name not in worlds:
        raise HolodeckException("Invalid World Name")
    # Copy so the shared world map is never mutated.
    params = copy(worlds[world_name])
    params["start_world"] = True
    params["uuid"] = str(uuid.uuid4())
    params["gl_version"] = gl_version
    params["verbose"] = verbose
    if window_res is not None:
        params["window_width"] = window_res[0]
        params["window_height"] = window_res[1]
    if cam_res is not None:
        params["camera_width"] = cam_res[0]
        params["camera_height"] = cam_res[1]
    return HolodeckEnvironment(**params)
|
Creates a holodeck environment using the supplied world name.
Args:
world_name (str): The name of the world to load as an environment. Must match the name of a world in an
installed package.
gl_version (int, optional): The OpenGL version to use (Linux only). Defaults to GL_VERSION.OPENGL4.
window_res ((int, int), optional): The resolution to load the game window at. Defaults to (512, 512).
cam_res ((int, int), optional): The resolution to load the pixel camera sensors at. Defaults to (256, 256).
verbose (bool): Whether to run in verbose mode. Defaults to False.
Returns:
HolodeckEnvironment: A holodeck environment instantiated with all the settings necessary for the specified
world, and other supplied arguments.
|
372,421
|
def format_bytes_size(val):
    """Take a number of bytes and convert it to a human readable number.

    :param int val: The number of bytes to format.
    :return: The size in a human readable format.
    :rtype: str
    """
    # NOTE(review): the zero-value return literal and the size-unit name
    # list were stripped from this snippet. Restore them before use.
    if not val:
        return
    for sz_name in [, , , , , , ]:
        # Stop at the first unit where the value drops below 1024.
        if val < 1024.0:
            return "{0:.2f} {1}".format(val, sz_name)
        val /= 1024.0
    # Value exceeded the largest supported unit.
    raise OverflowError()
|
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
|
372,422
|
def inspect_members(self):
    """Return the list of all troposphere members we are able to
    construct.

    The result is computed once and cached on the class, so the module
    walk only happens on the first call.
    """
    if not self._inspect_members:
        TemplateGenerator._inspect_members = self._import_all_troposphere_modules()
    return self._inspect_members
|
Returns the list of all troposphere members we are able to
construct
|
372,423
|
def create_machine_group(self, project_name, group_detail):
    """Create a machine group in a project.

    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name
    :type group_detail: MachineGroupDetail
    :param group_detail: the machine group detail config
    :return: CreateMachineGroupResponse
    :raise: LogException
    """
    # NOTE(review): the header key literals were stripped from this
    # snippet (the first likely a content type, the second a body-size
    # header given the str(len(body)) value). Restore them before use.
    headers = {}
    params = {}
    resource = "/machinegroups"
    headers[] =
    body = six.b(json.dumps(group_detail.to_json()))
    headers[] = str(len(body))
    (resp, headers) = self._send("POST", project_name, body, resource, params, headers)
    return CreateMachineGroupResponse(headers, resp)
|
create machine group in a project
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: CreateMachineGroupResponse
:raise: LogException
|
372,424
|
async def send_tokens(payment_handle: int, tokens: int, address: str) -> str:
    """Send tokens to an address.

    :param payment_handle: Integer (always 0)
    :param tokens: Integer amount of tokens to send
    :param address: String payment address

    Example:
        payment_handle = 0
        amount = 1000
        address = await Wallet.create_payment_address('00000000000000000000000001234567')
        await Wallet.send_tokens(payment_handle, amount, address)

    :return: result of the underlying vcx_wallet_send_tokens call
    """
    logger = logging.getLogger(__name__)
    # Lazily create the FFI callback once and reuse it across calls.
    if not hasattr(Wallet.send_tokens, "cb"):
        logger.debug("vcx_wallet_send_tokens: Creating callback")
        Wallet.send_tokens.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    c_payment_handle = c_uint32(payment_handle)
    c_tokens = c_char_p(str(tokens).encode())
    c_address = c_char_p(address.encode())
    # NOTE(review): the FFI function-name literal (presumably
    # 'vcx_wallet_send_tokens', per the debug messages) was stripped
    # from this snippet. Restore it before use.
    result = await do_call(,
                           c_payment_handle,
                           c_tokens,
                           c_address,
                           Wallet.send_tokens.cb)
    logger.debug("vcx_wallet_send_tokens completed")
    return result
|
Sends tokens to an address
payment_handle is always 0
:param payment_handle: Integer
:param tokens: Integer
:param address: String
Example:
payment_handle = 0
amount = 1000
address = await Wallet.create_payment_address('00000000000000000000000001234567')
await Wallet.send_tokens(payment_handle, amount, address)
:return:
|
372,425
|
def execute(self, eopatch=None, bbox=None, time_interval=None):
    """Create an OGC (WMS or WCS) request, download the requested data
    and store it in the given (or a newly created) EOPatch, which is
    returned.

    :param eopatch: EOPatch to add data to, or None to create a new one.
    :param bbox: bounding box of the requested image, in the request CRS.
    :param time_interval: time or time range for which to return results.
    """
    if eopatch is None:
        eopatch = EOPatch()
    request_params, service_type = self._prepare_request_data(eopatch, bbox, time_interval)
    # Pick the request class matching the resolved service type.
    request = {ServiceType.WMS: WmsRequest,
               ServiceType.WCS: WcsRequest}[service_type](**request_params)
    request_dates = request.get_dates()
    if not eopatch.timestamp:
        eopatch.timestamp = request_dates
    download_frames = None
    if self.feature_type.is_time_dependent():
        # Only download frames whose timestamps the patch already holds.
        download_frames = get_common_timestamps(request_dates, eopatch.timestamp)
    images = request.get_data(raise_download_errors=self.raise_download_errors, data_filter=download_frames)
    if not self.raise_download_errors:
        # Drop frames that failed to download (None entries); iterate in
        # reverse so earlier indices stay valid while deleting.
        bad_data = [idx for idx, value in enumerate(images) if value is None]
        for idx in reversed(bad_data):
            # NOTE(review): the warning format strings were stripped
            # from this snippet. Restore them before use.
            LOGGER.warning(, str(request_dates[idx]), self.layer)
            del images[idx]
            del request_dates[idx]
    for removed_frame in eopatch.consolidate_timestamps(request_dates):
        LOGGER.warning(
            , str(removed_frame), self.layer)
    self._add_data(eopatch, np.asarray(images))
    self._add_meta_info(eopatch, request_params, service_type)
    return eopatch
|
Creates OGC (WMS or WCS) request, downloads requested data and stores it together
with valid data mask in newly created EOPatch. Returns the EOPatch.
:param eopatch:
:type eopatch: EOPatch or None
:param bbox: specifies the bounding box of the requested image. Coordinates must be in
the specified coordinate reference system. Required.
:type bbox: BBox
:param time_interval: time or time range for which to return the results, in ISO8601 format
(year-month-date, for example: ``2016-01-01``, or year-month-dateThours:minutes:seconds
format, i.e. ``2016-01-01T16:31:21``). When a single time is specified the request will
return data for that specific date, if it exists. If a time range is specified the result
is a list of all scenes between the specified dates conforming to the cloud coverage
criteria. Most recent acquisition being first in the list. For the latest acquisition use
``latest``. Examples: ``latest``, ``'2016-01-01'``, or ``('2016-01-01', ' 2016-01-31')``
:type time_interval: datetime.datetime, str, or tuple of datetime.datetime/str
|
372,426
|
def check_base_suggested_attributes(self, dataset):
    """Check the global suggested attributes for 2.0 templates. These go
    an extra step besides just checking that they exist.

    :param netCDF4.Dataset dataset: An open netCDF dataset

    The lines below are fragments of the original attribute table that
    leaked into the code cell; kept verbatim inside this docstring so the
    function body stays syntactically plausible:

    persongroupinstitutionpositions institution. (ACDD)
    :publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: , , , or . (ACDD)
    :publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
    :program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
    :contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
    :contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
    :geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
    :geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
    :geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
    :date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
    :date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
    :date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
    :product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
    :keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: ACDD)
    :platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
    :platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
    :instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
    :instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
    :cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata
    """
    # NOTE(review): attribute-name literals, default values and
    # assertion messages were stripped from this snippet. Restore them
    # before use.
    suggested_ctx = TestCtx(BaseCheck.LOW, )
    platform_name = getattr(dataset, , )
    suggested_ctx.assert_true(platform_name != , )
    cdm_data_type = getattr(dataset, , )
    suggested_ctx.assert_true(cdm_data_type.lower() in [, , , , , , ],
                              .format(cdm_data_type))
    # Validate the date attributes as ISO 8601 timestamps.
    for attr in [, , ]:
        attr_value = getattr(dataset, attr, )
        try:
            parse_datetime(attr_value)
            suggested_ctx.assert_true(True, )
        except ISO8601Error:
            suggested_ctx.assert_true(False, .format(attr, attr_value))
    units = getattr(dataset, , ).lower()
    suggested_ctx.assert_true(units == , .format(units))
    units = getattr(dataset, , ).lower()
    suggested_ctx.assert_true(units == , .format(units))
    contributor_name = getattr(dataset, , )
    contributor_role = getattr(dataset, , )
    names = contributor_role.split()  # NOTE(review): likely should split contributor_name — confirm
    roles = contributor_role.split()
    suggested_ctx.assert_true(contributor_name != , )
    suggested_ctx.assert_true(len(names) == len(roles), )
    suggested_ctx.assert_true(contributor_role != , )
    suggested_ctx.assert_true(len(names) == len(roles), )
    return suggested_ctx.to_result()
|
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF)
|
372,427
|
def _display_token(self):
if self.token is None:
return "301 Moved", "", {"Location": "/login"}
return ("200 OK",
self.TOKEN_TEMPLATE.format(
access_token=self.token["access_token"]),
{"Content-Type": "text/html"})
|
Display token information or redirect to login prompt if none is
available.
|
372,428
|
def yahoo(base, target):
    """Fetch the *base* -> *target* exchange rate from Yahoo Finance.

    NOTE(review): the URL and request-parameter literals were lost from
    this body; the values below follow the classic Yahoo quotes.csv API
    ("sl1d1t1" yields symbol,last,date,time) -- confirm against upstream.

    Returns:
        decimal.Decimal: the last traded rate.
    """
    api_url = 'http://download.finance.yahoo.com/d/quotes.csv'
    resp = requests.get(
        api_url,
        params={
            'e': '.csv',
            'f': 'sl1d1t1',
            's': '{0}{1}=X'.format(base, target),
        },
        timeout=1,
    )
    # Response looks like: "USDEUR=X",0.9123,"date","time" -> take field 1.
    value = resp.text.split(',', 2)[1]
    return decimal.Decimal(value)
|
Parse data from Yahoo.
|
372,429
|
def degree_circle(self, EdgeAttribute=None, network=None, NodeAttribute=None,
                  nodeList=None, singlePartition=None, verbose=None):
    """Execute the Degree Sorted Circle Layout on a network.

    :param EdgeAttribute (string, optional): edge column with numeric
        values used as weights in the layout algorithm
    :param network (string, optional): network by name or SUID: prefix;
        CURRENT or blank selects the current network
    :param NodeAttribute (string, optional): node column with numeric
        values used as weights in the layout algorithm
    :param nodeList (string, optional): node list / selection-state
        keywords / COLUMN:VALUE patterns
    :param singlePartition (string, optional): don't partition graph
        before layout; boolean, defaults to false
    :return: the CyREST response.
    """
    network = check_network(self, network, verbose=verbose)
    # Parameter names mirror the keyword arguments accepted by CyREST.
    PARAMS = set_param(['EdgeAttribute', 'network', 'NodeAttribute',
                        'nodeList', 'singlePartition'],
                       [EdgeAttribute, network, NodeAttribute, nodeList,
                        singlePartition])
    response = api(url=self.__url + "/degree-circle", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
|
Execute the Degree Sorted Circle Layout on a network.
:param EdgeAttribute (string, optional): The name of the edge column contai
ning numeric values that will be used as weights in the layout algor
ithm. Only columns containing numeric values are shown
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value c
an also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column contai
ning numeric values that will be used as weights in the layout algor
ithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix
is not used, the NAME column is matched by default. A list of COLUMN
:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be
used to match multiple values.
:param singlePartition (string, optional): Don't partition graph before lay
out; boolean values only, true or false; defaults to false
|
372,430
|
def to_python(cls, value, **kwargs):
    """Deserialise *value* to a string.

    Falsy values are returned as None; otherwise ``str`` then
    ``encode`` are attempted, and ``cls.exception`` is raised if both
    conversions fail.
    """
    if not value:
        return
    try:
        return str(value)
    except Exception:  # narrow from bare except: don't swallow SystemExit etc.
        pass
    try:
        return value.encode()
    except Exception:
        pass
    raise cls.exception("Cannot deserialize value {0} to string".format(value))
|
String deserialisation: just returns the value as a string.
|
372,431
|
def BuildAdGroupCriterionOperations(adgroup_operations, number_of_keywords=1):
    """Builds the operations adding a Keyword Criterion to each AdGroup.

    NOTE(review): the string literals were lost from this body; the
    keys/values below follow the Google AdWords batch-job example
    (BiddableAdGroupCriterion + Keyword) -- confirm against upstream.

    Args:
        adgroup_operations: a list containing operations that add AdGroups.
        number_of_keywords: an int, the number of Keywords to create.
    Returns:
        a list of operations creating a new Keyword Criterion associated
        with each provided AdGroup.
    """
    criterion_operations = [
        {
            'xsi_type': 'AdGroupCriterionOperation',
            'operand': {
                'xsi_type': 'BiddableAdGroupCriterion',
                'adGroupId': adgroup_operation['operand']['id'],
                'criterion': {
                    'xsi_type': 'Keyword',
                    # Every second keyword is made invalid with '!!!'.
                    'text': 'mars%s%s' % (i, '!!!' if i % 2 == 0 else ''),
                    'matchType': 'BROAD'
                }
            },
            'operator': 'ADD'
        }
        for adgroup_operation in adgroup_operations
        for i in range(number_of_keywords)]
    return criterion_operations
|
Builds the operations adding a Keyword Criterion to each AdGroup.
Args:
adgroup_operations: a list containing the operations that will add AdGroups.
number_of_keywords: an int defining the number of Keywords to be created.
Returns:
a list containing the operations that will create a new Keyword Criterion
associated with each provided AdGroup.
|
372,432
|
def get_remote_url(self, remote='origin', cached=True):
    """Get a git remote URL for this instance.

    Caches the result on the class; falls back to ``git config`` for old
    git versions, and normalises ``git@host:path`` SSH URLs to http.

    NOTE(review): the string literals (remote default, error-substring
    match, config command) were lost; the values below are plausible
    reconstructions -- confirm against upstream.
    """
    if hasattr(self.__class__, '_remote_url') and cached:
        url = self.__class__._remote_url
    else:
        r = self.get_remote(remote)
        try:
            url = list(r.urls)[0]
        except GitCommandError as ex:
            if 'correct access rights' in str(ex):
                # Older git lacks `remote get-url`; read config directly.
                cmd = ['git', 'config', '--get', 'remote.%s.url' % r.name]
                url = Git().execute(cmd)
            else:
                raise ex
        except AttributeError:
            url = None
        if url is not None and url.startswith('git@'):
            # Convert SSH form git@host:path to an http URL.
            domain = url.split('@')[1].split(':')[0]
            path = url.split(':')[1]
            url = "http://%s/%s" % (domain, path)
        self.__class__._remote_url = url
    return url
|
Get a git remote URL for this instance.
|
372,433
|
def _get_ensemble_bed_files(items):
    """Collect all ensemble structural-variant BED calls, skipping any
    normal samples from tumor/normal pairs."""
    bed_files = []
    for data in items:
        for sv_call in data.get("sv", []):
            if sv_call["variantcaller"] != "sv-ensemble":
                continue
            if ("vrn_file" in sv_call
                    and vcfutils.get_paired_phenotype(data) != "normal"
                    and file_exists(sv_call["vrn_file"])):
                bed_files.append(sv_call["vrn_file"])
    return bed_files
|
get all ensemble structural BED file calls, skipping any normal samples from
tumor/normal calls
|
372,434
|
def get_load(jid):
    """Return the load data that marks a specified jid.

    NOTE(review): the dict-key literals were lost from this body;
    'load' / 'jid' below follow the salt couchbase returner -- confirm
    against upstream.
    """
    cb_ = _get_connection()
    try:
        jid_doc = cb_.get(six.text_type(jid))
    except couchbase.exceptions.NotFoundError:
        # Unknown jid -> empty load.
        return {}
    ret = {}
    try:
        ret = jid_doc.value['load']
        ret['jid'] = jid_doc.value['jid']
    except KeyError as e:
        log.error(e)
    return ret
|
Return the load data that marks a specified jid
|
372,435
|
def vlink(s_expnum, s_ccd, s_version, s_ext,
          l_expnum, l_ccd, l_version, l_ext, s_prefix=None, l_prefix=None):
    """Make a link between two versions of a file.

    @param s_expnum: source exposure number
    @param s_ccd: source ccd
    @param s_version: source version
    @param s_ext: source extension
    @param l_expnum: link exposure number
    @param l_ccd: link ccd
    @param l_version: link version
    @param l_ext: link extension
    @param s_prefix: source prefix
    @param l_prefix: link prefix
    @return: result of the client link call
    """
    src = get_uri(s_expnum, ccd=s_ccd, version=s_version, ext=s_ext,
                  prefix=s_prefix)
    dst = get_uri(l_expnum, ccd=l_ccd, version=l_version, ext=l_ext,
                  prefix=l_prefix)
    return client.link(src, dst)
|
Make a link between two versions of a file.
@param s_expnum:
@param s_ccd:
@param s_version:
@param s_ext:
@param l_expnum:
@param l_ccd:
@param l_version:
@param l_ext:
@param s_prefix:
@param l_prefix:
@return:
|
372,436
|
def hover_pixmap(self, value):
    """Setter for the **hover_pixmap** attribute.

    :param value: Attribute value.
    :type value: QPixmap
    """
    if value is not None:
        # Restored the assertion message (literal was stripped).
        assert type(value) is QPixmap, \
            "'{0}' attribute: '{1}' type is not 'QPixmap'!".format(
                "hover_pixmap", value)
    self.__hover_pixmap = value
|
Setter for **self.__hover_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap
|
372,437
|
def omim_terms(case_obj):
    """Extract all OMIM phenotypes available for the case.

    NOTE(review): the key literals were lost from this body; the values
    below follow the scout MatchMaker export format -- confirm upstream.

    Args:
        case_obj(dict): a scout case object
    Returns:
        disorders(list): a list of OMIM disorder objects
    """
    LOG.info("Collecting OMIM disorders for case {}".format(
        case_obj.get('display_name')))
    disorders = []
    case_disorders = case_obj.get('diagnosis_phenotypes')  # list of OMIM ids
    if case_disorders:
        for disorder in case_disorders:
            # MatchMaker expects ids in "MIM:<number>" form.
            disorder_obj = {"id": ':'.join(['MIM', str(disorder)])}
            disorders.append(disorder_obj)
    return disorders
|
Extract all OMIM phenotypes available for the case
Args:
case_obj(dict): a scout case object
Returns:
disorders(list): a list of OMIM disorder objects
|
372,438
|
def get_streaming(self, path, stype="M3U8_AUTO_480", **kwargs):
    """Fetch the m3u8 playlist for a video stored on Baidu PCS.

    :param path: remote path of the video file
    :param stype: stream type; known values are ``M3U8_AUTO_240`` /
        ``M3U8_AUTO_480`` / ``M3U8_AUTO_720``.

        .. warning::
            ``M3U8_AUTO_240`` is unreliable; 480P is the most stable
            and the Baidu default.
    :return: the m3u8 playlist content, or a known PCS error code
        (31066 file missing, 31304 unsupported type, 31023 bad params)

    NOTE(review): URL/parameter literals were lost from this body; the
    reconstruction below follows the Baidu PCS REST API -- confirm
    against upstream.
    """
    params = {
        'path': path,
        'type': stype,
    }
    url = 'https://{0}/rest/2.0/pcs/file'.format(BAIDUPCS_SERVER)
    while True:
        ret = self._request('file', 'streaming',
                            url=url, extra_params=params, **kwargs)
        if not ret.ok:
            logging.debug('get_streaming failed with status %s' % ret.status_code)
        try:
            jdata = json.loads(ret.content)
        except ValueError:
            # Success responses carry the raw m3u8 playlist, not JSON.
            return ret.content
        if jdata['error_code'] == 31345:
            # Transcoding still in progress -- retry until ready.
            continue
        elif jdata['error_code'] == 31066:
            return 31066  # file does not exist
        elif jdata['error_code'] == 31304:
            return 31304  # file type not supported
        elif jdata['error_code'] == 31023:
            return 31023  # params error
        return ret.content
|
获得视频的m3u8列表
:param path: 视频文件路径
:param stype: 返回stream类型, 已知有``M3U8_AUTO_240``/``M3U8_AUTO_480``/``M3U8_AUTO_720``
.. warning::
M3U8_AUTO_240会有问题, 目前480P是最稳定的, 也是百度网盘默认的
:return: str 播放(列表)需要的信息
|
372,439
|
def create_html(self, filename=None):
    """Create the HTML for a circle visual from a GeoJSON data source.

    Renders the map template with all visual options; when *filename* is
    given the HTML is written there and None is returned, otherwise the
    rendered HTML string is returned.

    NOTE(review): several string literals were stripped from this body
    (the style format string, the legend error message, the label
    quoting) -- restore from upstream before running.
    """
    if isinstance(self.style, str):
        style = "".format(self.style)
    else:
        style = self.style
    # Base map options handed to the JS template.
    options = dict(
        gl_js_version=GL_JS_VERSION,
        accessToken=self.access_token,
        div_id=self.div_id,
        style=style,
        center=list(self.center),
        zoom=self.zoom,
        geojson_data=json.dumps(self.data, ensure_ascii=False),
        belowLayer=self.below_layer,
        opacity=self.opacity,
        minzoom=self.min_zoom,
        maxzoom=self.max_zoom,
        pitch=self.pitch,
        bearing=self.bearing,
        boxZoomOn=json.dumps(self.box_zoom_on),
        doubleClickZoomOn=json.dumps(self.double_click_zoom_on),
        scrollZoomOn=json.dumps(self.scroll_zoom_on),
        touchZoomOn=json.dumps(self.touch_zoom_on),
        popupOpensOnHover=self.popup_open_action==,
        includeSnapshotLinks=self.add_snapshot_links,
        preserveDrawingBuffer=json.dumps(self.add_snapshot_links),
        showScale=self.scale,
        scaleUnits=self.scale_unit_system,
        scaleBorderColor=self.scale_border_color,
        scalePosition=self.scale_position,
        scaleFillColor=self.scale_background_color,
        scaleTextColor=self.scale_text_color,
    )
    if self.legend:
        # A gradient legend is incompatible with this legend function.
        if all([self.legend, self.legend_gradient, self.legend_function == ]):
            raise LegendError(.join([,
                ]))
        options.update(
            showLegend=self.legend,
            legendLayout=self.legend_layout,
            legendFunction=self.legend_function,
            legendStyle=self.legend_style,
            legendGradient=json.dumps(self.legend_gradient),
            legendFill=self.legend_fill,
            legendHeaderFill=self.legend_header_fill,
            legendTextColor=self.legend_text_color,
            legendNumericPrecision=json.dumps(self.legend_text_numeric_precision),
            legendTitleHaloColor=self.legend_title_halo_color,
            legendKeyShape=self.legend_key_shape,
            legendKeyBordersOn=json.dumps(self.legend_key_borders_on)
        )
    if self.vector_source:
        # Vector-tile source: join local data onto remote tiles.
        options.update(
            vectorUrl=self.vector_url,
            vectorLayer=self.vector_layer_name,
            vectorJoinDataProperty=self.vector_join_property,
            joinData=json.dumps(False),
            dataJoinProperty=self.data_join_property,
            enableDataJoin=not self.disable_data_join
        )
        data = geojson_to_dict_list(self.data)
        if bool(data):
            options.update(joinData=json.dumps(data, ensure_ascii=False))
    if self.label_property is None:
        options.update(labelProperty=None)
    else:
        options.update(labelProperty= + self.label_property + )
    options.update(
        labelColor=self.label_color,
        labelSize=self.label_size,
        labelHaloColor=self.label_halo_color,
        labelHaloWidth=self.label_halo_width
    )
    self.add_unique_template_variables(options)
    if filename:
        html = templates.format(self.template, **options)
        with codecs.open(filename, "w", "utf-8-sig") as f:
            f.write(html)
        return None
    else:
        return templates.format(self.template, **options)
|
Create a circle visual from a geojson data source
|
372,440
|
def make_conditional(
    self, request_or_environ, accept_ranges=False, complete_length=None
):
    """Make the response conditional to the request.

    Only acts on GET/HEAD requests.  Sets the Date header if missing,
    processes a Range request when ``complete_length`` is given, and
    answers 412 (failed If-Match) or 304 (unmodified) as appropriate.
    Returns ``self`` so calls can be chained:
    ``return resp.make_conditional(req)``.

    :param request_or_environ: request object or WSGI environment to be
        conditional against.
    :param accept_ranges: value for the `Accept-Ranges` header
        (False = unset, True = "bytes", None = "none", or a string).
    :param complete_length: complete length for valid Range requests;
        mandatory for successful Range completion.
    """
    environ = _get_environ(request_or_environ)
    if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
        # Do not override an already existing Date header.
        if "date" not in self.headers:
            self.headers["Date"] = http_date()
        accept_ranges = _clean_accept_ranges(accept_ranges)
        is206 = self._process_range_request(environ, complete_length, accept_ranges)
        if not is206 and not is_resource_modified(
            environ,
            self.headers.get("etag"),
            None,
            self.headers.get("last-modified"),
        ):
            # A failing If-Match precondition yields 412, otherwise the
            # resource is simply unmodified -> 304.
            if parse_etags(environ.get("HTTP_IF_MATCH")):
                self.status_code = 412
            else:
                self.status_code = 304
        if (
            self.automatically_set_content_length
            and "content-length" not in self.headers
        ):
            length = self.calculate_content_length()
            if length is not None:
                self.headers["Content-Length"] = length
    return self
|
Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
For optimal performance when handling range requests, it's recommended
that your response data object implements `seekable`, `seek` and `tell`
methods as described by :py:class:`io.IOBase`. Objects returned by
:meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
:param accept_ranges: This parameter dictates the value of
`Accept-Ranges` header. If ``False`` (default),
the header is not set. If ``True``, it will be set
to ``"bytes"``. If ``None``, it will be set to
``"none"``. If it's a string, it will use this
value.
:param complete_length: Will be used only in valid Range Requests.
It will set `Content-Range` complete length
value and compute `Content-Length` real value.
This parameter is mandatory for successful
Range Requests completion.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
|
372,441
|
def parse(self, parser):
    """Parse a translatable ``{% trans %}`` tag.

    NOTE(review): many string literals (token-type names such as
    'block_end'/'name'/'assign'/'comma', and error messages) were
    stripped from this body -- restore from upstream (jinja2
    ext.InternationalizationExtension.parse) before running.
    """
    lineno = next(parser.stream).lineno
    num_called_num = False
    plural_expr = None
    plural_expr_assignment = None
    variables = {}
    trimmed = None
    # Collect the variables referenced in the tag header.
    while parser.stream.current.type != :
        if variables:
            parser.stream.expect()
        if parser.stream.skip_if():
            break
        name = parser.stream.expect()
        if name.value in variables:
            parser.fail( %
                name.value, name.lineno,
                exc=TemplateAssertionError)
        if parser.stream.current.type == :
            next(parser.stream)
            variables[name.value] = var = parser.parse_expression()
        elif trimmed is None and name.value in (, ):
            trimmed = name.value ==
            continue
        else:
            variables[name.value] = var = nodes.Name(name.value, )
        if plural_expr is None:
            # Calls cannot serve directly as the plural expression;
            # assign the result to a temporary name first.
            if isinstance(var, nodes.Call):
                plural_expr = nodes.Name(, )
                variables[name.value] = plural_expr
                plural_expr_assignment = nodes.Assign(
                    nodes.Name(, ), var)
            else:
                plural_expr = var
            num_called_num = name.value ==
    parser.stream.expect()
    plural = None
    have_plural = False
    referenced = set()
    # Parse the singular part.
    singular_names, singular = self._parse_block(parser, True)
    if singular_names:
        referenced.update(singular_names)
        if plural_expr is None:
            plural_expr = nodes.Name(singular_names[0], )
            num_called_num = singular_names[0] ==
    # Parse the optional plural part.
    if parser.stream.current.test():
        have_plural = True
        next(parser.stream)
        if parser.stream.current.type != :
            name = parser.stream.expect()
            if name.value not in variables:
                parser.fail( %
                    name.value, name.lineno,
                    exc=TemplateAssertionError)
            plural_expr = variables[name.value]
            num_called_num = name.value ==
        parser.stream.expect()
        plural_names, plural = self._parse_block(parser, False)
        next(parser.stream)
        referenced.update(plural_names)
    else:
        next(parser.stream)
    # Register free names so they resolve at runtime.
    for var in referenced:
        if var not in variables:
            variables[var] = nodes.Name(var, )
    if not have_plural:
        plural_expr = None
    elif plural_expr is None:
        parser.fail(, lineno)
    if trimmed is None:
        trimmed = self.environment.policies[]
    if trimmed:
        singular = self._trim_whitespace(singular)
        if plural:
            plural = self._trim_whitespace(plural)
    node = self._make_node(singular, plural, variables, plural_expr,
                           bool(referenced),
                           num_called_num and have_plural)
    node.set_lineno(lineno)
    if plural_expr_assignment is not None:
        return [plural_expr_assignment, node]
    else:
        return node
|
Parse a translatable tag.
|
372,442
|
def apply_patch(self):
    """Fix the default socket lib on Python 2 so that a client
    disconnecting while data is being received (Broken pipe) is
    handled; Python 3 needs no patch."""
    if sys.version_info < (3, 0):
        from .patch.socket import socket as patch
        socket.socket = patch
|
Fix default socket lib to handle client disconnection while receiving data (Broken pipe)
|
372,443
|
def boundary_polygon(self, time):
    """Get coordinates of the object boundary in counter-clockwise order.

    Restored stripped literals (pad mode, boundary mode, structured
    dtype and sort order) following the standard convex-hull boundary
    extraction pattern -- confirm against upstream.
    """
    ti = np.where(time == self.times)[0][0]
    com_x, com_y = self.center_of_mass(time)
    # Pad with zeros so boundaries touching the edge are closed.
    padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)
    chull = convex_hull_image(padded_mask)
    boundary_image = find_boundaries(chull, mode='inner', background=0)
    # Strip the padding again.
    boundary_image = boundary_image[1:-1, 1:-1]
    boundary_x = self.x[ti].ravel()[boundary_image.ravel()]
    boundary_y = self.y[ti].ravel()[boundary_image.ravel()]
    # Sort boundary points by angle (then radius) around the center of
    # mass to obtain counter-clockwise ordering.
    r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)
    theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360
    polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)],
                            dtype=[('r', 'f4'), ('theta', 'f4')])
    coord_order = np.argsort(polar_coords, order=['theta', 'r'])
    ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])
    return ordered_coords
|
Get coordinates of object boundary in counter-clockwise order
|
372,444
|
def to_value_list(original_strings, corenlp_values=None):
    """Convert a list of strings to a deduplicated list of Values.

    Args:
        original_strings (list[basestring])
        corenlp_values (list[basestring or None])
    Returns:
        list[Value]
    """
    assert isinstance(original_strings, (list, tuple, set))
    if corenlp_values is None:
        return list({to_value(s) for s in original_strings})
    assert isinstance(corenlp_values, (list, tuple, set))
    assert len(original_strings) == len(corenlp_values)
    paired = zip(original_strings, corenlp_values)
    return list({to_value(s, c) for s, c in paired})
|
Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
|
372,445
|
def name(object):
    """Try to find some reasonable name for the object.

    Tries, in order: a ``name`` attribute, the object's ``__name__``,
    the class's ``__name__``, and finally ``str(object)``.

    NOTE(review): the attribute-name literals were stripped; the order
    below is a plausible reconstruction -- confirm against upstream.
    """
    return (getattr(object, 'name', 0) or getattr(object, '__name__', 0)
            or getattr(getattr(object, '__class__', 0), '__name__', 0)
            or str(object))
|
Try to find some reasonable name for the object.
|
372,446
|
def getVariable(dbg, thread_id, frame_id, scope, attrs):
    """Return the value of a variable.

    :param scope: one of BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME.
        BY_ID traverses all live objects and treats ``frame_id`` as the
        id of the object to find (the frame is ignored in that case).
    :param attrs: tab-separated attribute path resolved inside the
        chosen scope (i.e.: ``obj\\tattr1\\tattr2``).

    NOTE(review): the scope-name literals and split-separator literals
    were stripped from this body -- restore from upstream before use.
    """
    if scope == :
        if thread_id != get_current_thread_id(threading.currentThread()):
            raise VariableError("getVariable: must execute on same thread")
        try:
            import gc
            objects = gc.get_objects()
        except:
            # Not all Python variants expose gc.get_objects().
            pass
        else:
            frame_id = int(frame_id)
            for var in objects:
                if id(var) == frame_id:
                    if attrs is not None:
                        attrList = attrs.split()
                        for k in attrList:
                            _type, _typeName, resolver = get_type(var)
                            var = resolver.resolve(var, k)
                    return var
        # Object not found by id (possibly already garbage collected).
        sys.stderr.write( % (frame_id,))
        return None
    frame = dbg.find_frame(thread_id, frame_id)
    if frame is None:
        return {}
    if attrs is not None:
        attrList = attrs.split()
    else:
        attrList = []
    for attr in attrList:
        # NOTE(review): the replace() result is discarded here -- this
        # looks like an upstream bug (strings are immutable).
        attr.replace("@_@TAB_CHAR@_@", )
    if scope == :
        # EXPRESSION scope: evaluate the first component, then resolve
        # the remaining attribute path on the result.
        for count in xrange(len(attrList)):
            if count == 0:
                var = evaluate_expression(dbg, frame, attrList[count], False)
            else:
                _type, _typeName, resolver = get_type(var)
                var = resolver.resolve(var, attrList[count])
    else:
        if scope == "GLOBAL":
            var = frame.f_globals
            # GLOBAL scope has one less attribute in the path.
            del attrList[0]
        else:
            # Locals shadow globals in the combined view.
            var = {}
            var.update(frame.f_globals)
            var.update(frame.f_locals)
        for k in attrList:
            _type, _typeName, resolver = get_type(var)
            var = resolver.resolve(var, k)
    return var
|
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
|
372,447
|
def copy_ecu_with_frames(ecu_or_glob, source_db, target_db):
    """Copy ECU(s) identified by name or object between CAN matrices.

    Additionally copies all frames the ECU transmits or receives and
    all relevant ECU attribute definitions (with defaults and, for
    ENUM defines, the used value).

    :param ecu_or_glob: Ecu instance or glob pattern for Ecu name
    :param source_db: source CAN matrix
    :param target_db: destination CAN matrix
    """
    # Resolve the ECU list from an instance or a glob pattern.
    if isinstance(ecu_or_glob, cm.Ecu):
        ecu_list = [ecu_or_glob]
    else:
        ecu_list = source_db.glob_ecus(ecu_or_glob)
    for ecu in ecu_list:
        logger.info("Copying ECU " + ecu.name)
        target_db.add_ecu(copy.deepcopy(ecu))
        # Copy frames transmitted by this ECU.
        for frame in source_db.frames:
            if ecu.name in frame.transmitters:
                copy_frame(frame.arbitration_id, source_db, target_db)
        # Copy frames with at least one signal received by this ECU.
        for frame in source_db.frames:
            for signal in frame.signals:
                if ecu.name in signal.receivers:
                    copy_frame(frame.arbitration_id, source_db, target_db)
                    break
        # Copy the ECU defines used by this ECU's attributes.
        for attribute in ecu.attributes:
            if attribute not in target_db.ecu_defines:
                target_db.add_ecu_defines(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.ecu_defines[attribute].definition))
                target_db.add_define_default(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.ecu_defines[attribute].defaultValue))
            # Restored stripped literal: ENUM defines must also carry
            # the value actually used by this ECU in their value list.
            if source_db.ecu_defines[attribute].type == 'ENUM':
                temp_attr = ecu.attribute(attribute, db=source_db)
                if temp_attr not in target_db.ecu_defines[attribute].values:
                    target_db.ecu_defines[attribute].values.append(copy.deepcopy(temp_attr))
                    target_db.ecu_defines[attribute].update()
|
Copy ECU(s) identified by Name or as Object from source CAN matrix to target CAN matrix.
This function additionally copy all relevant Frames and Defines.
:param ecu_or_glob: Ecu instance or glob pattern for Ecu name
:param source_db: Source CAN matrix
:param target_db: Destination CAN matrix
|
372,448
|
def use(self, func, when='pre'):
    """Append a middleware to the algorithm.

    NOTE(review): the original default for ``when``, the log message
    and the dict-key literals were stripped; the values below are
    plausible reconstructions -- confirm against upstream.  Also
    replaced py2-only ``func.func_code`` with ``func.__code__``.
    """
    print('Installing middleware {0}'.format(func.__name__))
    self.middlewares.append({
        'func': func,
        'name': func.__name__,
        'args': func.__code__.co_varnames,
        'when': when
    })
|
Append a middleware to the algorithm
|
372,449
|
def get_descriptor_defaults(self, api_info, hostname=None, x_google_api_name=False):
    """Gets a default OpenAPI (swagger 2.0) configuration for a service.

    NOTE(review): the key literals were lost from this body; the keys
    below follow the Google Cloud Endpoints swagger generator --
    confirm against upstream.

    Args:
        api_info: _ApiInfo object for this service.
        hostname: string, hostname of the API, overriding the value set
            on the current service. Defaults to None.
        x_google_api_name: bool, whether to add the x-google-api-name
            extension field.
    Returns:
        A dictionary with the default configuration.
    """
    hostname = (hostname or util.get_app_hostname() or
                api_info.hostname)
    # Local dev servers are served over plain http.
    protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
                          util.is_running_on_devserver()) else 'https'
    base_path = api_info.base_path
    if base_path != '/':
        base_path = base_path.rstrip('/')
    defaults = {
        'swagger': '2.0',
        'info': {
            'version': api_info.api_version,
            'title': api_info.name
        },
        'host': hostname,
        'consumes': [],
        'produces': [],
        'schemes': [protocol],
        'basePath': base_path,
    }
    if x_google_api_name:
        defaults['x-google-api-name'] = _validate_api_name(api_info.name)
    return defaults
|
Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
|
372,450
|
def dev_get_chunk(dev_name, state, pugrp=None, punit=None):
    """Get a chunk-descriptor for the first chunk in the given state.

    If pugrp and punit are set, only that pugrp/punit is searched.

    @returns the first chunk in the given state if one exists, None
    otherwise (also None when no report is available).
    """
    report = dev_get_rprt(dev_name, pugrp, punit)
    if not report:
        return None
    for descriptor in report:
        if descriptor["cs"] == state:
            return descriptor
    return None
|
Get a chunk-descriptor for the first chunk in the given state.
If the pugrp and punit is set, then search only that pugrp/punit
@returns the first chunk in the given state if one exists, None otherwise
|
372,451
|
def generate(env):
    """Add RPCGEN Builders and construction variables for an Environment.

    Restored stripped literals following the SCons rpcgen tool: each
    builder consumes a ``.x`` interface file and produces the
    corresponding client/header/service/xdr output.
    """
    client = Builder(action=rpcgen_client, suffix='_clnt.c', src_suffix='.x')
    header = Builder(action=rpcgen_header, suffix='.h', src_suffix='.x')
    service = Builder(action=rpcgen_service, suffix='_svc.c', src_suffix='.x')
    xdr = Builder(action=rpcgen_xdr, suffix='_xdr.c', src_suffix='.x')
    env.Append(BUILDERS={'RPCGenClient': client,
                         'RPCGenHeader': header,
                         'RPCGenService': service,
                         'RPCGenXDR': xdr})
    env['RPCGEN'] = 'rpcgen'
    env['RPCGENFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('')
|
Add RPCGEN Builders and construction variables for an Environment.
|
372,452
|
def _compile_int_g(self):
    """Time Domain Simulation - update algebraic equations and Jacobian.

    NOTE(review): the generated-source string passed to ``compile`` has
    been lost (empty literals below), and ``eval`` on generated code is
    a safety concern -- restore from upstream before use.
    """
    string =
    self.int_g = compile(eval(string), , )
|
Time Domain Simulation - update algebraic equations and Jacobian
|
372,453
|
def createFromSource(cls, vs, name, registry):
    """Return a registry component for anything that's a valid package
    name (this does not guarantee that the component actually exists in
    the registry: use availableVersions() for that).

    NOTE(review): the regex and error-message literals were stripped,
    and the next four lines are remnants of a mangled docstring --
    restore from upstream before use.
    """
    s a valid package
    name (this does not guarantee that the component actually exists in
    the registry: use availableVersions() for that).
    t match this then escalate to make
    if registry == :
        name_match = re.match(, name)
        if not name_match:
            raise access_common.AccessException(
                % name
            )
    else:
        name_match = re.match(, name)
        if not name_match:
            raise access_common.AccessException(
                % name
            )
    assert(vs.semantic_spec)
    return RegistryThing(name, vs.semantic_spec, registry)
|
returns a registry component for anything that's a valid package
name (this does not guarantee that the component actually exists in
the registry: use availableVersions() for that).
|
372,454
|
def _get_build_prefix():
    """Returns a safe per-user build_prefix directory under the temp dir.

    On non-Windows platforms, refuses to use a directory that is not
    owned by the current user or that is a symlink (pip security check).
    """
    path = os.path.join(
        tempfile.gettempdir(),
        # Restored stripped literals per pip's locations module.
        'pip_build_%s' % __get_username().replace(' ', '_')
    )
    if WINDOWS:
        # The ownership/symlink check below is POSIX-only.
        return path
    try:
        os.mkdir(path)
        write_delete_marker_file(path)
    except OSError:
        file_uid = None
        try:
            # raises OSError for symlinks
            file_uid = get_path_uid(path)
        except OSError:
            file_uid = None
        if file_uid != os.geteuid():
            msg = (
                "The temporary folder for building (%s) is either not owned by"
                " you, or is a symlink." % path
            )
            print(msg)
            print(
                "pip will not work until the temporary folder is either "
                "deleted or is a real directory owned by your user account."
            )
            raise exceptions.InstallationError(msg)
    return path
|
Returns a safe build_prefix
|
372,455
|
def entropy_H(self, data):
    """Calculate the Shannon entropy (bits per byte) of a chunk of data.

    Accepts both ``str`` (py2-style, via ord) and ``bytes`` (whose
    iteration already yields ints), generalizing the original.
    """
    if len(data) == 0:
        return 0.0
    # 256 unsigned-long counters, one per byte value.
    occurences = array.array('L', [0] * 256)
    for x in data:
        occurences[x if isinstance(x, int) else ord(x)] += 1
    entropy = 0
    for count in occurences:
        if count:
            p_x = float(count) / len(data)
            entropy -= p_x * math.log(p_x, 2)
    return entropy
|
Calculate the entropy of a chunk of data.
|
372,456
|
def _newproject(command, path, name, settings):
    """Helper to create a new project at *path*.

    Initialises a git repo, adds the blueprint template as a submodule,
    creates the spreadsheet and config, copies starter HTML files,
    makes the initial commit and prints follow-up instructions.

    NOTE(review): several string literals were stripped from this body
    (submodule url/path, commit message template), and ``args`` used in
    ``ensure_project`` is not defined in this scope -- restore from
    upstream before running.
    """
    key = None
    title = _get_project_title()
    template = _get_template(settings)
    # Initialise the git repository in the project path.
    git = sh.git.bake(_cwd=path)
    puts(git.init())
    if template.get("url"):
        # Template-backed project: pull in the blueprint submodule.
        puts(git.submodule.add(template[], ))
        puts(git.submodule.update(*[]))
        key = _create_spreadsheet(name, title, path, settings)
        puts(colored.green("\nCopying html files..."))
        files = glob.iglob(os.path.join(path, "_blueprint", "*.html"))
        for file in files:
            if os.path.isfile(file):
                dir, filename = os.path.split(file)
                # Skip private (_*) and hidden (.*) files.
                if not filename.startswith("_") and not filename.startswith("."):
                    puts("Copying {0} to {1}".format(filename, path))
                    shutil.copy2(file, path)
        ignore = os.path.join(path, "_blueprint", ".gitignore")
        if os.path.isfile(ignore):
            shutil.copy2(ignore, path)
    else:
        # No template: create an empty index page.
        empty_index_path = os.path.join(path, "index.html")
        open(empty_index_path, "w")
    _copy_config_template(name, title, template, path, key, settings)
    puts(colored.green("\nInitial commit"))
    puts(git.add())
    puts(git.commit(m=.format(name, template[])))
    _install_requirements(path)
    # Run the project's newproject hook.
    with ensure_project(command, args, path) as site:
        site.call_hook("newproject", site, git)
    puts("\nAll done! To preview your new project, type:\n")
    puts("{0} {1}".format(colored.green("tarbell switch"), colored.green(name)))
    puts("\nor\n")
    puts("{0}".format(colored.green("cd %s" % path)))
    puts("{0}".format(colored.green("tarbell serve\n")))
    puts("\nYou got this!\n")
|
Helper to create new project.
|
372,457
|
def get_text_stream(name, encoding=None, errors='strict'):
    """Returns a system stream for text processing.

    This usually returns a wrapped stream around a binary stream
    returned from the binary-stream opener, taking shortcuts on
    Python 3 for already correctly configured streams.

    :param name: the name of the stream to open. Valid names are
        ``'stdin'``, ``'stdout'`` and ``'stderr'``
    :param encoding: overrides the detected default encoding.
    :param errors: overrides the default error mode.
    """
    opener = text_streams.get(name)
    if opener is None:
        # Restored stripped literal per click's implementation.
        raise TypeError('Unknown standard stream %r' % name)
    return opener(encoding, errors)
|
Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts on Python 3
for already correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
|
372,458
|
def update(self, response, **kwargs):
    """Update the record matching *response* with location and venue data.

    Looks up an existing response-class record via the parent accessor;
    when found, refreshes both the location column and the venue column
    from *response* and commits.  NOTE(review): the docstring of the
    original claims a new record is created otherwise, but no creation
    path is visible in this body -- confirm against upstream.
    """
    response_cls = super(
        LocationResponseClassLegacyAccessor, self)._get_instance(**kwargs)
    if response_cls:
        # Refresh both stored columns from the fresh response.
        setattr(response_cls, self.column, self.accessor(response))
        setattr(
            response_cls, self.venue_column, self.venue_accessor(response))
        _action_and_commit(response_cls, session.add)
|
If a record matching the instance already exists in the database, update
both the column and venue column attributes, else create a new record.
|
372,459
|
def from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:
    """Create a data object from Andor Solis software (ascii exports).

    Parameters
    ----------
    filepath : path-like
        Path to .asc file (local or remote; gz/bz2 decompression based
        on file name).
    name : string (optional)
        Name for the created data object; defaults to the file name.
    parent : WrightTools.Collection (optional)
        Collection to place the new data object within.
    verbose : boolean (optional)
        Toggle talkback.

    Returns
    -------
    data
        New data object.
    """
    filestr = os.fspath(filepath)
    filepath = pathlib.Path(filepath)
    if not ".asc" in filepath.suffixes:
        wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
    if not name:
        name = filepath.name.split(".")[0]
    ds = np.DataSource(None)
    f = ds.open(filestr, "rt")
    axis0 = []
    arr = []
    attrs = {}
    # Read the numeric block: first column is the axis, the rest the frame.
    while True:
        line = f.readline().strip()[:-1]
        if len(line) == 0:
            break
        else:
            line = line.split(",")
            line = [float(x) for x in line]
            axis0.append(line.pop(0))
            arr.append(line)
    # Read the trailing "key: value" metadata, terminated by three
    # blank lines.
    i = 0
    while i < 3:
        line = f.readline().strip()
        if len(line) == 0:
            i += 1
        else:
            try:
                key, val = line.split(":", 1)
            except ValueError:
                pass
            else:
                attrs[key.strip()] = val.strip()
    f.close()
    created = attrs["Date and Time"]
    created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
    created = timestamp.TimeStamp(time.mktime(created)).RFC3339
    kwargs = {"name": name, "kind": "Solis", "source": filestr, "created": created}
    if parent is None:
        data = Data(**kwargs)
    else:
        data = parent.create_data(**kwargs)
    arr = np.array(arr)
    # Normalise counts by exposure time -> Hz.
    arr /= float(attrs["Exposure Time (secs)"])
    arr = data.create_channel(name="signal", values=arr, signed=False, units="Hz")
    axis0 = np.array(axis0)
    # Grating density 0 means no wavelength calibration -> plain index axis.
    if float(attrs["Grating Groove Density (l/mm)"]) == 0:
        xname = "xindex"
        xunits = None
    else:
        xname = "wm"
        xunits = "nm"
    data.create_variable(name=xname, values=axis0[:, None], units=xunits)
    data.create_variable(name="yindex", values=np.arange(arr.shape[1])[None, :], units=None)
    data.transform(data.variables[0].natural_name, "yindex")
    for key, val in attrs.items():
        data.attrs[key] = val
    if verbose:
        print("data created at {0}".format(data.fullpath))
        print(" axes: {0}".format(data.axis_names))
        print(" shape: {0}".format(data.shape))
    return data
|
Create a data object from Andor Solis software (ascii exports).
Parameters
----------
filepath : path-like
Path to .txt file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
data
New data object.
|
372,460
|
def parse_plays_stream(self):
    """Generate and yield a stream of parsed plays.

    Useful for per-play processing; parsed plays are also appended to
    ``self.plays`` as they are yielded.

    NOTE(review): the xpath expression literal was lost from this body
    -- restore from upstream before running.
    """
    lx_doc = self.html_doc()
    if lx_doc is not None:
        parser = PlayParser(self.game_key.season, self.game_key.game_type)
        plays = lx_doc.xpath()
        for p in plays:
            p_obj = parser.build_play(p)
            self.plays.append(p_obj)
            yield p_obj
|
Generate and yield a stream of parsed plays. Useful for per play processing.
|
372,461
|
def accpro20_summary(self, cutoff):
    """Parse the ACCpro20 output and summarise exposed/buried fractions.

    Below the cutoff = buried; equal to or greater than cutoff =
    exposed (accpro's default cutoff is 25%).

    Fixes vs original: fractional cutoffs are now actually scaled
    (``cutoff * 100``, the original used the constant ``1 * 100``), and
    the comparison uses ``>=`` to match the documented contract (the
    original used ``>``); key names restored -- confirm upstream.

    Args:
        cutoff (float): cutoff for defining a buried or exposed residue
            (fraction < 1 or percentage).
    Returns:
        dict: per-sequence percentage of exposed and buried residues.
    """
    summary = {}
    if cutoff < 1:
        # Interpret fractional cutoffs (e.g. 0.25) as percentages.
        cutoff = cutoff * 100
    records = read_accpro20(self.out_accpro20)
    for seq_id, scores in records.items():
        seq_summary = {}
        exposed = 0
        buried = 0
        for s in scores:
            if s >= cutoff:
                exposed += 1
            else:
                buried += 1
        seq_summary['percent_exposed'] = exposed / float(len(scores))
        seq_summary['percent_buried'] = buried / float(len(scores))
        summary[seq_id] = seq_summary
    return summary
|
Parse the ACCpro output file and return a summary of percent exposed/buried residues based on a cutoff.
Below the cutoff = buried
Equal to or greater than cutoff = exposed
The default cutoff used in accpro is 25%.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Args:
cutoff (float): Cutoff for defining a buried or exposed residue.
Returns:
dict: Percentage of buried and exposed residues
|
372,462
|
def embedded_preview(src_path):
    """Return the path to a temporary copy of the embedded QuickLook
    preview, if it exists; None otherwise.

    Fixes vs original: ``preview_path`` was referenced but never
    assigned (now the first glob match).  NOTE(review): the glob
    pattern literals were lost; 'QuickLooks'/'Preview.*' is a guess --
    confirm against upstream.
    """
    try:
        assert exists(src_path) and isdir(src_path)
        preview_list = glob(join(src_path, 'QuickLooks', 'Preview.*'))
        assert preview_list
        preview_path = preview_list[0]
        with NamedTemporaryFile(prefix='preview_',
                                suffix=extension(preview_path),
                                delete=False) as tempfileobj:
            dest_path = tempfileobj.name
        shutil.copy(preview_path, dest_path)
        assert exists(dest_path)
        return dest_path
    except Exception:
        # Best-effort: any failure simply means "no preview available".
        return None
|
Returns path to temporary copy of embedded QuickLook preview, if it exists
|
372,463
|
def solve(self):
    """Start (or re-start) FISTA optimisation and return the minimiser.

    Runs the main iteration loop (proximal/combination steps, or
    backtracking when enabled), computing residuals and per-iteration
    statistics unless fast-solve mode is active, and supports an
    optional per-iteration callback and residual-based stopping.

    NOTE(review): option-name and timer-label string literals were
    stripped from this body -- restore from upstream before running.
    """
    fmtstr, nsep = self.display_start()
    # Start the solve timers.
    self.timer.start([, , ,
                      ])
    for self.k in range(self.k, self.k + self.opt[]):
        self.store_prev()
        if self.opt[, ] and self.k >= 0:
            # Backtracking line search for the step size.
            self.timer.stop()
            self.backtracking()
            self.timer.start()
        else:
            self.proximal_step()
            self.combination_step()
        self.timer.stop([, ])
        if not self.opt[]:
            frcxd, adapt_tol = self.compute_residuals()
        self.timer.start()
        self.timer.stop([, ,
                         ])
        if not self.opt[]:
            # Record and display per-iteration statistics.
            itst = self.iteration_stats(self.k, frcxd)
            self.itstat.append(itst)
            self.display_status(fmtstr, itst)
        self.timer.start([, ,
                          ])
        if self.opt[] is not None:
            # User callback may request early termination.
            if self.opt[](self):
                break
        if not self.opt[]:
            # Residual-based stopping criterion.
            if frcxd < adapt_tol:
                break
    self.k += 1
    self.timer.stop([, , ,
                     ])
    self.display_end(nsep)
    return self.getmin()
|
Start (or re-start) optimisation. This method implements the
framework for the iterations of a FISTA algorithm. There is
sufficient flexibility in overriding the component methods that
it calls that it is usually not necessary to override this method
in derived clases.
If option ``Verbose`` is ``True``, the progress of the
optimisation is displayed at every iteration. At termination
of this method, attribute :attr:`itstat` is a list of tuples
representing statistics of each iteration, unless option
``FastSolve`` is ``True`` and option ``Verbose`` is ``False``.
Attribute :attr:`timer` is an instance of :class:`.util.Timer`
that provides the following labelled timers:
``init``: Time taken for object initialisation by
:meth:`__init__`
``solve``: Total time taken by call(s) to :meth:`solve`
``solve_wo_func``: Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics
``solve_wo_rsdl`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics as well as time take
to compute residuals
``solve_wo_btrack`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics as well as time take
to compute residuals and implemented ``BackTrack`` mechanism
|
372,464
|
def attach_volume_to_device(self, volume_id, device_id):
    """Attach the created Volume to a Device and return the volume.

    Raises PacketManagerException on any Packet API error.
    """
    try:
        volume = self.manager.get_volume(volume_id)
        volume.attach(device_id)
    except packet.baseapi.Error as msg:
        raise PacketManagerException(msg)
    else:
        return volume
|
Attaches the created Volume to a Device.
|
372,465
|
def ctcBeamSearch(mat, classes, lm, k, beamWidth):
    """CTC beam search decoding, as described by Hwang et al. and Graves et al.

    mat is a (time x class) probability matrix; the last column is the
    CTC blank. Returns the k best decoded strings.
    NOTE(review): this snippet is corrupted — a string literal was
    stripped from `res =` near the end (presumably ''); restore before use.
    """
    # last column of mat is the CTC blank symbol
    blankIdx = len(classes)
    maxT, maxC = mat.shape
    # start with an empty labeling whose blank-probability is 1
    last = BeamState()
    labeling = ()
    last.entries[labeling] = BeamEntry()
    last.entries[labeling].prBlank = 1
    last.entries[labeling].prTotal = 1
    for t in range(maxT):
        curr = BeamState()
        # keep only the beamWidth most probable labelings from last step
        bestLabelings = last.sort()[0:beamWidth]
        for labeling in bestLabelings:
            # probability of re-emitting the last character (no extension)
            prNonBlank = 0
            if labeling:
                try:
                    prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
                except FloatingPointError:
                    prNonBlank = 0
            prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
            addBeam(curr, labeling)
            curr.entries[labeling].labeling = labeling
            curr.entries[labeling].prNonBlank += prNonBlank
            curr.entries[labeling].prBlank += prBlank
            curr.entries[labeling].prTotal += prBlank + prNonBlank
            curr.entries[labeling].prText = last.entries[labeling].prText
            curr.entries[labeling].lmApplied = True
            # extend the labeling by every non-blank character
            for c in range(maxC - 1):
                newLabeling = labeling + (c,)
                # repeated char must be preceded by a blank to count as new
                if labeling and labeling[-1] == c:
                    prNonBlank = mat[t, c] * last.entries[labeling].prBlank
                else:
                    prNonBlank = mat[t, c] * last.entries[labeling].prTotal
                addBeam(curr, newLabeling)
                curr.entries[newLabeling].labeling = newLabeling
                curr.entries[newLabeling].prNonBlank += prNonBlank
                curr.entries[newLabeling].prTotal += prNonBlank
                applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
        last = curr
    last.norm()
    bestLabelings = last.sort()[:k]
    output = []
    for bestLabeling in bestLabelings:
        # NOTE(review): stripped string literal — was presumably res = ''
        res =
        for l in bestLabeling:
            res += classes[l]
        output.append(res)
    return output
|
beam search as described by the paper of Hwang et al. and the paper of Graves et al.
|
372,466
|
def _search_dirs(self, dirs, basename, extension=""):
    """Search a list of directories for a file, returning the first match.

    :param dirs: directories to search, in priority order
    :param basename: the filename without extension
    :param extension: the file extension, e.g. '.conf'
    :returns: the path of the first existing match, or None
    NOTE(review): the format string was stripped from os.path.join
    (presumably '%s%s'); restore before use.
    """
    for d in dirs:
        path = os.path.join(d,  % (basename, extension))
        if os.path.exists(path):
            return path
    return None
|
Search a list of directories for a given filename or directory name.
Iterator over the supplied directories, returning the first file
found with the supplied name and extension.
:param dirs: a list of directories
:param basename: the filename
:param extension: the file extension, for example '.conf'
:returns: the path to a matching file, or None
|
372,467
|
def parse_scale(x):
    """Split a "<string>:<int>" value and return the (string, int) pair.

    :raise ValueError: if ``x`` does not match the expected format.
    NOTE(review): the regex literal and the error-message format string
    were stripped from this snippet; restore before use.
    """
    match = re.match(r, x)
    if not match:
        raise ValueError( % x)
    return match.group(1), int(match.group(2))
|
Splits a "%s:%d" string and returns the string and number.
:return: A ``(string, int)`` pair extracted from ``x``.
:raise ValueError: the string ``x`` does not respect the input format.
|
372,468
|
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))):
    r"""Return (level, text) 2-tuples for markdown heading lines.

    Level 0 stands for non-heading lines; levels 2-6 come from leading
    '#' runs. Only levels present in ``levels`` are returned.
    NOTE(review): snippet corrupted — the '#' literal in startswith was
    stripped, and a stray ``r`` docstring prefix remains. Uses Python 2
    ``basestring``.
    """
    if isinstance(levels, (int, float, basestring, str, bytes)):
        levels = [float(levels)]
    levels = set([int(i) for i in levels])
    if isinstance(lines, basestring):
        lines = lines.splitlines()
    level_lines = []
    for line in lines:
        level_line = None
        if 0 in levels:
            level_line = (0, line)
        lstripped = line.lstrip()
        # try the longest heading prefix first (###### down to ##)
        for i in range(6, 1, -1):
            if lstripped.startswith( * i):
                level_line = (i, lstripped[i:].lstrip())
                break
        if level_line and level_line[0] in levels:
            level_lines.append(level_line)
    return level_lines
|
r""" Return a list of 2-tuples with a level integer for the heading levels
>>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n')
[(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n')
[(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2)
[(2, 'bad')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1)
[]
|
372,469
|
def _compute_geometric_decay_term(self, C, mag, dists):
    """Compute and return the geometric decay term (equation 3, page 970).

    NOTE(review): the coefficient-dictionary keys were stripped from
    ``self.CONSTS[]`` and ``C[]``; restore them from the GMPE coefficient
    table before use.
    """
    c1 = self.CONSTS[]
    # magnitude-dependent slope times log of effective distance
    return (
        (C[] + C[] * (mag - c1)) *
        np.log(np.sqrt(dists.rjb ** 2.0 + C[] ** 2.0))
    )
|
Compute and return geometric decay term in equation 3,
page 970.
|
372,470
|
def calculate_ef_var(tpf, fpf):
    """Return variance terms for an enrichment-factor calculation.

    :param tpf: true-positive fraction at which EF was calculated
    :param fpf: false-positive fraction at which EF was calculated
    :returns: (efvar_actives, efvar_decoys, s2) where s2 is the squared
        slope of the ROC-curve tangent at fpf; all zeros when fpf == 1
        (log(1) == 0 would divide by zero).
    """
    var_actives = tpf * (1 - tpf)
    var_decoys = fpf * (1 - fpf)
    enrichment = tpf / fpf
    if fpf == 1:
        return (0, 0, 0)
    slope = enrichment * (1 + np.log(enrichment) / np.log(fpf))
    return (var_actives, var_decoys, slope * slope)
|
determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple
|
372,471
|
def makeringlatticeCIJ(n, k, seed=None):
    # NOTE(review): the lines below are the mangled remnant of the original
    # docstring (opening quotes stripped); the snippet is not runnable as-is.
    s global random state to generate random numbers.
    Otherwise, use a new np.random.RandomState instance seeded with the given value.
    Returns
    -------
    CIJ : NxN np.ndarray
        connection matrix
    Notes
    -----
    The lattice is made by placing connections as close as possible
    to the main diagonal, with wrapping around. No connections are made
    on the main diagonal. In/Outdegree is kept approx. constant at K/N.
    '
    rng = get_rng(seed)
    CIJ = np.zeros((n, n))
    CIJ1 = np.ones((n, n))
    kk = 0
    count = 0
    seq = range(1, n)
    seq2 = range(n - 1, 0, -1)
    # add diagonals outward from the main diagonal (with wrap-around)
    # until at least k edges are placed
    while kk < k:
        count += 1
        dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
        dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
        dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
        CIJ += dCIJ
        kk = int(np.sum(CIJ))
    # if too many edges were added, remove a random subset from the last ring
    overby = kk - k
    if overby:
        i, j = np.where(dCIJ)
        rp = rng.permutation(np.size(i))
        for ii in range(overby):
            CIJ[i[rp[ii]], j[rp[ii]]] = 0
    return CIJ
|
This function generates a directed lattice network with toroidal
boundary counditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
|
372,472
|
def read_records(file):
    """Eagerly read a collection of amazon Record protobuf objects from file."""
    def _parse(raw):
        # each recordio payload is one serialized Record message
        record = Record()
        record.ParseFromString(raw)
        return record
    return [_parse(raw) for raw in read_recordio(file)]
|
Eagerly read a collection of amazon Record protobuf objects from file.
|
372,473
|
def list_users(verbose=True, hashes=False):
    """List Samba (pdbedit) user accounts.

    verbose : return full per-user info as a dict; otherwise a list of names
    hashes : include NT HASH and LM HASH in verbose output
    NOTE(review): heavily corrupted snippet — the pdbedit command strings,
    dict keys and separator literals were stripped; restore before use.
    """
    *
    users = {} if verbose else []
    if verbose:
        res = __salt__[](
            .format(hashes="--smbpasswd-style" if hashes else ""),
        )
        if res[] > 0:
            log.error(res[] if in res else res[])
            return users
        user_data = {}
        # parse the blank-line/“---” separated per-user stanzas
        for user in res[].splitlines():
            if user.startswith():
                if in user_data:
                    users[user_data[]] = user_data
                user_data = {}
            elif in user:
                label = user[:user.index()].strip().lower()
                data = user[(user.index()+1):].strip()
                user_data[label] = data
        if user_data:
            users[user_data[]] = user_data
    else:
        res = __salt__[]()
        if res[] > 0:
            return {: res[] if in res else res[]}
        for user in res[].splitlines():
            if not in user:
                continue
            user_data = user.split()
            if len(user_data) >= 3:
                users.append(user_data[0])
    return users
|
List user accounts
verbose : boolean
return all information
hashes : boolean
include NT HASH and LM HASH in verbose output
CLI Example:
.. code-block:: bash
salt '*' pdbedit.list
|
372,474
|
def response(self, parameters):
    """Complex response of the Cole-Cole model.

    rho_hat = rho0 * (1 - sum_i m_i * (1 - 1 / (1 + (j w tau_i)^c_i)))
    :param parameters: Cole-Cole parameters rho0, m, tau, c (all linear)
    :returns: a sip_response model-response object
    NOTE(review): the stray ``r`` below is the remnant of a stripped raw
    docstring prefix.
    """
    r
    self._set_parameters(parameters)
    # one Cole-Cole term per relaxation time, summed over terms (axis=1)
    terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
    specs = np.sum(terms, axis=1)
    rcomplex = self.rho0 * (1 - specs)
    response = sip_response.sip_response(self.f, rcomplex=rcomplex)
    return response
|
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
|
372,475
|
def authenticate(devices, params, facet, check_only):
    """Interactively authenticate an AuthenticateRequest with an attached U2F device.

    Polls every openable device until one satisfies the request; exits the
    process on success-check or final failure.
    NOTE(review): the sys.stderr.write() calls lost their message literals
    in this snippet; restore before use.
    """
    # drop devices that cannot be opened; bare except is in the original
    for device in devices[:]:
        try:
            device.open()
        except:
            devices.remove(device)
    try:
        prompted = False
        while devices:
            removed = []
            for device in devices:
                try:
                    return u2f.authenticate(device, params, facet, check_only)
                except exc.APDUError as e:
                    if e.code == APDU_USE_NOT_SATISFIED:
                        # key present but needs a touch; prompt once
                        if check_only:
                            sys.stderr.write()
                            sys.exit(0)
                        if not prompted:
                            sys.stderr.write(
                            )
                            prompted = True
                    else:
                        removed.append(device)
                except exc.DeviceError:
                    removed.append(device)
            devices = [d for d in devices if d not in removed]
            for d in removed:
                d.close()
            time.sleep(0.25)
    finally:
        # always release every remaining device handle
        for device in devices:
            device.close()
    sys.stderr.write()
    sys.exit(1)
|
Interactively authenticates a AuthenticateRequest using an attached U2F
device.
|
372,476
|
def set_server_callback(self, handle):
    """Set up on_event / on_change callbacks for bokeh server interactions.

    NOTE(review): the list of excluded change names was stripped from the
    ``if change in [, ]`` test; restore before use.
    """
    if self.on_events:
        for event in self.on_events:
            handle.on_event(event, self.on_event)
    if self.on_changes:
        for change in self.on_changes:
            # skip change types that are handled elsewhere (names stripped)
            if change in [, ]:
                continue
            handle.on_change(change, self.on_change)
|
Set up on_change events for bokeh server interactions.
|
372,477
|
def _ExecuteTransaction(self, transaction):
    """Get a connection from the pool and run the queries as one transaction.

    Each transaction item is a dict with "query" and "args"; the result of
    the final fetchall is returned, retried via self._RetryWrapper.
    """
    def Action(connection):
        cursor = connection.cursor
        cursor.execute("START TRANSACTION")
        for step in transaction:
            cursor.execute(step["query"], step["args"])
        cursor.execute("COMMIT")
        return cursor.fetchall()
    return self._RetryWrapper(Action)
|
Get connection from pool and execute transaction.
|
372,478
|
def rate(self):
    """Report the insertion rate in records per second.

    Uses the recorded end time when present, otherwise measures up to now.
    """
    finish = self._end_time or time.time()
    return self._count / (finish - self._start_time)
|
Report the insertion rate in records per second
|
372,479
|
def adjustSize( self ):
    """Adjusts the size of this node to support the length of its contents."""
    # one grid cell; minimum node size is 1x2 cells
    cell = self.scene().cellWidth() * 2
    minheight = cell
    minwidth = 2 * cell
    metrics = QFontMetrics(QApplication.font())
    width = metrics.width(self.displayName()) + 20
    # NOTE(review): '(cell % width)' looks suspicious — rounding width up to
    # a cell multiple would normally be '(width % cell)'; confirm intent.
    width = ((width/cell) * cell) + (cell % width)
    height = self.rect().height()
    icon = self.icon()
    if icon and not icon.isNull():
        # leave room for the icon next to the text
        width += self.iconSize().width() + 2
        height = max(height, self.iconSize().height() + 2)
    w = max(width, minwidth)
    h = max(height, minheight)
    # clamp to the configured maximums, when set
    max_w = self.maximumWidth()
    max_h = self.maximumHeight()
    if max_w is not None:
        w = min(w, max_w)
    if max_h is not None:
        h = min(h, max_h)
    self.setMinimumWidth(w)
    self.setMinimumHeight(h)
    self.rebuild()
|
Adjusts the size of this node to support the length of its contents.
|
372,480
|
def __op(name, val, fmt=None, const=False, consume=0, produce=0):
    """Register an opcode in __OPTABLE under both its name and its value.

    A string fmt is compiled into an unpacker; the numeric value is
    returned so callers can assign it to a constant.
    """
    name = name.lower()
    if isinstance(fmt, str):
        # pre-compile the struct format into an argument unpacker
        fmt = partial(_unpack, compile_struct(fmt))
    entry = (name, val, fmt, consume, produce, const)
    # both keys must be fresh — duplicate registration is a programming error
    assert name not in __OPTABLE
    assert val not in __OPTABLE
    __OPTABLE[name] = entry
    __OPTABLE[val] = entry
    return val
|
provides sensible defaults for a code, and registers it with the
__OPTABLE for lookup.
|
372,481
|
def bubble_sizes_ref(self, series):
    """Excel worksheet reference for the bubble-size range of *series*.

    The heading cell is excluded, so the range starts two rows below the
    series' table offset.
    """
    first_row = self.series_table_row_offset(series) + 2
    last_row = first_row + len(series) - 1
    return "Sheet1!$C$%d:$C$%d" % (first_row, last_row)
|
The Excel worksheet reference to the range containing the bubble
sizes for *series* (not including the column heading cell).
|
372,482
|
def _do_request(self, url, params=None, data=None, headers=None):
    """Perform an HTTP GET via requests, swallowing exceptions generically.

    Returns the response only when the status is 200; otherwise (or on any
    exception) returns None implicitly.
    NOTE(review): the default headers dict lost its key/value literals
    (``{: }``); restore before use. Bare except is in the original.
    """
    if not headers:
        headers = {: }
    try:
        response = requests.get(
            url, params=params, data=data, headers=headers)
    except:
        return None
    if response.status_code == 200:
        return response
|
Realiza as requisições diversas utilizando a biblioteca requests,
tratando de forma genérica as exceções.
|
372,483
|
def pb2dict(obj):
    """Convert a ProtoBuf Message object into a plain dict.

    Nested messages recurse; repeated fields become lists. Returns None
    for an uninitialized message.
    """
    if not obj.IsInitialized():
        return None
    result = {}
    for field in obj.DESCRIPTOR.fields:
        raw = getattr(obj, field.name)
        # NOTE(review): falsy scalars (0, False, "") and empty repeated
        # fields are skipped — this matches the original behaviour.
        if not raw:
            continue
        if field.label == FD.LABEL_REPEATED:
            if field.type == FD.TYPE_MESSAGE:
                result[field.name] = [pb2dict(item) for item in raw]
            else:
                result[field.name] = list(raw)
        else:
            if field.type == FD.TYPE_MESSAGE:
                nested = pb2dict(raw)
                if nested:
                    result[field.name] = nested
            else:
                result[field.name] = raw
    return result
|
Takes a ProtoBuf Message obj and convertes it to a dict.
|
372,484
|
def get_log_entry_log_assignment_session(self, proxy):
    """Gets the session for assigning log entry to log mappings.

    :param proxy: an osid.proxy.Proxy
    :returns: a LogEntryLogAssignmentSession
    :raises errors.Unimplemented: when log-entry/log assignment is unsupported
    """
    if self.supports_log_entry_log_assignment():
        return sessions.LogEntryLogAssignmentSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
|
Gets the session for assigning log entry to log mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogEntryLogAssignmentSession) - a
``LogEntryLogAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_entry_log_assignment()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_entry_log_assignment()`` is ``true``.*
|
372,485
|
def download(self, location, local_dir=):
    """Download content from bucket/prefix/location to a local directory.

    Location may be a directory (all files under it are fetched) or a
    single file key.
    NOTE(review): corrupted snippet — the default for ``local_dir``, the
    info-dict keys, log messages and the '/' separator literals were
    stripped; restore before use.
    """
    self.logger.debug()
    bucket = self.info[]
    prefix = self.info[]
    self.logger.debug()
    s3conn = self.client
    location = location.strip()
    self.logger.debug()
    # list every key under the requested location
    objects = s3conn.list_objects(Bucket=bucket, Prefix=(prefix++location))
    if not in objects:
        raise ValueError(.format(bucket, prefix, location))
    for s3key in objects[]:
        key = s3key[]
        # skip pseudo-directory keys
        if not key or key.endswith():
            continue
        filepath = key.replace(prefix++location, , 1).lstrip()
        filename = key.split()[-1]
        file_dir = filepath.split()[:-1]
        file_dir = .join(file_dir)
        full_dir = os.path.join(local_dir, file_dir)
        # recreate the remote directory layout locally
        if not os.path.isdir(full_dir):
            os.makedirs(full_dir)
        s3conn.download_file(bucket, key, os.path.join(full_dir, filename))
    self.logger.debug()
|
Download content from bucket/prefix/location.
Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)
If location is a directory, all files in the directory are
downloaded. If it is a file, then that file is downloaded.
Args:
location (str): S3 location within prefix.
local_dir (str): Local directory where file(s) will be stored.
Default is here.
|
372,486
|
def delete(self, request, *args, **kwargs):
    """Process deletion of the specified instance and redirect on success.

    :param request: the request instance.
    :rtype: django.http.HttpResponse
    NOTE(review): the ``getattr(self.object, )`` attribute name (presumably
    '_meta') and the success-message template were stripped; restore
    before use.
    """
    self.object = self.get_object()
    success_url = self.get_success_url()
    meta = getattr(self.object, )
    self.object.delete()
    # user-facing flash message naming the deleted object
    messages.success(
        request,
        _(u).format(
            meta.verbose_name.lower(),
            str(self.object)
        )
    )
    return redirect(success_url)
|
Processes deletion of the specified instance.
:param request: the request instance.
:rtype: django.http.HttpResponse.
|
372,487
|
def get_log(self, offset, count=10, callback=None):
    """Retrieve log records from the camera (cmd: getLog).

    offset: log offset for the first record; count: number of records.
    NOTE(review): the params dict keys and the command-name literal were
    stripped from this snippet; restore before use.
    """
    params = {: offset, : count}
    return self.execute_command(, params, callback=callback)
|
Retrieve log records from camera.
cmd: getLog
param:
offset: log offset for first record
count: number of records to return
|
372,488
|
def _build_query_url(self, page = None, verbose = False):
    """Build the full API URL from filters, sorting and paging options.

    NOTE(review): Python 2 code (``print msg``); the key for
    ``self.get_list_endpoint()[]`` was stripped; restore before use.
    """
    query = []
    if len(self.filters) > 0:
        query.append(urlencode(self.filters))
    if self.sort:
        query_str = u"%s=%s" % (u"sort", self.sort)
        query.append(query_str)
    if self.sort_by:
        query_str = u"%s=%s" % (u"sort_by", self.sort_by)
        query.append(query_str)
    if self.per_page:
        query_str = u"%s=%s" % (u"per_page", self.per_page)
        query.append(query_str)
    if page:
        query_str = u"%s=%s" % (u"page", page)
        query.append(query_str)
    # join all key=value fragments into one query string
    query = u"?%s" % (u"&".join(query))
    url = u"%s%s" % (self.get_list_endpoint()[],query)
    url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, url)
    msg = "_build_query_url: url:%s" % url
    log.debug(msg)
    if verbose:
        print msg
    return url
|
builds the url to call
|
372,489
|
def reboot(self, timeout=1):
    """Reboot the device via its UPnP Reboot action."""
    service = System.getServiceType("reboot")
    control_url = self.getControlURL(service)
    self.execute(control_url, service, "Reboot", timeout=timeout)
|
Reboot the device
|
372,490
|
def select_token(request, scopes=, new=False):
    """Present the user with a selection of applicable ESI tokens for the view.

    NOTE(review): the default value of ``scopes`` and the context keys /
    template name literals were stripped from this snippet; restore
    before use.
    """
    @tokens_required(scopes=scopes, new=new)
    def _token_list(r, tokens):
        context = {
            : tokens,
            : app_settings.ESI_BASE_TEMPLATE,
        }
        return render(r, , context=context)
    return _token_list(request)
|
Presents the user with a selection of applicable tokens for the requested view.
|
372,491
|
def google_storage_url(self, sat):
    """Return a Google Storage URL containing the provided scene.

    :param sat: dict produced by the scene_interpreter method
    :returns: the URL string to a Google Storage file
    NOTE(review): the dict keys and the filename suffix literal were
    stripped from this snippet; restore before use.
    """
    filename = sat[] +
    return url_builder([self.google, sat[], sat[], sat[], filename])
|
Returns a google storage url the contains the scene provided.
:param sat:
Expects an object created by scene_interpreter method
:type sat:
dict
:returns:
(String) The URL to a google storage file
|
372,492
|
def updateUserRole(self,
                   user,
                   role):
    """Update the role of a user within a portal (admin operation).

    :param user: the username whose role is being changed
    :param role: the new role, e.g. org_user / org_publisher / org_admin
    """
    payload = {
        "f" : "json",
        "user" : user,
        "role" : role
    }
    return self._post(url=self._url + "/updateuserrole",
                      param_dict=payload,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
|
The Update User Role operation allows the administrator of an org
anization to update the role of a user within a portal.
Inputs:
role - Sets the user's role.
Roles are the following:
org_user - Ability to add items, create groups, and
share in the organization.
org_publisher - Same privileges as org_user plus the
ability to publish hosted services from ArcGIS for
Desktop and ArcGIS Online.
org_admin - In addition to add, create, share, and publish
capabilities, an org_admin administers and customizes
the organization.
Example: role=org_publisher
user - The username whose role you want to change.
|
372,493
|
def _one_projector(args: Dict[str, Any], index: int) -> Union[int, np.ndarray]:
    """Return a projector onto the |1> subspace of the index-th qubit.

    NOTE(review): the keys for the two ``args[]`` lookups were stripped
    from this snippet; restore before use.
    """
    num_shard_qubits = args[]
    shard_num = args[]
    # qubits beyond the shard are encoded in the shard number itself
    if index >= num_shard_qubits:
        return _kth_bit(shard_num, index - num_shard_qubits)
    return _zero_one_vects(args)[index]
|
Returns a projector onto the |1> subspace of the index-th qubit.
|
372,494
|
def rate_limits(self):
    """Returns a list of rate limit details (computed lazily, then cached)."""
    if self._rate_limits:
        return self._rate_limits
    self._rate_limits = utilities.get_rate_limits(self.response)
    return self._rate_limits
|
Returns a list of rate limit details.
|
372,495
|
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
    "Read a hyperparameter `key` in the optimizer dictionary."
    # every other param group carries the hyperparameter of interest
    values = [group[key] for group in self.opt.param_groups[::2]]
    if is_tuple(values[0]):
        return [v[0] for v in values], [v[1] for v in values]
    return values
|
Read a hyperparameter `key` in the optimizer dictionary.
|
372,496
|
def execute_no_results(self, sock_info, generator):
    """Execute all bulk operations, returning no results (w=0).

    NOTE(review): several dict-key literals were stripped from the
    ``operation[]`` / ``doc = operation[]`` lookups and the ``$``-prefix
    test (``startswith()``); restore before use.
    """
    if self.bypass_doc_val and sock_info.max_wire_version >= 4:
        raise OperationFailure("Cannot set bypass_document_validation with"
                               " unacknowledged write concern")
    coll = self.collection
    # w=1 only when ordered, so an error can stop the batch
    write_concern = WriteConcern(w=int(self.ordered))
    op_id = _randint()
    for run in generator:
        try:
            if run.op_type == _INSERT:
                coll._insert(
                    sock_info,
                    run.ops,
                    self.ordered,
                    write_concern=write_concern,
                    op_id=op_id,
                    bypass_doc_val=self.bypass_doc_val)
            elif run.op_type == _UPDATE:
                for operation in run.ops:
                    doc = operation[]
                    # replacement docs (no $-operators) get key validation
                    check_keys = True
                    if doc and next(iter(doc)).startswith():
                        check_keys = False
                    coll._update(
                        sock_info,
                        operation[],
                        doc,
                        operation[],
                        check_keys,
                        operation[],
                        write_concern=write_concern,
                        op_id=op_id,
                        ordered=self.ordered,
                        bypass_doc_val=self.bypass_doc_val)
            else:
                for operation in run.ops:
                    coll._delete(sock_info,
                                 operation[],
                                 not operation[],
                                 write_concern,
                                 op_id,
                                 self.ordered)
        except OperationFailure:
            # ordered bulks stop at the first failing run
            if self.ordered:
                break
|
Execute all operations, returning no results (w=0).
|
372,497
|
def stop_all(self):
    """Stop all nodes, at most three concurrently."""
    worker_pool = Pool(concurrency=3)
    for node in self.nodes.values():
        worker_pool.append(node.stop)
    yield from worker_pool.join()
|
Stop all nodes
|
372,498
|
def array_type(data_types, field):
    """Convert a Django ArrayField to a SQLAlchemy ARRAY, mapping the element type.

    NOTE(review): the excluded internal-type name (``!= :``) and the
    RuntimeError message were stripped from this snippet; restore
    before use.
    """
    from sqlalchemy.dialects import postgresql
    internal_type = field.base_field.get_internal_type()
    if internal_type in data_types and internal_type != :
        sub_type = data_types[internal_type](field)
        if not isinstance(sub_type, (list, tuple)):
            sub_type = [sub_type]
    else:
        raise RuntimeError()
    return postgresql.ARRAY(sub_type)
|
Allows conversion of Django ArrayField to SQLAlchemy Array.
Takes care of mapping the type of the array element.
|
372,499
|
def asof_locs(self, where, mask):
    """Find index locations for each `where` label, as-of style.

    For labels not present, the latest earlier label's location is used;
    labels earlier than every index value map to -1. `mask` marks the
    non-NA entries of the index.
    NOTE(review): the ``side=`` argument literal (presumably 'right') was
    stripped from this snippet; restore before use.
    """
    locs = self.values[mask].searchsorted(where.values, side=)
    # step back one position to get the latest value <= each label
    locs = np.where(locs > 0, locs - 1, 0)
    result = np.arange(len(self))[mask].take(locs)
    # labels before the first valid index value get -1
    first = mask.argmax()
    result[(locs == 0) & (where.values < self.values[first])] = -1
    return result
|
Find the locations (indices) of the labels from the index for
every entry in the `where` argument.
As in the `asof` function, if the label (a particular entry in
`where`) is not in the index, the latest index label upto the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in `where`,
-1 is returned.
`mask` is used to ignore NA values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : array-like
Array of booleans denoting where values in the original
data are not NA.
Returns
-------
numpy.ndarray
An array of locations (indices) of the labels from the Index
which correspond to the return values of the `asof` function
for every element in `where`.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.