repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
internetarchive/warc | warc/warc.py | WARCFile.write_record | def write_record(self, warc_record):
"""Adds a warc record to this WARC file.
"""
warc_record.write_to(self.fileobj)
# Each warc record is written as separate member in the gzip file
# so that each record can be read independetly.
if isinstance(self.fileobj, gzip2.GzipFile):
self.fileobj.close_member() | python | def write_record(self, warc_record):
"""Adds a warc record to this WARC file.
"""
warc_record.write_to(self.fileobj)
# Each warc record is written as separate member in the gzip file
# so that each record can be read independetly.
if isinstance(self.fileobj, gzip2.GzipFile):
self.fileobj.close_member() | [
"def",
"write_record",
"(",
"self",
",",
"warc_record",
")",
":",
"warc_record",
".",
"write_to",
"(",
"self",
".",
"fileobj",
")",
"# Each warc record is written as separate member in the gzip file",
"# so that each record can be read independetly.",
"if",
"isinstance",
"(",... | Adds a warc record to this WARC file. | [
"Adds",
"a",
"warc",
"record",
"to",
"this",
"WARC",
"file",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L265-L272 | train | 48,600 |
internetarchive/warc | warc/warc.py | WARCFile.tell | def tell(self):
"""Returns the file offset. If this is a compressed file, then the
offset in the compressed file is returned.
"""
if isinstance(self.fileobj, gzip2.GzipFile):
return self.fileobj.fileobj.tell()
else:
return self.fileobj.tell() | python | def tell(self):
"""Returns the file offset. If this is a compressed file, then the
offset in the compressed file is returned.
"""
if isinstance(self.fileobj, gzip2.GzipFile):
return self.fileobj.fileobj.tell()
else:
return self.fileobj.tell() | [
"def",
"tell",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"fileobj",
",",
"gzip2",
".",
"GzipFile",
")",
":",
"return",
"self",
".",
"fileobj",
".",
"fileobj",
".",
"tell",
"(",
")",
"else",
":",
"return",
"self",
".",
"fileobj",
... | Returns the file offset. If this is a compressed file, then the
offset in the compressed file is returned. | [
"Returns",
"the",
"file",
"offset",
".",
"If",
"this",
"is",
"a",
"compressed",
"file",
"then",
"the",
"offset",
"in",
"the",
"compressed",
"file",
"is",
"returned",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L306-L313 | train | 48,601 |
internetarchive/warc | warc/gzip2.py | GzipFile.close_member | def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True | python | def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True | [
"def",
"close_member",
"(",
"self",
")",
":",
"# The new member is not yet started, no need to close",
"if",
"self",
".",
"_new_member",
":",
"return",
"self",
".",
"fileobj",
".",
"write",
"(",
"self",
".",
"compress",
".",
"flush",
"(",
")",
")",
"write32u",
... | Closes the current member being written. | [
"Closes",
"the",
"current",
"member",
"being",
"written",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L42-L59 | train | 48,602 |
internetarchive/warc | warc/gzip2.py | GzipFile._start_member | def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False | python | def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False | [
"def",
"_start_member",
"(",
"self",
")",
":",
"if",
"self",
".",
"_new_member",
":",
"self",
".",
"_init_write",
"(",
"self",
".",
"name",
")",
"self",
".",
"_write_gzip_header",
"(",
")",
"self",
".",
"_new_member",
"=",
"False"
] | Starts writing a new member if required. | [
"Starts",
"writing",
"a",
"new",
"member",
"if",
"required",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L61-L67 | train | 48,603 |
internetarchive/warc | warc/gzip2.py | GzipFile.close | def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None | python | def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"fileobj",
"is",
"None",
":",
"return",
"if",
"self",
".",
"mode",
"==",
"WRITE",
":",
"self",
".",
"close_member",
"(",
")",
"self",
".",
"fileobj",
"=",
"None",
"elif",
"self",
".",
"mode"... | Closes the gzip with care to handle multiple members. | [
"Closes",
"the",
"gzip",
"with",
"care",
"to",
"handle",
"multiple",
"members",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L73-L86 | train | 48,604 |
internetarchive/warc | warc/gzip2.py | GzipFile.read_member | def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self | python | def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self | [
"def",
"read_member",
"(",
"self",
")",
":",
"if",
"self",
".",
"_member_lock",
"is",
"False",
":",
"self",
".",
"_member_lock",
"=",
"True",
"if",
"self",
".",
"_new_member",
":",
"try",
":",
"# Read one byte to move to the next member",
"BaseGzipFile",
".",
... | Returns a file-like object to read one member from the gzip file. | [
"Returns",
"a",
"file",
"-",
"like",
"object",
"to",
"read",
"one",
"member",
"from",
"the",
"gzip",
"file",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L95-L109 | train | 48,605 |
internetarchive/warc | warc/gzip2.py | GzipFile.write_member | def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member() | python | def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member() | [
"def",
"write_member",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"basestring",
")",
":",
"self",
".",
"write",
"(",
"data",
")",
"else",
":",
"for",
"text",
"in",
"data",
":",
"self",
".",
"write",
"(",
"text",
")",
... | Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object. | [
"Writes",
"the",
"given",
"data",
"as",
"one",
"gzip",
"member",
".",
"The",
"data",
"can",
"be",
"a",
"string",
"an",
"iterator",
"that",
"gives",
"strings",
"or",
"a",
"file",
"-",
"like",
"object",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L111-L121 | train | 48,606 |
internetarchive/warc | warc/arc.py | ARCHeader.write_to | def write_to(self, f, version = None):
"""
Writes out the arc header to the file like object `f`.
If the version field is 1, it writes out an arc v1 header,
otherwise (and this is default), it outputs a v2 header.
"""
if not version:
version = self.version
if version == 1:
header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s"
elif version == 2:
header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(result_code)s %(checksum)s %(location)s %(offset)s %(filename)s %(length)s"
header = header%dict(url = self['url'],
ip_address = self['ip_address'],
date = self['date'],
content_type = self['content_type'],
result_code = self['result_code'],
checksum = self['checksum'],
location = self['location'],
offset = self['offset'],
filename = self['filename'],
length = self['length'])
f.write(header) | python | def write_to(self, f, version = None):
"""
Writes out the arc header to the file like object `f`.
If the version field is 1, it writes out an arc v1 header,
otherwise (and this is default), it outputs a v2 header.
"""
if not version:
version = self.version
if version == 1:
header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s"
elif version == 2:
header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(result_code)s %(checksum)s %(location)s %(offset)s %(filename)s %(length)s"
header = header%dict(url = self['url'],
ip_address = self['ip_address'],
date = self['date'],
content_type = self['content_type'],
result_code = self['result_code'],
checksum = self['checksum'],
location = self['location'],
offset = self['offset'],
filename = self['filename'],
length = self['length'])
f.write(header) | [
"def",
"write_to",
"(",
"self",
",",
"f",
",",
"version",
"=",
"None",
")",
":",
"if",
"not",
"version",
":",
"version",
"=",
"self",
".",
"version",
"if",
"version",
"==",
"1",
":",
"header",
"=",
"\"%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s... | Writes out the arc header to the file like object `f`.
If the version field is 1, it writes out an arc v1 header,
otherwise (and this is default), it outputs a v2 header. | [
"Writes",
"out",
"the",
"arc",
"header",
"to",
"the",
"file",
"like",
"object",
"f",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L69-L94 | train | 48,607 |
internetarchive/warc | warc/arc.py | ARCRecord.from_string | def from_string(cls, string, version):
"""
Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here.
"""
header, payload = string.split("\n",1)
if payload[0] == '\n': # There's an extra
payload = payload[1:]
if int(version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
return cls(header = arc_header, payload = payload, version = version) | python | def from_string(cls, string, version):
"""
Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here.
"""
header, payload = string.split("\n",1)
if payload[0] == '\n': # There's an extra
payload = payload[1:]
if int(version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
return cls(header = arc_header, payload = payload, version = version) | [
"def",
"from_string",
"(",
"cls",
",",
"string",
",",
"version",
")",
":",
"header",
",",
"payload",
"=",
"string",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"if",
"payload",
"[",
"0",
"]",
"==",
"'\\n'",
":",
"# There's an extra",
"payload",
"=",
... | Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here. | [
"Constructs",
"an",
"ARC",
"record",
"from",
"a",
"string",
"and",
"returns",
"it",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L161-L179 | train | 48,608 |
internetarchive/warc | warc/arc.py | ARCFile._write_header | def _write_header(self):
"Writes out an ARC header"
if "org" not in self.file_headers:
warnings.warn("Using 'unknown' for Archiving organisation name")
self.file_headers['org'] = "Unknown"
if "date" not in self.file_headers:
now = datetime.datetime.utcnow()
warnings.warn("Using '%s' for Archiving time"%now)
self.file_headers['date'] = now
if "ip_address" not in self.file_headers:
warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
self.file_headers['ip_address'] = "127.0.0.1"
if self.version == 1:
payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org'])
elif self.version == 2:
payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length"
else:
raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)
fname = os.path.basename(self.fileobj.name)
header = ARCHeader(url = "filedesc://%s"%fname,
ip_address = self.file_headers['ip_address'],
date = self.file_headers['date'],
content_type = "text/plain",
length = len(payload),
result_code = "200",
checksum = "-",
location = "-",
offset = str(self.fileobj.tell()),
filename = fname)
arc_file_header_record = ARCRecord(header, payload%self.file_headers)
self.write(arc_file_header_record) | python | def _write_header(self):
"Writes out an ARC header"
if "org" not in self.file_headers:
warnings.warn("Using 'unknown' for Archiving organisation name")
self.file_headers['org'] = "Unknown"
if "date" not in self.file_headers:
now = datetime.datetime.utcnow()
warnings.warn("Using '%s' for Archiving time"%now)
self.file_headers['date'] = now
if "ip_address" not in self.file_headers:
warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
self.file_headers['ip_address'] = "127.0.0.1"
if self.version == 1:
payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org'])
elif self.version == 2:
payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length"
else:
raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)
fname = os.path.basename(self.fileobj.name)
header = ARCHeader(url = "filedesc://%s"%fname,
ip_address = self.file_headers['ip_address'],
date = self.file_headers['date'],
content_type = "text/plain",
length = len(payload),
result_code = "200",
checksum = "-",
location = "-",
offset = str(self.fileobj.tell()),
filename = fname)
arc_file_header_record = ARCRecord(header, payload%self.file_headers)
self.write(arc_file_header_record) | [
"def",
"_write_header",
"(",
"self",
")",
":",
"if",
"\"org\"",
"not",
"in",
"self",
".",
"file_headers",
":",
"warnings",
".",
"warn",
"(",
"\"Using 'unknown' for Archiving organisation name\"",
")",
"self",
".",
"file_headers",
"[",
"'org'",
"]",
"=",
"\"Unkno... | Writes out an ARC header | [
"Writes",
"out",
"an",
"ARC",
"header"
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L264-L295 | train | 48,609 |
internetarchive/warc | warc/arc.py | ARCFile.write | def write(self, arc_record):
"Writes out the given arc record to the file"
if not self.version:
self.version = 2
if not self.header_written:
self.header_written = True
self._write_header()
arc_record.write_to(self.fileobj, self.version)
self.fileobj.write("\n") | python | def write(self, arc_record):
"Writes out the given arc record to the file"
if not self.version:
self.version = 2
if not self.header_written:
self.header_written = True
self._write_header()
arc_record.write_to(self.fileobj, self.version)
self.fileobj.write("\n") | [
"def",
"write",
"(",
"self",
",",
"arc_record",
")",
":",
"if",
"not",
"self",
".",
"version",
":",
"self",
".",
"version",
"=",
"2",
"if",
"not",
"self",
".",
"header_written",
":",
"self",
".",
"header_written",
"=",
"True",
"self",
".",
"_write_head... | Writes out the given arc record to the file | [
"Writes",
"out",
"the",
"given",
"arc",
"record",
"to",
"the",
"file"
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L297-L305 | train | 48,610 |
internetarchive/warc | warc/arc.py | ARCFile._read_file_header | def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version) | python | def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version) | [
"def",
"_read_file_header",
"(",
"self",
")",
":",
"header",
"=",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",
"payload1",
"=",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",
"payload2",
"=",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",... | Reads out the file header for the arc file. If version was
not provided, this will autopopulate it. | [
"Reads",
"out",
"the",
"file",
"header",
"for",
"the",
"arc",
"file",
".",
"If",
"version",
"was",
"not",
"provided",
"this",
"will",
"autopopulate",
"it",
"."
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L307-L335 | train | 48,611 |
internetarchive/warc | warc/arc.py | ARCFile._read_arc_record | def _read_arc_record(self):
"Reads out an arc record, formats it and returns it"
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == "":
header = self.fileobj.readline()
if header == "":
return None
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header = arc_header, payload = payload) | python | def _read_arc_record(self):
"Reads out an arc record, formats it and returns it"
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == "":
header = self.fileobj.readline()
if header == "":
return None
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header = arc_header, payload = payload) | [
"def",
"_read_arc_record",
"(",
"self",
")",
":",
"#XXX:Noufal Stream payload here rather than just read it",
"# r = self.fileobj.readline() # Drop the initial newline",
"# if r == \"\":",
"# return None",
"# header = self.fileobj.readline()",
"# Strip the initial new lines and read first ... | Reads out an arc record, formats it and returns it | [
"Reads",
"out",
"an",
"arc",
"record",
"formats",
"it",
"and",
"returns",
"it"
] | 8f05a000a23bbd6501217e37cfd862ffdf19da7f | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L337-L366 | train | 48,612 |
figo-connect/schwifty | schwifty/bic.py | BIC.from_bank_code | def from_bank_code(cls, country_code, bank_code):
"""Create a new BIC object from country- and bank-code.
Examples:
>>> bic = BIC.from_bank_code('DE', '20070000')
>>> bic.country_code
'DE'
>>> bic.bank_code
'DEUT'
>>> bic.location_code
'HH'
>>> BIC.from_bank_code('DE', '01010101')
Traceback (most recent call last):
...
ValueError: Invalid bank code '01010101' for country 'DE'
Args:
country_code (str): ISO 3166 alpha2 country-code.
bank_code (str): Country specific bank-code.
Returns:
BIC: a BIC object generated from the given country code and bank code.
Raises:
ValueError: If the given bank code wasn't found in the registry
Note:
This currently only works for German bank-codes.
"""
try:
return cls(registry.get('bank_code')[(country_code, bank_code)]['bic'])
except KeyError:
raise ValueError("Invalid bank code {!r} for country {!r}".format(bank_code,
country_code)) | python | def from_bank_code(cls, country_code, bank_code):
"""Create a new BIC object from country- and bank-code.
Examples:
>>> bic = BIC.from_bank_code('DE', '20070000')
>>> bic.country_code
'DE'
>>> bic.bank_code
'DEUT'
>>> bic.location_code
'HH'
>>> BIC.from_bank_code('DE', '01010101')
Traceback (most recent call last):
...
ValueError: Invalid bank code '01010101' for country 'DE'
Args:
country_code (str): ISO 3166 alpha2 country-code.
bank_code (str): Country specific bank-code.
Returns:
BIC: a BIC object generated from the given country code and bank code.
Raises:
ValueError: If the given bank code wasn't found in the registry
Note:
This currently only works for German bank-codes.
"""
try:
return cls(registry.get('bank_code')[(country_code, bank_code)]['bic'])
except KeyError:
raise ValueError("Invalid bank code {!r} for country {!r}".format(bank_code,
country_code)) | [
"def",
"from_bank_code",
"(",
"cls",
",",
"country_code",
",",
"bank_code",
")",
":",
"try",
":",
"return",
"cls",
"(",
"registry",
".",
"get",
"(",
"'bank_code'",
")",
"[",
"(",
"country_code",
",",
"bank_code",
")",
"]",
"[",
"'bic'",
"]",
")",
"exce... | Create a new BIC object from country- and bank-code.
Examples:
>>> bic = BIC.from_bank_code('DE', '20070000')
>>> bic.country_code
'DE'
>>> bic.bank_code
'DEUT'
>>> bic.location_code
'HH'
>>> BIC.from_bank_code('DE', '01010101')
Traceback (most recent call last):
...
ValueError: Invalid bank code '01010101' for country 'DE'
Args:
country_code (str): ISO 3166 alpha2 country-code.
bank_code (str): Country specific bank-code.
Returns:
BIC: a BIC object generated from the given country code and bank code.
Raises:
ValueError: If the given bank code wasn't found in the registry
Note:
This currently only works for German bank-codes. | [
"Create",
"a",
"new",
"BIC",
"object",
"from",
"country",
"-",
"and",
"bank",
"-",
"code",
"."
] | 69376fade070dbfdf89c57a0060bc290f7a744bb | https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/bic.py#L45-L80 | train | 48,613 |
figo-connect/schwifty | schwifty/iban.py | IBAN.generate | def generate(cls, country_code, bank_code, account_code):
"""Generate an IBAN from it's components.
If the bank-code and/or account-number have less digits than required by their
country specific representation, the respective component is padded with zeros.
Examples:
To generate an IBAN do the following::
>>> bank_code = '37040044'
>>> account_code = '532013000'
>>> iban = IBAN.generate('DE', bank_code, account_code)
>>> iban.formatted
'DE89 3704 0044 0532 0130 00'
Args:
country_code (str): The ISO 3166 alpha-2 country code.
bank_code (str): The country specific bank-code.
account_code (str): The customer specific account-code.
"""
spec = _get_iban_spec(country_code)
bank_code_length = code_length(spec, 'bank_code')
branch_code_length = code_length(spec, 'branch_code')
bank_and_branch_code_length = bank_code_length + branch_code_length
account_code_length = code_length(spec, 'account_code')
if len(bank_code) > bank_and_branch_code_length:
raise ValueError(
"Bank code exceeds maximum size {}".format(bank_and_branch_code_length))
if len(account_code) > account_code_length:
raise ValueError(
"Account code exceeds maximum size {}".format(account_code_length))
bank_code = bank_code.rjust(bank_and_branch_code_length, '0')
account_code = account_code.rjust(account_code_length, '0')
iban = country_code + '??' + bank_code + account_code
return cls(iban) | python | def generate(cls, country_code, bank_code, account_code):
"""Generate an IBAN from it's components.
If the bank-code and/or account-number have less digits than required by their
country specific representation, the respective component is padded with zeros.
Examples:
To generate an IBAN do the following::
>>> bank_code = '37040044'
>>> account_code = '532013000'
>>> iban = IBAN.generate('DE', bank_code, account_code)
>>> iban.formatted
'DE89 3704 0044 0532 0130 00'
Args:
country_code (str): The ISO 3166 alpha-2 country code.
bank_code (str): The country specific bank-code.
account_code (str): The customer specific account-code.
"""
spec = _get_iban_spec(country_code)
bank_code_length = code_length(spec, 'bank_code')
branch_code_length = code_length(spec, 'branch_code')
bank_and_branch_code_length = bank_code_length + branch_code_length
account_code_length = code_length(spec, 'account_code')
if len(bank_code) > bank_and_branch_code_length:
raise ValueError(
"Bank code exceeds maximum size {}".format(bank_and_branch_code_length))
if len(account_code) > account_code_length:
raise ValueError(
"Account code exceeds maximum size {}".format(account_code_length))
bank_code = bank_code.rjust(bank_and_branch_code_length, '0')
account_code = account_code.rjust(account_code_length, '0')
iban = country_code + '??' + bank_code + account_code
return cls(iban) | [
"def",
"generate",
"(",
"cls",
",",
"country_code",
",",
"bank_code",
",",
"account_code",
")",
":",
"spec",
"=",
"_get_iban_spec",
"(",
"country_code",
")",
"bank_code_length",
"=",
"code_length",
"(",
"spec",
",",
"'bank_code'",
")",
"branch_code_length",
"=",... | Generate an IBAN from it's components.
If the bank-code and/or account-number have less digits than required by their
country specific representation, the respective component is padded with zeros.
Examples:
To generate an IBAN do the following::
>>> bank_code = '37040044'
>>> account_code = '532013000'
>>> iban = IBAN.generate('DE', bank_code, account_code)
>>> iban.formatted
'DE89 3704 0044 0532 0130 00'
Args:
country_code (str): The ISO 3166 alpha-2 country code.
bank_code (str): The country specific bank-code.
account_code (str): The customer specific account-code. | [
"Generate",
"an",
"IBAN",
"from",
"it",
"s",
"components",
"."
] | 69376fade070dbfdf89c57a0060bc290f7a744bb | https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/iban.py#L75-L113 | train | 48,614 |
ZELLMECHANIK-DRESDEN/dclab | dclab/cli.py | tdms2rtdc | def tdms2rtdc():
"""Convert .tdms datasets to the hdf5-based .rtdc file format"""
parser = tdms2rtdc_parser()
args = parser.parse_args()
path_tdms = pathlib.Path(args.tdms_path).resolve()
path_rtdc = pathlib.Path(args.rtdc_path)
# Determine whether input path is a tdms file or a directory
if path_tdms.is_dir():
files_tdms = fmt_tdms.get_tdms_files(path_tdms)
if path_rtdc.is_file():
raise ValueError("rtdc_path is a file: {}".format(path_rtdc))
files_rtdc = []
for ff in files_tdms:
ff = pathlib.Path(ff)
rp = ff.relative_to(path_tdms)
# determine output file name (same relative path)
rpr = path_rtdc / rp.with_suffix(".rtdc")
files_rtdc.append(rpr)
else:
files_tdms = [path_tdms]
files_rtdc = [path_rtdc]
for ii in range(len(files_tdms)):
ff = pathlib.Path(files_tdms[ii])
fr = pathlib.Path(files_rtdc[ii])
print_info("Converting {:d}/{:d}: {}".format(
ii + 1, len(files_tdms), ff))
# load dataset
ds = load.load_file(ff)
# create directory
if not fr.parent.exists():
fr.parent.mkdir(parents=True)
# determine features to export
features = []
if args.compute_features:
tocomp = dfn.feature_names
else:
tocomp = ds._events
for feat in tocomp:
if feat not in dfn.scalar_feature_names:
if not ds[feat]:
# ignore non-existent contour, image, mask, or trace
continue
elif feat not in ds:
# ignore non-existent feature
continue
features.append(feat)
# export as hdf5
ds.export.hdf5(path=fr,
features=features,
filtered=False,
override=True) | python | def tdms2rtdc():
"""Convert .tdms datasets to the hdf5-based .rtdc file format"""
parser = tdms2rtdc_parser()
args = parser.parse_args()
path_tdms = pathlib.Path(args.tdms_path).resolve()
path_rtdc = pathlib.Path(args.rtdc_path)
# Determine whether input path is a tdms file or a directory
if path_tdms.is_dir():
files_tdms = fmt_tdms.get_tdms_files(path_tdms)
if path_rtdc.is_file():
raise ValueError("rtdc_path is a file: {}".format(path_rtdc))
files_rtdc = []
for ff in files_tdms:
ff = pathlib.Path(ff)
rp = ff.relative_to(path_tdms)
# determine output file name (same relative path)
rpr = path_rtdc / rp.with_suffix(".rtdc")
files_rtdc.append(rpr)
else:
files_tdms = [path_tdms]
files_rtdc = [path_rtdc]
for ii in range(len(files_tdms)):
ff = pathlib.Path(files_tdms[ii])
fr = pathlib.Path(files_rtdc[ii])
print_info("Converting {:d}/{:d}: {}".format(
ii + 1, len(files_tdms), ff))
# load dataset
ds = load.load_file(ff)
# create directory
if not fr.parent.exists():
fr.parent.mkdir(parents=True)
# determine features to export
features = []
if args.compute_features:
tocomp = dfn.feature_names
else:
tocomp = ds._events
for feat in tocomp:
if feat not in dfn.scalar_feature_names:
if not ds[feat]:
# ignore non-existent contour, image, mask, or trace
continue
elif feat not in ds:
# ignore non-existent feature
continue
features.append(feat)
# export as hdf5
ds.export.hdf5(path=fr,
features=features,
filtered=False,
override=True) | [
"def",
"tdms2rtdc",
"(",
")",
":",
"parser",
"=",
"tdms2rtdc_parser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"path_tdms",
"=",
"pathlib",
".",
"Path",
"(",
"args",
".",
"tdms_path",
")",
".",
"resolve",
"(",
")",
"path_rtdc",
"="... | Convert .tdms datasets to the hdf5-based .rtdc file format | [
"Convert",
".",
"tdms",
"datasets",
"to",
"the",
"hdf5",
"-",
"based",
".",
"rtdc",
"file",
"format"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/cli.py#L21-L75 | train | 48,615 |
ZELLMECHANIK-DRESDEN/dclab | dclab/cli.py | verify_dataset | def verify_dataset():
"""Perform checks on experimental datasets"""
parser = verify_dataset_parser()
args = parser.parse_args()
path_in = pathlib.Path(args.path).resolve()
viol, aler, info = load.check_dataset(path_in)
print_info("Checking {}".format(path_in))
for inf in info:
print_info(inf)
for ale in aler:
print_alert(ale)
for vio in viol:
print_violation(vio)
print_info("Check Complete: {} violations and {} alerts".format(len(viol),
len(aler))) | python | def verify_dataset():
"""Perform checks on experimental datasets"""
parser = verify_dataset_parser()
args = parser.parse_args()
path_in = pathlib.Path(args.path).resolve()
viol, aler, info = load.check_dataset(path_in)
print_info("Checking {}".format(path_in))
for inf in info:
print_info(inf)
for ale in aler:
print_alert(ale)
for vio in viol:
print_violation(vio)
print_info("Check Complete: {} violations and {} alerts".format(len(viol),
len(aler))) | [
"def",
"verify_dataset",
"(",
")",
":",
"parser",
"=",
"verify_dataset_parser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"path_in",
"=",
"pathlib",
".",
"Path",
"(",
"args",
".",
"path",
")",
".",
"resolve",
"(",
")",
"viol",
",",
... | Perform checks on experimental datasets | [
"Perform",
"checks",
"on",
"experimental",
"datasets"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/cli.py#L105-L119 | train | 48,616 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/config.py | load_from_file | def load_from_file(cfg_file):
"""Load the configuration from a file
Parameters
----------
cfg_file: str
Path to configuration file
Returns
-------
cfg : CaseInsensitiveDict
Dictionary with configuration parameters
"""
path = pathlib.Path(cfg_file).resolve()
with path.open('r') as f:
code = f.readlines()
cfg = CaseInsensitiveDict()
for line in code:
# We deal with comments and empty lines
# We need to check line length first and then we look for
# a hash.
line = line.split("#")[0].strip()
if len(line) != 0:
if line.startswith("[") and line.endswith("]"):
section = line[1:-1].lower()
if section not in cfg:
cfg[section] = CaseInsensitiveDict()
continue
var, val = line.split("=", 1)
var = var.strip().lower()
val = val.strip("' ").strip('" ').strip()
# convert parameter value to correct type
if (section in dfn.config_funcs and
var in dfn.config_funcs[section]):
# standard parameter with known type
val = dfn.config_funcs[section][var](val)
else:
# unknown parameter (e.g. plotting in Shape-Out), guess type
var, val = keyval_str2typ(var, val)
if len(var) != 0 and len(str(val)) != 0:
cfg[section][var] = val
return cfg | python | def load_from_file(cfg_file):
"""Load the configuration from a file
Parameters
----------
cfg_file: str
Path to configuration file
Returns
-------
cfg : CaseInsensitiveDict
Dictionary with configuration parameters
"""
path = pathlib.Path(cfg_file).resolve()
with path.open('r') as f:
code = f.readlines()
cfg = CaseInsensitiveDict()
for line in code:
# We deal with comments and empty lines
# We need to check line length first and then we look for
# a hash.
line = line.split("#")[0].strip()
if len(line) != 0:
if line.startswith("[") and line.endswith("]"):
section = line[1:-1].lower()
if section not in cfg:
cfg[section] = CaseInsensitiveDict()
continue
var, val = line.split("=", 1)
var = var.strip().lower()
val = val.strip("' ").strip('" ').strip()
# convert parameter value to correct type
if (section in dfn.config_funcs and
var in dfn.config_funcs[section]):
# standard parameter with known type
val = dfn.config_funcs[section][var](val)
else:
# unknown parameter (e.g. plotting in Shape-Out), guess type
var, val = keyval_str2typ(var, val)
if len(var) != 0 and len(str(val)) != 0:
cfg[section][var] = val
return cfg | [
"def",
"load_from_file",
"(",
"cfg_file",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"cfg_file",
")",
".",
"resolve",
"(",
")",
"with",
"path",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"code",
"=",
"f",
".",
"readlines",
"(",
")",
"... | Load the configuration from a file
Parameters
----------
cfg_file: str
Path to configuration file
Returns
-------
cfg : CaseInsensitiveDict
Dictionary with configuration parameters | [
"Load",
"the",
"configuration",
"from",
"a",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L190-L234 | train | 48,617 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/config.py | keyval_str2typ | def keyval_str2typ(var, val):
"""Convert a variable from a string to its correct type
Parameters
----------
var: str
The variable name
val: str
The value of the variable represented as a string
Returns
-------
varout: str
Stripped lowercase `var`
valout: any type
The value converted from string to its presumed type
Notes
-----
This method is heuristic and is only intended for usage in
dclab.
See Also
--------
keyval_typ2str: the opposite
"""
if not (isinstance(val, str_types)):
# already a type:
return var.strip(), val
var = var.strip().lower()
val = val.strip()
# Find values
if len(var) != 0 and len(val) != 0:
# check for float
if val.startswith("[") and val.endswith("]"):
if len(val.strip("[],")) == 0:
# empty list
values = []
else:
values = val.strip("[],").split(",")
values = [float(v) for v in values]
return var, values
elif val.lower() in ["true", "y"]:
return var, True
elif val.lower() in ["false", "n"]:
return var, False
elif val[0] in ["'", '"'] and val[-1] in ["'", '"']:
return var, val.strip("'").strip('"').strip()
elif val in dfn.scalar_feature_names:
return var, val
else:
try:
return var, float(val.replace(",", "."))
except ValueError:
return var, val | python | def keyval_str2typ(var, val):
"""Convert a variable from a string to its correct type
Parameters
----------
var: str
The variable name
val: str
The value of the variable represented as a string
Returns
-------
varout: str
Stripped lowercase `var`
valout: any type
The value converted from string to its presumed type
Notes
-----
This method is heuristic and is only intended for usage in
dclab.
See Also
--------
keyval_typ2str: the opposite
"""
if not (isinstance(val, str_types)):
# already a type:
return var.strip(), val
var = var.strip().lower()
val = val.strip()
# Find values
if len(var) != 0 and len(val) != 0:
# check for float
if val.startswith("[") and val.endswith("]"):
if len(val.strip("[],")) == 0:
# empty list
values = []
else:
values = val.strip("[],").split(",")
values = [float(v) for v in values]
return var, values
elif val.lower() in ["true", "y"]:
return var, True
elif val.lower() in ["false", "n"]:
return var, False
elif val[0] in ["'", '"'] and val[-1] in ["'", '"']:
return var, val.strip("'").strip('"').strip()
elif val in dfn.scalar_feature_names:
return var, val
else:
try:
return var, float(val.replace(",", "."))
except ValueError:
return var, val | [
"def",
"keyval_str2typ",
"(",
"var",
",",
"val",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"val",
",",
"str_types",
")",
")",
":",
"# already a type:",
"return",
"var",
".",
"strip",
"(",
")",
",",
"val",
"var",
"=",
"var",
".",
"strip",
"(",
... | Convert a variable from a string to its correct type
Parameters
----------
var: str
The variable name
val: str
The value of the variable represented as a string
Returns
-------
varout: str
Stripped lowercase `var`
valout: any type
The value converted from string to its presumed type
Notes
-----
This method is heuristic and is only intended for usage in
dclab.
See Also
--------
keyval_typ2str: the opposite | [
"Convert",
"a",
"variable",
"from",
"a",
"string",
"to",
"its",
"correct",
"type"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L237-L291 | train | 48,618 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/config.py | keyval_typ2str | def keyval_typ2str(var, val):
"""Convert a variable to a string
Parameters
----------
var: str
The variable name
val: any type
The value of the variable
Returns
-------
varout: str
Stripped lowercase `var`
valout: any type
The value converted to a useful string representation
See Also
--------
keyval_str2typ: the opposite
"""
varout = var.strip()
if isinstance(val, list):
data = ", ".join([keyval_typ2str(var, it)[1] for it in val])
valout = "["+data+"]"
elif isinstance(val, float):
valout = "{:.12f}".format(val)
else:
valout = "{}".format(val)
return varout, valout | python | def keyval_typ2str(var, val):
"""Convert a variable to a string
Parameters
----------
var: str
The variable name
val: any type
The value of the variable
Returns
-------
varout: str
Stripped lowercase `var`
valout: any type
The value converted to a useful string representation
See Also
--------
keyval_str2typ: the opposite
"""
varout = var.strip()
if isinstance(val, list):
data = ", ".join([keyval_typ2str(var, it)[1] for it in val])
valout = "["+data+"]"
elif isinstance(val, float):
valout = "{:.12f}".format(val)
else:
valout = "{}".format(val)
return varout, valout | [
"def",
"keyval_typ2str",
"(",
"var",
",",
"val",
")",
":",
"varout",
"=",
"var",
".",
"strip",
"(",
")",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"data",
"=",
"\", \"",
".",
"join",
"(",
"[",
"keyval_typ2str",
"(",
"var",
",",
"it",
... | Convert a variable to a string
Parameters
----------
var: str
The variable name
val: any type
The value of the variable
Returns
-------
varout: str
Stripped lowercase `var`
valout: any type
The value converted to a useful string representation
See Also
--------
keyval_str2typ: the opposite | [
"Convert",
"a",
"variable",
"to",
"a",
"string"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L294-L323 | train | 48,619 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/config.py | Configuration._init_default_values | def _init_default_values(self):
"""Set default initial values
The default values are hard-coded for backwards compatibility
and for several functionalities in dclab.
"""
# Do not filter out invalid event values
self["filtering"]["remove invalid events"] = False
# Enable filters switch is mandatory
self["filtering"]["enable filters"] = True
# Limit events integer to downsample output data
self["filtering"]["limit events"] = 0
# Polygon filter list
self["filtering"]["polygon filters"] = []
# Defaults to no hierarchy parent
self["filtering"]["hierarchy parent"] = "none"
# Check for missing min/max values and set them to zero
for item in dfn.scalar_feature_names:
appends = [" min", " max"]
for a in appends:
self["filtering"][item + a] = 0 | python | def _init_default_values(self):
"""Set default initial values
The default values are hard-coded for backwards compatibility
and for several functionalities in dclab.
"""
# Do not filter out invalid event values
self["filtering"]["remove invalid events"] = False
# Enable filters switch is mandatory
self["filtering"]["enable filters"] = True
# Limit events integer to downsample output data
self["filtering"]["limit events"] = 0
# Polygon filter list
self["filtering"]["polygon filters"] = []
# Defaults to no hierarchy parent
self["filtering"]["hierarchy parent"] = "none"
# Check for missing min/max values and set them to zero
for item in dfn.scalar_feature_names:
appends = [" min", " max"]
for a in appends:
self["filtering"][item + a] = 0 | [
"def",
"_init_default_values",
"(",
"self",
")",
":",
"# Do not filter out invalid event values",
"self",
"[",
"\"filtering\"",
"]",
"[",
"\"remove invalid events\"",
"]",
"=",
"False",
"# Enable filters switch is mandatory",
"self",
"[",
"\"filtering\"",
"]",
"[",
"\"ena... | Set default initial values
The default values are hard-coded for backwards compatibility
and for several functionalities in dclab. | [
"Set",
"default",
"initial",
"values"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L130-L150 | train | 48,620 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/config.py | Configuration.save | def save(self, filename):
"""Save the configuration to a file"""
filename = pathlib.Path(filename)
out = []
keys = sorted(list(self.keys()))
for key in keys:
out.append("[{}]".format(key))
section = self[key]
ikeys = list(section.keys())
ikeys.sort()
for ikey in ikeys:
var, val = keyval_typ2str(ikey, section[ikey])
out.append("{} = {}".format(var, val))
out.append("")
with filename.open("w") as f:
for i in range(len(out)):
# win-like line endings
out[i] = out[i]+"\n"
f.writelines(out) | python | def save(self, filename):
"""Save the configuration to a file"""
filename = pathlib.Path(filename)
out = []
keys = sorted(list(self.keys()))
for key in keys:
out.append("[{}]".format(key))
section = self[key]
ikeys = list(section.keys())
ikeys.sort()
for ikey in ikeys:
var, val = keyval_typ2str(ikey, section[ikey])
out.append("{} = {}".format(var, val))
out.append("")
with filename.open("w") as f:
for i in range(len(out)):
# win-like line endings
out[i] = out[i]+"\n"
f.writelines(out) | [
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"pathlib",
".",
"Path",
"(",
"filename",
")",
"out",
"=",
"[",
"]",
"keys",
"=",
"sorted",
"(",
"list",
"(",
"self",
".",
"keys",
"(",
")",
")",
")",
"for",
"key",
"in",
"k... | Save the configuration to a file | [
"Save",
"the",
"configuration",
"to",
"a",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L160-L179 | train | 48,621 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/config.py | Configuration.update | def update(self, newcfg):
"""Update current config with a dictionary"""
for key in newcfg.keys():
if key not in self._cfg:
self._cfg[key] = CaseInsensitiveDict()
for skey in newcfg[key]:
self._cfg[key][skey] = newcfg[key][skey] | python | def update(self, newcfg):
"""Update current config with a dictionary"""
for key in newcfg.keys():
if key not in self._cfg:
self._cfg[key] = CaseInsensitiveDict()
for skey in newcfg[key]:
self._cfg[key][skey] = newcfg[key][skey] | [
"def",
"update",
"(",
"self",
",",
"newcfg",
")",
":",
"for",
"key",
"in",
"newcfg",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"_cfg",
":",
"self",
".",
"_cfg",
"[",
"key",
"]",
"=",
"CaseInsensitiveDict",
"(",
")",
"for"... | Update current config with a dictionary | [
"Update",
"current",
"config",
"with",
"a",
"dictionary"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L181-L187 | train | 48,622 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/emodulus.py | convert | def convert(area_um, deform, emodulus,
channel_width_in, channel_width_out,
flow_rate_in, flow_rate_out,
viscosity_in, viscosity_out,
inplace=False):
"""convert area-deformation-emodulus triplet
The conversion formula is described in :cite:`Mietke2015`.
Parameters
----------
area_um: ndarray
Convex cell area [µm²]
deform: ndarray
Deformation
emodulus: ndarray
Young's Modulus [kPa]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_in: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
inplace: bool
If True, override input arrays with corrected data
Returns
-------
area_um_corr: ndarray
Corrected cell area [µm²]
deform_corr: ndarray
Deformation (a copy if `inplace` is False)
emodulus_corr: ndarray
Corrected emodulus [kPa]
"""
copy = not inplace
# make sure area_um_corr is not an integer array
area_um_corr = np.array(area_um, dtype=float, copy=copy)
deform_corr = np.array(deform, copy=copy)
emodulus_corr = np.array(emodulus, copy=copy)
if channel_width_in != channel_width_out:
area_um_corr *= (channel_width_out / channel_width_in)**2
if (flow_rate_in != flow_rate_out or
viscosity_in != viscosity_out or
channel_width_in != channel_width_out):
emodulus_corr *= (flow_rate_out / flow_rate_in) \
* (viscosity_out / viscosity_in) \
* (channel_width_in / channel_width_out)**3
return area_um_corr, deform_corr, emodulus_corr | python | def convert(area_um, deform, emodulus,
channel_width_in, channel_width_out,
flow_rate_in, flow_rate_out,
viscosity_in, viscosity_out,
inplace=False):
"""convert area-deformation-emodulus triplet
The conversion formula is described in :cite:`Mietke2015`.
Parameters
----------
area_um: ndarray
Convex cell area [µm²]
deform: ndarray
Deformation
emodulus: ndarray
Young's Modulus [kPa]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_in: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
inplace: bool
If True, override input arrays with corrected data
Returns
-------
area_um_corr: ndarray
Corrected cell area [µm²]
deform_corr: ndarray
Deformation (a copy if `inplace` is False)
emodulus_corr: ndarray
Corrected emodulus [kPa]
"""
copy = not inplace
# make sure area_um_corr is not an integer array
area_um_corr = np.array(area_um, dtype=float, copy=copy)
deform_corr = np.array(deform, copy=copy)
emodulus_corr = np.array(emodulus, copy=copy)
if channel_width_in != channel_width_out:
area_um_corr *= (channel_width_out / channel_width_in)**2
if (flow_rate_in != flow_rate_out or
viscosity_in != viscosity_out or
channel_width_in != channel_width_out):
emodulus_corr *= (flow_rate_out / flow_rate_in) \
* (viscosity_out / viscosity_in) \
* (channel_width_in / channel_width_out)**3
return area_um_corr, deform_corr, emodulus_corr | [
"def",
"convert",
"(",
"area_um",
",",
"deform",
",",
"emodulus",
",",
"channel_width_in",
",",
"channel_width_out",
",",
"flow_rate_in",
",",
"flow_rate_out",
",",
"viscosity_in",
",",
"viscosity_out",
",",
"inplace",
"=",
"False",
")",
":",
"copy",
"=",
"not... | convert area-deformation-emodulus triplet
The conversion formula is described in :cite:`Mietke2015`.
Parameters
----------
area_um: ndarray
Convex cell area [µm²]
deform: ndarray
Deformation
emodulus: ndarray
Young's Modulus [kPa]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_in: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
inplace: bool
If True, override input arrays with corrected data
Returns
-------
area_um_corr: ndarray
Corrected cell area [µm²]
deform_corr: ndarray
Deformation (a copy if `inplace` is False)
emodulus_corr: ndarray
Corrected emodulus [kPa] | [
"convert",
"area",
"-",
"deformation",
"-",
"emodulus",
"triplet"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus.py#L15-L72 | train | 48,623 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/emodulus.py | corrpix_deform_delta | def corrpix_deform_delta(area_um, px_um=0.34):
"""Deformation correction term for pixelation effects
The contour in RT-DC measurements is computed on a
pixelated grid. Due to sampling problems, the measured
deformation is overestimated and must be corrected.
The correction formula is described in :cite:`Herold2017`.
Parameters
----------
area_um: float or ndarray
Apparent (2D image) area in µm² of the event(s)
px_um: float
The detector pixel size in µm.
inplace: bool
Change the deformation values in-place
Returns
-------
deform_delta: float or ndarray
Error of the deformation of the event(s) that must be
subtracted from `deform`.
deform_corr = deform - deform_delta
"""
# A triple-exponential decay can be used to correct for pixelation
# for apparent cell areas between 10 and 1250µm².
# For 99 different radii between 0.4 μm and 20 μm circular objects were
# simulated on a pixel grid with the pixel resolution of 340 nm/pix. At
# each radius 1000 random starting points were created and the
# obtained contours were analyzed in the same fashion as RT-DC data.
# A convex hull on the contour was used to calculate the size (as area)
# and the deformation.
# The pixel size correction `pxcorr` takes into account the pixel size
# in the pixelation correction formula.
pxcorr = (.34 / px_um)**2
offs = 0.0012
exp1 = 0.020 * np.exp(-area_um * pxcorr / 7.1)
exp2 = 0.010 * np.exp(-area_um * pxcorr / 38.6)
exp3 = 0.005 * np.exp(-area_um * pxcorr / 296)
delta = offs + exp1 + exp2 + exp3
return delta | python | def corrpix_deform_delta(area_um, px_um=0.34):
"""Deformation correction term for pixelation effects
The contour in RT-DC measurements is computed on a
pixelated grid. Due to sampling problems, the measured
deformation is overestimated and must be corrected.
The correction formula is described in :cite:`Herold2017`.
Parameters
----------
area_um: float or ndarray
Apparent (2D image) area in µm² of the event(s)
px_um: float
The detector pixel size in µm.
inplace: bool
Change the deformation values in-place
Returns
-------
deform_delta: float or ndarray
Error of the deformation of the event(s) that must be
subtracted from `deform`.
deform_corr = deform - deform_delta
"""
# A triple-exponential decay can be used to correct for pixelation
# for apparent cell areas between 10 and 1250µm².
# For 99 different radii between 0.4 μm and 20 μm circular objects were
# simulated on a pixel grid with the pixel resolution of 340 nm/pix. At
# each radius 1000 random starting points were created and the
# obtained contours were analyzed in the same fashion as RT-DC data.
# A convex hull on the contour was used to calculate the size (as area)
# and the deformation.
# The pixel size correction `pxcorr` takes into account the pixel size
# in the pixelation correction formula.
pxcorr = (.34 / px_um)**2
offs = 0.0012
exp1 = 0.020 * np.exp(-area_um * pxcorr / 7.1)
exp2 = 0.010 * np.exp(-area_um * pxcorr / 38.6)
exp3 = 0.005 * np.exp(-area_um * pxcorr / 296)
delta = offs + exp1 + exp2 + exp3
return delta | [
"def",
"corrpix_deform_delta",
"(",
"area_um",
",",
"px_um",
"=",
"0.34",
")",
":",
"# A triple-exponential decay can be used to correct for pixelation",
"# for apparent cell areas between 10 and 1250µm².",
"# For 99 different radii between 0.4 μm and 20 μm circular objects were",
"# simul... | Deformation correction term for pixelation effects
The contour in RT-DC measurements is computed on a
pixelated grid. Due to sampling problems, the measured
deformation is overestimated and must be corrected.
The correction formula is described in :cite:`Herold2017`.
Parameters
----------
area_um: float or ndarray
Apparent (2D image) area in µm² of the event(s)
px_um: float
The detector pixel size in µm.
inplace: bool
Change the deformation values in-place
Returns
-------
deform_delta: float or ndarray
Error of the deformation of the event(s) that must be
subtracted from `deform`.
deform_corr = deform - deform_delta | [
"Deformation",
"correction",
"term",
"for",
"pixelation",
"effects"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus.py#L75-L117 | train | 48,624 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/emodulus.py | get_emodulus | def get_emodulus(area_um, deform, medium="CellCarrier",
channel_width=20.0, flow_rate=0.16, px_um=0.34,
temperature=23.0, copy=True):
"""Compute apparent Young's modulus using a look-up table
Parameters
----------
area_um: float or ndarray
Apparent (2D image) area [µm²] of the event(s)
deform: float or ndarray
The deformation (1-circularity) of the event(s)
medium: str or float
The medium to compute the viscosity for. If a string
in ["CellCarrier", "CellCarrier B"] is given, the viscosity
will be computed. If a float is given, this value will be
used as the viscosity in mPa*s.
channel_width: float
The channel width [µm]
flow_rate: float
Flow rate [µl/s]
px_um: float
The detector pixel size [µm] used for pixelation correction.
Set to zero to disable.
temperature: float or ndarray
Temperature [°C] of the event(s)
copy: bool
Copy input arrays. If set to false, input arrays are
overridden.
Returns
-------
elasticity: float or ndarray
Apparent Young's modulus in kPa
Notes
-----
- The look-up table used was computed with finite elements methods
according to :cite:`Mokbel2017`.
- The computation of the Young's modulus takes into account
corrections for the viscosity (medium, channel width, flow rate,
and temperature) :cite:`Mietke2015` and corrections for
pixelation of the area and the deformation which are computed
from a (pixelated) image :cite:`Herold2017`.
See Also
--------
dclab.features.emodulus_viscosity.get_viscosity: compute viscosity
for known media
"""
# copy input arrays so we can use in-place calculations
deform = np.array(deform, copy=copy, dtype=float)
area_um = np.array(area_um, copy=copy, dtype=float)
# Get lut data
lut_path = resource_filename("dclab.features", "emodulus_lut.txt")
with pathlib.Path(lut_path).open("rb") as lufd:
lut = np.loadtxt(lufd)
# These meta data are the simulation parameters of the lut
lut_channel_width = 20.0
lut_flow_rate = 0.04
lut_visco = 15.0
# Compute viscosity
if isinstance(medium, (float, int)):
visco = medium
else:
visco = get_viscosity(medium=medium, channel_width=channel_width,
flow_rate=flow_rate, temperature=temperature)
# Corrections
# We correct the lut, because it contains less points than
# the event data. Furthermore, the lut could be cached
# in the future, if this takes up a lot of time.
convert(area_um=lut[:, 0],
deform=lut[:, 1],
emodulus=lut[:, 2],
channel_width_in=lut_channel_width,
channel_width_out=channel_width,
flow_rate_in=lut_flow_rate,
flow_rate_out=flow_rate,
viscosity_in=lut_visco,
viscosity_out=visco,
inplace=True)
if px_um:
# Correct deformation for pixelation effect (subtract ddelt).
ddelt = corrpix_deform_delta(area_um=area_um, px_um=px_um)
deform -= ddelt
# Normalize interpolation data such that the spacing for
# area and deformation is about the same during interpolation.
area_norm = lut[:, 0].max()
normalize(lut[:, 0], area_norm)
normalize(area_um, area_norm)
defo_norm = lut[:, 1].max()
normalize(lut[:, 1], defo_norm)
normalize(deform, defo_norm)
# Perform interpolation
emod = spint.griddata((lut[:, 0], lut[:, 1]), lut[:, 2],
(area_um, deform),
method='linear')
return emod | python | def get_emodulus(area_um, deform, medium="CellCarrier",
channel_width=20.0, flow_rate=0.16, px_um=0.34,
temperature=23.0, copy=True):
"""Compute apparent Young's modulus using a look-up table
Parameters
----------
area_um: float or ndarray
Apparent (2D image) area [µm²] of the event(s)
deform: float or ndarray
The deformation (1-circularity) of the event(s)
medium: str or float
The medium to compute the viscosity for. If a string
in ["CellCarrier", "CellCarrier B"] is given, the viscosity
will be computed. If a float is given, this value will be
used as the viscosity in mPa*s.
channel_width: float
The channel width [µm]
flow_rate: float
Flow rate [µl/s]
px_um: float
The detector pixel size [µm] used for pixelation correction.
Set to zero to disable.
temperature: float or ndarray
Temperature [°C] of the event(s)
copy: bool
Copy input arrays. If set to false, input arrays are
overridden.
Returns
-------
elasticity: float or ndarray
Apparent Young's modulus in kPa
Notes
-----
- The look-up table used was computed with finite elements methods
according to :cite:`Mokbel2017`.
- The computation of the Young's modulus takes into account
corrections for the viscosity (medium, channel width, flow rate,
and temperature) :cite:`Mietke2015` and corrections for
pixelation of the area and the deformation which are computed
from a (pixelated) image :cite:`Herold2017`.
See Also
--------
dclab.features.emodulus_viscosity.get_viscosity: compute viscosity
for known media
"""
# copy input arrays so we can use in-place calculations
deform = np.array(deform, copy=copy, dtype=float)
area_um = np.array(area_um, copy=copy, dtype=float)
# Get lut data
lut_path = resource_filename("dclab.features", "emodulus_lut.txt")
with pathlib.Path(lut_path).open("rb") as lufd:
lut = np.loadtxt(lufd)
# These meta data are the simulation parameters of the lut
lut_channel_width = 20.0
lut_flow_rate = 0.04
lut_visco = 15.0
# Compute viscosity
if isinstance(medium, (float, int)):
visco = medium
else:
visco = get_viscosity(medium=medium, channel_width=channel_width,
flow_rate=flow_rate, temperature=temperature)
# Corrections
# We correct the lut, because it contains less points than
# the event data. Furthermore, the lut could be cached
# in the future, if this takes up a lot of time.
convert(area_um=lut[:, 0],
deform=lut[:, 1],
emodulus=lut[:, 2],
channel_width_in=lut_channel_width,
channel_width_out=channel_width,
flow_rate_in=lut_flow_rate,
flow_rate_out=flow_rate,
viscosity_in=lut_visco,
viscosity_out=visco,
inplace=True)
if px_um:
# Correct deformation for pixelation effect (subtract ddelt).
ddelt = corrpix_deform_delta(area_um=area_um, px_um=px_um)
deform -= ddelt
# Normalize interpolation data such that the spacing for
# area and deformation is about the same during interpolation.
area_norm = lut[:, 0].max()
normalize(lut[:, 0], area_norm)
normalize(area_um, area_norm)
defo_norm = lut[:, 1].max()
normalize(lut[:, 1], defo_norm)
normalize(deform, defo_norm)
# Perform interpolation
emod = spint.griddata((lut[:, 0], lut[:, 1]), lut[:, 2],
(area_um, deform),
method='linear')
return emod | [
"def",
"get_emodulus",
"(",
"area_um",
",",
"deform",
",",
"medium",
"=",
"\"CellCarrier\"",
",",
"channel_width",
"=",
"20.0",
",",
"flow_rate",
"=",
"0.16",
",",
"px_um",
"=",
"0.34",
",",
"temperature",
"=",
"23.0",
",",
"copy",
"=",
"True",
")",
":",... | Compute apparent Young's modulus using a look-up table
Parameters
----------
area_um: float or ndarray
Apparent (2D image) area [µm²] of the event(s)
deform: float or ndarray
The deformation (1-circularity) of the event(s)
medium: str or float
The medium to compute the viscosity for. If a string
in ["CellCarrier", "CellCarrier B"] is given, the viscosity
will be computed. If a float is given, this value will be
used as the viscosity in mPa*s.
channel_width: float
The channel width [µm]
flow_rate: float
Flow rate [µl/s]
px_um: float
The detector pixel size [µm] used for pixelation correction.
Set to zero to disable.
temperature: float or ndarray
Temperature [°C] of the event(s)
copy: bool
Copy input arrays. If set to false, input arrays are
overridden.
Returns
-------
elasticity: float or ndarray
Apparent Young's modulus in kPa
Notes
-----
- The look-up table used was computed with finite elements methods
according to :cite:`Mokbel2017`.
- The computation of the Young's modulus takes into account
corrections for the viscosity (medium, channel width, flow rate,
and temperature) :cite:`Mietke2015` and corrections for
pixelation of the area and the deformation which are computed
from a (pixelated) image :cite:`Herold2017`.
See Also
--------
dclab.features.emodulus_viscosity.get_viscosity: compute viscosity
for known media | [
"Compute",
"apparent",
"Young",
"s",
"modulus",
"using",
"a",
"look",
"-",
"up",
"table"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus.py#L120-L220 | train | 48,625 |
xenon-middleware/pyxenon | xenon/exceptions.py | make_exception | def make_exception(method, e):
"""Creates an exception for a given method, and RpcError."""
x = e.details()
name = x[:x.find(':')].split('.')[-1]
if name in globals():
cls = globals()[name]
else:
cls = UnknownRpcException # noqa
return cls(method, e.code(), e.details()) | python | def make_exception(method, e):
"""Creates an exception for a given method, and RpcError."""
x = e.details()
name = x[:x.find(':')].split('.')[-1]
if name in globals():
cls = globals()[name]
else:
cls = UnknownRpcException # noqa
return cls(method, e.code(), e.details()) | [
"def",
"make_exception",
"(",
"method",
",",
"e",
")",
":",
"x",
"=",
"e",
".",
"details",
"(",
")",
"name",
"=",
"x",
"[",
":",
"x",
".",
"find",
"(",
"':'",
")",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"name",
"in",
... | Creates an exception for a given method, and RpcError. | [
"Creates",
"an",
"exception",
"for",
"a",
"given",
"method",
"and",
"RpcError",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/exceptions.py#L18-L27 | train | 48,626 |
robmcmullen/atrcopy | atrcopy/utils.py | text_to_int | def text_to_int(text, default_base="hex"):
""" Convert text to int, raising exeception on invalid input
"""
if text.startswith("0x"):
value = int(text[2:], 16)
elif text.startswith("$"):
value = int(text[1:], 16)
elif text.startswith("#"):
value = int(text[1:], 10)
elif text.startswith("%"):
value = int(text[1:], 2)
else:
if default_base == "dec":
value = int(text)
else:
value = int(text, 16)
return value | python | def text_to_int(text, default_base="hex"):
""" Convert text to int, raising exeception on invalid input
"""
if text.startswith("0x"):
value = int(text[2:], 16)
elif text.startswith("$"):
value = int(text[1:], 16)
elif text.startswith("#"):
value = int(text[1:], 10)
elif text.startswith("%"):
value = int(text[1:], 2)
else:
if default_base == "dec":
value = int(text)
else:
value = int(text, 16)
return value | [
"def",
"text_to_int",
"(",
"text",
",",
"default_base",
"=",
"\"hex\"",
")",
":",
"if",
"text",
".",
"startswith",
"(",
"\"0x\"",
")",
":",
"value",
"=",
"int",
"(",
"text",
"[",
"2",
":",
"]",
",",
"16",
")",
"elif",
"text",
".",
"startswith",
"("... | Convert text to int, raising exeception on invalid input | [
"Convert",
"text",
"to",
"int",
"raising",
"exeception",
"on",
"invalid",
"input"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/utils.py#L44-L60 | train | 48,627 |
robmcmullen/atrcopy | atrcopy/utils.py | VTOC.assign_sector_numbers | def assign_sector_numbers(self, dirent, sector_list):
""" Map out the sectors and link the sectors together
raises NotEnoughSpaceOnDisk if the whole file won't fit. It will not
allow partial writes.
"""
num = len(sector_list)
order = self.reserve_space(num)
if len(order) != num:
raise errors.InvalidFile("VTOC reserved space for %d sectors. Sectors needed: %d" % (len(order), num))
file_length = 0
last_sector = None
for sector, sector_num in zip(sector_list.sectors, order):
sector.sector_num = sector_num
sector.file_num = dirent.file_num
file_length += sector.used
if last_sector is not None:
last_sector.next_sector_num = sector_num
last_sector = sector
if last_sector is not None:
last_sector.next_sector_num = 0
sector_list.file_length = file_length | python | def assign_sector_numbers(self, dirent, sector_list):
""" Map out the sectors and link the sectors together
raises NotEnoughSpaceOnDisk if the whole file won't fit. It will not
allow partial writes.
"""
num = len(sector_list)
order = self.reserve_space(num)
if len(order) != num:
raise errors.InvalidFile("VTOC reserved space for %d sectors. Sectors needed: %d" % (len(order), num))
file_length = 0
last_sector = None
for sector, sector_num in zip(sector_list.sectors, order):
sector.sector_num = sector_num
sector.file_num = dirent.file_num
file_length += sector.used
if last_sector is not None:
last_sector.next_sector_num = sector_num
last_sector = sector
if last_sector is not None:
last_sector.next_sector_num = 0
sector_list.file_length = file_length | [
"def",
"assign_sector_numbers",
"(",
"self",
",",
"dirent",
",",
"sector_list",
")",
":",
"num",
"=",
"len",
"(",
"sector_list",
")",
"order",
"=",
"self",
".",
"reserve_space",
"(",
"num",
")",
"if",
"len",
"(",
"order",
")",
"!=",
"num",
":",
"raise"... | Map out the sectors and link the sectors together
raises NotEnoughSpaceOnDisk if the whole file won't fit. It will not
allow partial writes. | [
"Map",
"out",
"the",
"sectors",
"and",
"link",
"the",
"sectors",
"together"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/utils.py#L322-L343 | train | 48,628 |
ZELLMECHANIK-DRESDEN/dclab | dclab/downsampling.py | downsample_rand | def downsample_rand(a, samples, remove_invalid=False, ret_idx=False):
"""Downsampling by randomly removing points
Parameters
----------
a: 1d ndarray
The input array to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a`.
Returns
-------
dsa: 1d ndarray of size `samples`
The pseudo-randomly downsampled array `a`
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
np.random.set_state(rs)
samples = int(samples)
if remove_invalid:
# slice out nans and infs
bad = np.isnan(a) | np.isinf(a)
pool = a[~bad]
else:
pool = a
if samples and (samples < pool.shape[0]):
keep = np.zeros_like(pool, dtype=bool)
keep_ids = np.random.choice(np.arange(pool.size),
size=samples,
replace=False)
keep[keep_ids] = True
dsa = pool[keep]
else:
keep = np.ones_like(pool, dtype=bool)
dsa = pool
if remove_invalid:
# translate the kept values back to the original array
idx = np.zeros(a.size, dtype=bool)
idx[~bad] = keep
else:
idx = keep
if ret_idx:
return dsa, idx
else:
return dsa | python | def downsample_rand(a, samples, remove_invalid=False, ret_idx=False):
"""Downsampling by randomly removing points
Parameters
----------
a: 1d ndarray
The input array to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a`.
Returns
-------
dsa: 1d ndarray of size `samples`
The pseudo-randomly downsampled array `a`
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
np.random.set_state(rs)
samples = int(samples)
if remove_invalid:
# slice out nans and infs
bad = np.isnan(a) | np.isinf(a)
pool = a[~bad]
else:
pool = a
if samples and (samples < pool.shape[0]):
keep = np.zeros_like(pool, dtype=bool)
keep_ids = np.random.choice(np.arange(pool.size),
size=samples,
replace=False)
keep[keep_ids] = True
dsa = pool[keep]
else:
keep = np.ones_like(pool, dtype=bool)
dsa = pool
if remove_invalid:
# translate the kept values back to the original array
idx = np.zeros(a.size, dtype=bool)
idx[~bad] = keep
else:
idx = keep
if ret_idx:
return dsa, idx
else:
return dsa | [
"def",
"downsample_rand",
"(",
"a",
",",
"samples",
",",
"remove_invalid",
"=",
"False",
",",
"ret_idx",
"=",
"False",
")",
":",
"# fixed random state for this method",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
"=",
"47",
")",
".",
"... | Downsampling by randomly removing points
Parameters
----------
a: 1d ndarray
The input array to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a`.
Returns
-------
dsa: 1d ndarray of size `samples`
The pseudo-randomly downsampled array `a`
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa` | [
"Downsampling",
"by",
"randomly",
"removing",
"points"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L11-L68 | train | 48,629 |
ZELLMECHANIK-DRESDEN/dclab | dclab/downsampling.py | downsample_grid | def downsample_grid(a, b, samples, ret_idx=False):
"""Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
samples = int(samples)
if samples and samples < a.size:
# The events to keep
keep = np.zeros_like(a, dtype=bool)
# 1. Produce evenly distributed samples
# Choosing grid-size:
# - large numbers tend to show actual structures of the sample,
# which is not desired for plotting
# - small numbers tend will not result in too few samples and,
# in order to reach the desired samples, the data must be
# upsampled again.
# 300 is about the size of the plot in marker sizes and yields
# good results.
grid_size = 300
xpx = norm(a, a, b) * grid_size
ypx = norm(b, b, a) * grid_size
# The events on the grid to process
toproc = np.ones((grid_size, grid_size), dtype=bool)
for ii in range(xpx.size):
xi = xpx[ii]
yi = ypx[ii]
# filter for overlapping events
if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]:
toproc[int(xi-1), int(yi-1)] = False
# include event
keep[ii] = True
# 2. Make sure that we reach `samples` by adding or
# removing events.
diff = np.sum(keep) - samples
if diff > 0:
# Too many samples
rem_indices = np.where(keep)[0]
np.random.set_state(rs)
rem = np.random.choice(rem_indices,
size=diff,
replace=False)
keep[rem] = False
elif diff < 0:
# Not enough samples
add_indices = np.where(~keep)[0]
np.random.set_state(rs)
add = np.random.choice(add_indices,
size=abs(diff),
replace=False)
keep[add] = True
assert np.sum(keep) == samples, "sanity check"
asd = a[keep]
bsd = b[keep]
assert np.allclose(a[keep], asd, equal_nan=True), "sanity check"
assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check"
else:
keep = np.ones_like(a, dtype=bool)
asd = a
bsd = b
if ret_idx:
return asd, bsd, keep
else:
return asd, bsd | python | def downsample_grid(a, b, samples, ret_idx=False):
"""Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
samples = int(samples)
if samples and samples < a.size:
# The events to keep
keep = np.zeros_like(a, dtype=bool)
# 1. Produce evenly distributed samples
# Choosing grid-size:
# - large numbers tend to show actual structures of the sample,
# which is not desired for plotting
# - small numbers tend will not result in too few samples and,
# in order to reach the desired samples, the data must be
# upsampled again.
# 300 is about the size of the plot in marker sizes and yields
# good results.
grid_size = 300
xpx = norm(a, a, b) * grid_size
ypx = norm(b, b, a) * grid_size
# The events on the grid to process
toproc = np.ones((grid_size, grid_size), dtype=bool)
for ii in range(xpx.size):
xi = xpx[ii]
yi = ypx[ii]
# filter for overlapping events
if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]:
toproc[int(xi-1), int(yi-1)] = False
# include event
keep[ii] = True
# 2. Make sure that we reach `samples` by adding or
# removing events.
diff = np.sum(keep) - samples
if diff > 0:
# Too many samples
rem_indices = np.where(keep)[0]
np.random.set_state(rs)
rem = np.random.choice(rem_indices,
size=diff,
replace=False)
keep[rem] = False
elif diff < 0:
# Not enough samples
add_indices = np.where(~keep)[0]
np.random.set_state(rs)
add = np.random.choice(add_indices,
size=abs(diff),
replace=False)
keep[add] = True
assert np.sum(keep) == samples, "sanity check"
asd = a[keep]
bsd = b[keep]
assert np.allclose(a[keep], asd, equal_nan=True), "sanity check"
assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check"
else:
keep = np.ones_like(a, dtype=bool)
asd = a
bsd = b
if ret_idx:
return asd, bsd, keep
else:
return asd, bsd | [
"def",
"downsample_grid",
"(",
"a",
",",
"b",
",",
"samples",
",",
"ret_idx",
"=",
"False",
")",
":",
"# fixed random state for this method",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
"=",
"47",
")",
".",
"get_state",
"(",
")",
"sa... | Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa` | [
"Content",
"-",
"based",
"downsampling",
"for",
"faster",
"visualization"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L72-L167 | train | 48,630 |
ZELLMECHANIK-DRESDEN/dclab | dclab/downsampling.py | valid | def valid(a, b):
"""Check whether `a` and `b` are not inf or nan"""
return ~(np.isnan(a) | np.isinf(a) | np.isnan(b) | np.isinf(b)) | python | def valid(a, b):
"""Check whether `a` and `b` are not inf or nan"""
return ~(np.isnan(a) | np.isinf(a) | np.isnan(b) | np.isinf(b)) | [
"def",
"valid",
"(",
"a",
",",
"b",
")",
":",
"return",
"~",
"(",
"np",
".",
"isnan",
"(",
"a",
")",
"|",
"np",
".",
"isinf",
"(",
"a",
")",
"|",
"np",
".",
"isnan",
"(",
"b",
")",
"|",
"np",
".",
"isinf",
"(",
"b",
")",
")"
] | Check whether `a` and `b` are not inf or nan | [
"Check",
"whether",
"a",
"and",
"b",
"are",
"not",
"inf",
"or",
"nan"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L170-L172 | train | 48,631 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hdf5.py | RTDC_HDF5.parse_config | def parse_config(h5path):
"""Parse the RT-DC configuration of an hdf5 file"""
with h5py.File(h5path, mode="r") as fh5:
h5attrs = dict(fh5.attrs)
# Convert byte strings to unicode strings
# https://github.com/h5py/h5py/issues/379
for key in h5attrs:
if isinstance(h5attrs[key], bytes):
h5attrs[key] = h5attrs[key].decode("utf-8")
config = Configuration()
for key in h5attrs:
section, pname = key.split(":")
if pname not in dfn.config_funcs[section]:
# Add the value as a string but issue a warning
config[section][pname] = h5attrs[key]
msg = "Unknown key '{}' in section [{}]!".format(
pname, section)
warnings.warn(msg, UnknownKeyWarning)
else:
typ = dfn.config_funcs[section][pname]
config[section][pname] = typ(h5attrs[key])
return config | python | def parse_config(h5path):
"""Parse the RT-DC configuration of an hdf5 file"""
with h5py.File(h5path, mode="r") as fh5:
h5attrs = dict(fh5.attrs)
# Convert byte strings to unicode strings
# https://github.com/h5py/h5py/issues/379
for key in h5attrs:
if isinstance(h5attrs[key], bytes):
h5attrs[key] = h5attrs[key].decode("utf-8")
config = Configuration()
for key in h5attrs:
section, pname = key.split(":")
if pname not in dfn.config_funcs[section]:
# Add the value as a string but issue a warning
config[section][pname] = h5attrs[key]
msg = "Unknown key '{}' in section [{}]!".format(
pname, section)
warnings.warn(msg, UnknownKeyWarning)
else:
typ = dfn.config_funcs[section][pname]
config[section][pname] = typ(h5attrs[key])
return config | [
"def",
"parse_config",
"(",
"h5path",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"h5path",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"fh5",
":",
"h5attrs",
"=",
"dict",
"(",
"fh5",
".",
"attrs",
")",
"# Convert byte strings to unicode strings",
"# https://githu... | Parse the RT-DC configuration of an hdf5 file | [
"Parse",
"the",
"RT",
"-",
"DC",
"configuration",
"of",
"an",
"hdf5",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hdf5.py#L144-L167 | train | 48,632 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hdf5.py | RTDC_HDF5.hash | def hash(self):
"""Hash value based on file name and content"""
if self._hash is None:
tohash = [self.path.name]
# Hash a maximum of ~1MB of the hdf5 file
tohash.append(hashfile(self.path, blocksize=65536, count=20))
self._hash = hashobj(tohash)
return self._hash | python | def hash(self):
"""Hash value based on file name and content"""
if self._hash is None:
tohash = [self.path.name]
# Hash a maximum of ~1MB of the hdf5 file
tohash.append(hashfile(self.path, blocksize=65536, count=20))
self._hash = hashobj(tohash)
return self._hash | [
"def",
"hash",
"(",
"self",
")",
":",
"if",
"self",
".",
"_hash",
"is",
"None",
":",
"tohash",
"=",
"[",
"self",
".",
"path",
".",
"name",
"]",
"# Hash a maximum of ~1MB of the hdf5 file",
"tohash",
".",
"append",
"(",
"hashfile",
"(",
"self",
".",
"path... | Hash value based on file name and content | [
"Hash",
"value",
"based",
"on",
"file",
"name",
"and",
"content"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hdf5.py#L170-L177 | train | 48,633 |
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | bin_num_doane | def bin_num_doane(a):
"""Compute number of bins based on Doane's formula"""
bad = np.isnan(a) | np.isinf(a)
data = a[~bad]
acc = bin_width_doane(a)
num = np.int(np.round((data.max() - data.min()) / acc))
return num | python | def bin_num_doane(a):
"""Compute number of bins based on Doane's formula"""
bad = np.isnan(a) | np.isinf(a)
data = a[~bad]
acc = bin_width_doane(a)
num = np.int(np.round((data.max() - data.min()) / acc))
return num | [
"def",
"bin_num_doane",
"(",
"a",
")",
":",
"bad",
"=",
"np",
".",
"isnan",
"(",
"a",
")",
"|",
"np",
".",
"isinf",
"(",
"a",
")",
"data",
"=",
"a",
"[",
"~",
"bad",
"]",
"acc",
"=",
"bin_width_doane",
"(",
"a",
")",
"num",
"=",
"np",
".",
... | Compute number of bins based on Doane's formula | [
"Compute",
"number",
"of",
"bins",
"based",
"on",
"Doane",
"s",
"formula"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L14-L20 | train | 48,634 |
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | ignore_nan_inf | def ignore_nan_inf(kde_method):
"""Ignores nans and infs from the input data
Invalid positions in the resulting density are set to nan.
"""
def new_kde_method(events_x, events_y, xout=None, yout=None,
*args, **kwargs):
bad_in = get_bad_vals(events_x, events_y)
if xout is None:
density = np.zeros_like(events_x, dtype=float)
bad_out = bad_in
xo = yo = None
else:
density = np.zeros_like(xout, dtype=float)
bad_out = get_bad_vals(xout, yout)
xo = xout[~bad_out]
yo = yout[~bad_out]
# Filter events
ev_x = events_x[~bad_in]
ev_y = events_y[~bad_in]
density[~bad_out] = kde_method(ev_x, ev_y,
xo, yo,
*args, **kwargs)
density[bad_out] = np.nan
return density
doc_add = "\n Notes\n" +\
" -----\n" +\
" This is a wrapped version that ignores nan and inf values."
new_kde_method.__doc__ = kde_method.__doc__ + doc_add
return new_kde_method | python | def ignore_nan_inf(kde_method):
"""Ignores nans and infs from the input data
Invalid positions in the resulting density are set to nan.
"""
def new_kde_method(events_x, events_y, xout=None, yout=None,
*args, **kwargs):
bad_in = get_bad_vals(events_x, events_y)
if xout is None:
density = np.zeros_like(events_x, dtype=float)
bad_out = bad_in
xo = yo = None
else:
density = np.zeros_like(xout, dtype=float)
bad_out = get_bad_vals(xout, yout)
xo = xout[~bad_out]
yo = yout[~bad_out]
# Filter events
ev_x = events_x[~bad_in]
ev_y = events_y[~bad_in]
density[~bad_out] = kde_method(ev_x, ev_y,
xo, yo,
*args, **kwargs)
density[bad_out] = np.nan
return density
doc_add = "\n Notes\n" +\
" -----\n" +\
" This is a wrapped version that ignores nan and inf values."
new_kde_method.__doc__ = kde_method.__doc__ + doc_add
return new_kde_method | [
"def",
"ignore_nan_inf",
"(",
"kde_method",
")",
":",
"def",
"new_kde_method",
"(",
"events_x",
",",
"events_y",
",",
"xout",
"=",
"None",
",",
"yout",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"bad_in",
"=",
"get_bad_vals",
"("... | Ignores nans and infs from the input data
Invalid positions in the resulting density are set to nan. | [
"Ignores",
"nans",
"and",
"infs",
"from",
"the",
"input",
"data"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L46-L77 | train | 48,635 |
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | kde_gauss | def kde_gauss(events_x, events_y, xout=None, yout=None):
""" Gaussian Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`scipy.stats.gaussian_kde`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
try:
estimator = gaussian_kde([events_x.flatten(), events_y.flatten()])
density = estimator.evaluate([xout.flatten(), yout.flatten()])
except np.linalg.LinAlgError:
# LinAlgError occurs when matrix to solve is singular (issue #117)
density = np.zeros(xout.shape)*np.nan
return density.reshape(xout.shape) | python | def kde_gauss(events_x, events_y, xout=None, yout=None):
""" Gaussian Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`scipy.stats.gaussian_kde`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
try:
estimator = gaussian_kde([events_x.flatten(), events_y.flatten()])
density = estimator.evaluate([xout.flatten(), yout.flatten()])
except np.linalg.LinAlgError:
# LinAlgError occurs when matrix to solve is singular (issue #117)
density = np.zeros(xout.shape)*np.nan
return density.reshape(xout.shape) | [
"def",
"kde_gauss",
"(",
"events_x",
",",
"events_y",
",",
"xout",
"=",
"None",
",",
"yout",
"=",
"None",
")",
":",
"valid_combi",
"=",
"(",
"(",
"xout",
"is",
"None",
"and",
"yout",
"is",
"None",
")",
"or",
"(",
"xout",
"is",
"not",
"None",
"and",... | Gaussian Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`scipy.stats.gaussian_kde` | [
"Gaussian",
"Kernel",
"Density",
"Estimation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L82-L119 | train | 48,636 |
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | kde_histogram | def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
""" Histogram-based Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
bins: tuple (binsx, binsy)
The number of bins to use for the histogram.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`numpy.histogram2d`
`scipy.interpolate.RectBivariateSpline`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
if bins is None:
bins = (max(5, bin_num_doane(events_x)),
max(5, bin_num_doane(events_y)))
# Compute the histogram
hist2d, xedges, yedges = np.histogram2d(x=events_x,
y=events_y,
bins=bins,
normed=True)
xip = xedges[1:]-(xedges[1]-xedges[0])/2
yip = yedges[1:]-(yedges[1]-yedges[0])/2
estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
density = estimator.ev(xout, yout)
density[density < 0] = 0
return density.reshape(xout.shape) | python | def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
""" Histogram-based Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
bins: tuple (binsx, binsy)
The number of bins to use for the histogram.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`numpy.histogram2d`
`scipy.interpolate.RectBivariateSpline`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
if bins is None:
bins = (max(5, bin_num_doane(events_x)),
max(5, bin_num_doane(events_y)))
# Compute the histogram
hist2d, xedges, yedges = np.histogram2d(x=events_x,
y=events_y,
bins=bins,
normed=True)
xip = xedges[1:]-(xedges[1]-xedges[0])/2
yip = yedges[1:]-(yedges[1]-yedges[0])/2
estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
density = estimator.ev(xout, yout)
density[density < 0] = 0
return density.reshape(xout.shape) | [
"def",
"kde_histogram",
"(",
"events_x",
",",
"events_y",
",",
"xout",
"=",
"None",
",",
"yout",
"=",
"None",
",",
"bins",
"=",
"None",
")",
":",
"valid_combi",
"=",
"(",
"(",
"xout",
"is",
"None",
"and",
"yout",
"is",
"None",
")",
"or",
"(",
"xout... | Histogram-based Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
bins: tuple (binsx, binsy)
The number of bins to use for the histogram.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`numpy.histogram2d`
`scipy.interpolate.RectBivariateSpline` | [
"Histogram",
"-",
"based",
"Kernel",
"Density",
"Estimation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L124-L174 | train | 48,637 |
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | kde_none | def kde_none(events_x, events_y, xout=None, yout=None):
""" No Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
Notes
-----
This method is a convenience method that always returns ones in the shape
that the other methods in this module produce.
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
return np.ones(xout.shape) | python | def kde_none(events_x, events_y, xout=None, yout=None):
""" No Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
Notes
-----
This method is a convenience method that always returns ones in the shape
that the other methods in this module produce.
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
return np.ones(xout.shape) | [
"def",
"kde_none",
"(",
"events_x",
",",
"events_y",
",",
"xout",
"=",
"None",
",",
"yout",
"=",
"None",
")",
":",
"valid_combi",
"=",
"(",
"(",
"xout",
"is",
"None",
"and",
"yout",
"is",
"None",
")",
"or",
"(",
"xout",
"is",
"not",
"None",
"and",
... | No Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
Notes
-----
This method is a convenience method that always returns ones in the shape
that the other methods in this module produce. | [
"No",
"Kernel",
"Density",
"Estimation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L177-L209 | train | 48,638 |
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | kde_multivariate | def kde_multivariate(events_x, events_y, xout=None, yout=None, bw=None):
""" Multivariate Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
bw: tuple (bwx, bwy) or None
The bandwith for kernel density estimation.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`statsmodels.nonparametric.kernel_density.KDEMultivariate`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
if bw is None:
# divide by 2 to make it comparable to histogram KDE
bw = (bin_width_doane(events_x) / 2,
bin_width_doane(events_y) / 2)
positions = np.vstack([xout.flatten(), yout.flatten()])
estimator_ly = KDEMultivariate(data=[events_x.flatten(),
events_y.flatten()],
var_type='cc', bw=bw)
density = estimator_ly.pdf(positions)
return density.reshape(xout.shape) | python | def kde_multivariate(events_x, events_y, xout=None, yout=None, bw=None):
""" Multivariate Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
bw: tuple (bwx, bwy) or None
The bandwith for kernel density estimation.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`statsmodels.nonparametric.kernel_density.KDEMultivariate`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
if bw is None:
# divide by 2 to make it comparable to histogram KDE
bw = (bin_width_doane(events_x) / 2,
bin_width_doane(events_y) / 2)
positions = np.vstack([xout.flatten(), yout.flatten()])
estimator_ly = KDEMultivariate(data=[events_x.flatten(),
events_y.flatten()],
var_type='cc', bw=bw)
density = estimator_ly.pdf(positions)
return density.reshape(xout.shape) | [
"def",
"kde_multivariate",
"(",
"events_x",
",",
"events_y",
",",
"xout",
"=",
"None",
",",
"yout",
"=",
"None",
",",
"bw",
"=",
"None",
")",
":",
"valid_combi",
"=",
"(",
"(",
"xout",
"is",
"None",
"and",
"yout",
"is",
"None",
")",
"or",
"(",
"xou... | Multivariate Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
bw: tuple (bwx, bwy) or None
The bandwith for kernel density estimation.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`statsmodels.nonparametric.kernel_density.KDEMultivariate` | [
"Multivariate",
"Kernel",
"Density",
"Estimation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L214-L257 | train | 48,639 |
ZELLMECHANIK-DRESDEN/dclab | dclab/isoelastics/__init__.py | Isoelastics._add | def _add(self, isoel, col1, col2, method, meta):
"""Convenience method for population self._data"""
self._data[method][col1][col2]["isoelastics"] = isoel
self._data[method][col1][col2]["meta"] = meta
# Use advanced slicing to flip the data columns
isoel_flip = [iso[:, [1, 0, 2]] for iso in isoel]
self._data[method][col2][col1]["isoelastics"] = isoel_flip
self._data[method][col2][col1]["meta"] = meta | python | def _add(self, isoel, col1, col2, method, meta):
"""Convenience method for population self._data"""
self._data[method][col1][col2]["isoelastics"] = isoel
self._data[method][col1][col2]["meta"] = meta
# Use advanced slicing to flip the data columns
isoel_flip = [iso[:, [1, 0, 2]] for iso in isoel]
self._data[method][col2][col1]["isoelastics"] = isoel_flip
self._data[method][col2][col1]["meta"] = meta | [
"def",
"_add",
"(",
"self",
",",
"isoel",
",",
"col1",
",",
"col2",
",",
"method",
",",
"meta",
")",
":",
"self",
".",
"_data",
"[",
"method",
"]",
"[",
"col1",
"]",
"[",
"col2",
"]",
"[",
"\"isoelastics\"",
"]",
"=",
"isoel",
"self",
".",
"_data... | Convenience method for population self._data | [
"Convenience",
"method",
"for",
"population",
"self",
".",
"_data"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L33-L41 | train | 48,640 |
ZELLMECHANIK-DRESDEN/dclab | dclab/isoelastics/__init__.py | Isoelastics.add_px_err | def add_px_err(isoel, col1, col2, px_um, inplace=False):
"""Undo pixelation correction
Isoelasticity lines are already corrected for pixelation
effects as described in
Mapping of Deformation to Apparent Young's Modulus
in Real-Time Deformability Cytometry
Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
https://arxiv.org/abs/1704.00572.
If the isoealsticity lines are displayed with deformation data
that are not corrected, then the lines must be "un"-corrected,
i.e. the pixelation error must be added to the lines to match
the experimental data.
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the fist to columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
px_um: float
Pixel size [µm]
"""
Isoelastics.check_col12(col1, col2)
if "deform" in [col1, col2]:
# add error for deformation
sign = +1
else:
# subtract error for circularity
sign = -1
if col1 == "area_um":
area_ax = 0
deci_ax = 1
else:
area_ax = 1
deci_ax = 0
new_isoel = []
for iso in isoel:
iso = np.array(iso, copy=not inplace)
ddeci = feat_emod.corrpix_deform_delta(area_um=iso[:, area_ax],
px_um=px_um)
iso[:, deci_ax] += sign * ddeci
new_isoel.append(iso)
return new_isoel | python | def add_px_err(isoel, col1, col2, px_um, inplace=False):
"""Undo pixelation correction
Isoelasticity lines are already corrected for pixelation
effects as described in
Mapping of Deformation to Apparent Young's Modulus
in Real-Time Deformability Cytometry
Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
https://arxiv.org/abs/1704.00572.
If the isoealsticity lines are displayed with deformation data
that are not corrected, then the lines must be "un"-corrected,
i.e. the pixelation error must be added to the lines to match
the experimental data.
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the fist to columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
px_um: float
Pixel size [µm]
"""
Isoelastics.check_col12(col1, col2)
if "deform" in [col1, col2]:
# add error for deformation
sign = +1
else:
# subtract error for circularity
sign = -1
if col1 == "area_um":
area_ax = 0
deci_ax = 1
else:
area_ax = 1
deci_ax = 0
new_isoel = []
for iso in isoel:
iso = np.array(iso, copy=not inplace)
ddeci = feat_emod.corrpix_deform_delta(area_um=iso[:, area_ax],
px_um=px_um)
iso[:, deci_ax] += sign * ddeci
new_isoel.append(iso)
return new_isoel | [
"def",
"add_px_err",
"(",
"isoel",
",",
"col1",
",",
"col2",
",",
"px_um",
",",
"inplace",
"=",
"False",
")",
":",
"Isoelastics",
".",
"check_col12",
"(",
"col1",
",",
"col2",
")",
"if",
"\"deform\"",
"in",
"[",
"col1",
",",
"col2",
"]",
":",
"# add ... | Undo pixelation correction
Isoelasticity lines are already corrected for pixelation
effects as described in
Mapping of Deformation to Apparent Young's Modulus
in Real-Time Deformability Cytometry
Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
https://arxiv.org/abs/1704.00572.
If the isoealsticity lines are displayed with deformation data
that are not corrected, then the lines must be "un"-corrected,
i.e. the pixelation error must be added to the lines to match
the experimental data.
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the fist to columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
px_um: float
Pixel size [µm] | [
"Undo",
"pixelation",
"correction"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L105-L154 | train | 48,641 |
ZELLMECHANIK-DRESDEN/dclab | dclab/isoelastics/__init__.py | Isoelastics.convert | def convert(isoel, col1, col2,
channel_width_in, channel_width_out,
flow_rate_in, flow_rate_out,
viscosity_in, viscosity_out,
inplace=False):
"""Convert isoelastics in area_um-deform space
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the fist to columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_in: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
Notes
-----
If only the positions of the isoelastics are of interest and
not the value of the elastic modulus, then it is sufficient
to supply values for the channel width and set the values
for flow rate and viscosity to a constant (e.g. 1).
See Also
--------
dclab.features.emodulus.convert: conversion method used
"""
Isoelastics.check_col12(col1, col2)
if col1 == "area_um":
area_ax = 0
defo_ax = 1
else:
area_ax = 1
defo_ax = 0
new_isoel = []
for iso in isoel:
iso = np.array(iso, copy=not inplace)
feat_emod.convert(area_um=iso[:, area_ax],
deform=iso[:, defo_ax],
emodulus=iso[:, 2],
channel_width_in=channel_width_in,
channel_width_out=channel_width_out,
flow_rate_in=flow_rate_in,
flow_rate_out=flow_rate_out,
viscosity_in=viscosity_in,
viscosity_out=viscosity_out,
inplace=True)
new_isoel.append(iso)
return new_isoel | python | def convert(isoel, col1, col2,
channel_width_in, channel_width_out,
flow_rate_in, flow_rate_out,
viscosity_in, viscosity_out,
inplace=False):
"""Convert isoelastics in area_um-deform space
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the fist to columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_in: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
Notes
-----
If only the positions of the isoelastics are of interest and
not the value of the elastic modulus, then it is sufficient
to supply values for the channel width and set the values
for flow rate and viscosity to a constant (e.g. 1).
See Also
--------
dclab.features.emodulus.convert: conversion method used
"""
Isoelastics.check_col12(col1, col2)
if col1 == "area_um":
area_ax = 0
defo_ax = 1
else:
area_ax = 1
defo_ax = 0
new_isoel = []
for iso in isoel:
iso = np.array(iso, copy=not inplace)
feat_emod.convert(area_um=iso[:, area_ax],
deform=iso[:, defo_ax],
emodulus=iso[:, 2],
channel_width_in=channel_width_in,
channel_width_out=channel_width_out,
flow_rate_in=flow_rate_in,
flow_rate_out=flow_rate_out,
viscosity_in=viscosity_in,
viscosity_out=viscosity_out,
inplace=True)
new_isoel.append(iso)
return new_isoel | [
"def",
"convert",
"(",
"isoel",
",",
"col1",
",",
"col2",
",",
"channel_width_in",
",",
"channel_width_out",
",",
"flow_rate_in",
",",
"flow_rate_out",
",",
"viscosity_in",
",",
"viscosity_out",
",",
"inplace",
"=",
"False",
")",
":",
"Isoelastics",
".",
"chec... | Convert isoelastics in area_um-deform space
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the fist to columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_in: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
Notes
-----
If only the positions of the isoelastics are of interest and
not the value of the elastic modulus, then it is sufficient
to supply values for the channel width and set the values
for flow rate and viscosity to a constant (e.g. 1).
See Also
--------
dclab.features.emodulus.convert: conversion method used | [
"Convert",
"isoelastics",
"in",
"area_um",
"-",
"deform",
"space"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L168-L231 | train | 48,642 |
ZELLMECHANIK-DRESDEN/dclab | dclab/isoelastics/__init__.py | Isoelastics.get_with_rtdcbase | def get_with_rtdcbase(self, col1, col2, method, dataset,
viscosity=None, add_px_err=False):
"""Convenience method that extracts the metadata from RTDCBase
Parameters
----------
col1: str
Name of the first feature of all isoelastics
(e.g. isoel[0][:,0])
col2: str
Name of the second feature of all isoelastics
(e.g. isoel[0][:,1])
method: str
The method used to compute the isoelastics
(must be one of `VALID_METHODS`).
dataset: dclab.rtdc_dataset.RTDCBase
The dataset from which to obtain the metadata.
viscosity: float or `None`
Viscosity of the medium in mPa*s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
add_px_err: bool
If True, add pixelation errors according to
C. Herold (2017), https://arxiv.org/abs/1704.00572
"""
cfg = dataset.config
return self.get(col1=col1,
col2=col2,
method=method,
channel_width=cfg["setup"]["channel width"],
flow_rate=cfg["setup"]["flow rate"],
viscosity=viscosity,
add_px_err=add_px_err,
px_um=cfg["imaging"]["pixel size"]) | python | def get_with_rtdcbase(self, col1, col2, method, dataset,
viscosity=None, add_px_err=False):
"""Convenience method that extracts the metadata from RTDCBase
Parameters
----------
col1: str
Name of the first feature of all isoelastics
(e.g. isoel[0][:,0])
col2: str
Name of the second feature of all isoelastics
(e.g. isoel[0][:,1])
method: str
The method used to compute the isoelastics
(must be one of `VALID_METHODS`).
dataset: dclab.rtdc_dataset.RTDCBase
The dataset from which to obtain the metadata.
viscosity: float or `None`
Viscosity of the medium in mPa*s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
add_px_err: bool
If True, add pixelation errors according to
C. Herold (2017), https://arxiv.org/abs/1704.00572
"""
cfg = dataset.config
return self.get(col1=col1,
col2=col2,
method=method,
channel_width=cfg["setup"]["channel width"],
flow_rate=cfg["setup"]["flow rate"],
viscosity=viscosity,
add_px_err=add_px_err,
px_um=cfg["imaging"]["pixel size"]) | [
"def",
"get_with_rtdcbase",
"(",
"self",
",",
"col1",
",",
"col2",
",",
"method",
",",
"dataset",
",",
"viscosity",
"=",
"None",
",",
"add_px_err",
"=",
"False",
")",
":",
"cfg",
"=",
"dataset",
".",
"config",
"return",
"self",
".",
"get",
"(",
"col1",... | Convenience method that extracts the metadata from RTDCBase
Parameters
----------
col1: str
Name of the first feature of all isoelastics
(e.g. isoel[0][:,0])
col2: str
Name of the second feature of all isoelastics
(e.g. isoel[0][:,1])
method: str
The method used to compute the isoelastics
(must be one of `VALID_METHODS`).
dataset: dclab.rtdc_dataset.RTDCBase
The dataset from which to obtain the metadata.
viscosity: float or `None`
Viscosity of the medium in mPa*s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
add_px_err: bool
If True, add pixelation errors according to
C. Herold (2017), https://arxiv.org/abs/1704.00572 | [
"Convenience",
"method",
"that",
"extracts",
"the",
"metadata",
"from",
"RTDCBase"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L312-L346 | train | 48,643 |
ZELLMECHANIK-DRESDEN/dclab | dclab/isoelastics/__init__.py | Isoelastics.load_data | def load_data(self, path):
"""Load isoelastics from a text file
The text file is loaded with `numpy.loadtxt` and must have
three columns, representing the two data columns and the
elastic modulus with units defined in `definitions.py`.
The file header must have a section defining meta data of the
content like so:
# [...]
#
# - column 1: area_um
# - column 2: deform
# - column 3: emodulus
# - channel width [um]: 20
# - flow rate [ul/s]: 0.04
# - viscosity [mPa*s]: 15
# - method: analytical
#
# [...]
Parameters
----------
path: str
Path to a isoelastics text file
"""
path = pathlib.Path(path).resolve()
# Get metadata
meta = {}
with path.open() as fd:
while True:
line = fd.readline().strip()
if line.startswith("# - "):
line = line.strip("#- ")
var, val = line.split(":")
if val.strip().replace(".", "").isdigit():
# channel width, flow rate, viscosity
val = float(val)
else:
# columns, calculation
val = val.strip().lower()
meta[var.strip()] = val
elif line and not line.startswith("#"):
break
assert meta["column 1"] in dfn.scalar_feature_names
assert meta["column 2"] in dfn.scalar_feature_names
assert meta["column 3"] == "emodulus"
assert meta["method"] in VALID_METHODS
# Load isoelasics
with path.open("rb") as isfd:
isodata = np.loadtxt(isfd)
# Slice out individual isoelastics
emoduli = np.unique(isodata[:, 2])
isoel = []
for emod in emoduli:
where = isodata[:, 2] == emod
isoel.append(isodata[where])
# Add isoelastics to instance
self.add(isoel=isoel,
col1=meta["column 1"],
col2=meta["column 2"],
channel_width=meta["channel width [um]"],
flow_rate=meta["flow rate [ul/s]"],
viscosity=meta["viscosity [mPa*s]"],
method=meta["method"]) | python | def load_data(self, path):
"""Load isoelastics from a text file
The text file is loaded with `numpy.loadtxt` and must have
three columns, representing the two data columns and the
elastic modulus with units defined in `definitions.py`.
The file header must have a section defining meta data of the
content like so:
# [...]
#
# - column 1: area_um
# - column 2: deform
# - column 3: emodulus
# - channel width [um]: 20
# - flow rate [ul/s]: 0.04
# - viscosity [mPa*s]: 15
# - method: analytical
#
# [...]
Parameters
----------
path: str
Path to a isoelastics text file
"""
path = pathlib.Path(path).resolve()
# Get metadata
meta = {}
with path.open() as fd:
while True:
line = fd.readline().strip()
if line.startswith("# - "):
line = line.strip("#- ")
var, val = line.split(":")
if val.strip().replace(".", "").isdigit():
# channel width, flow rate, viscosity
val = float(val)
else:
# columns, calculation
val = val.strip().lower()
meta[var.strip()] = val
elif line and not line.startswith("#"):
break
assert meta["column 1"] in dfn.scalar_feature_names
assert meta["column 2"] in dfn.scalar_feature_names
assert meta["column 3"] == "emodulus"
assert meta["method"] in VALID_METHODS
# Load isoelasics
with path.open("rb") as isfd:
isodata = np.loadtxt(isfd)
# Slice out individual isoelastics
emoduli = np.unique(isodata[:, 2])
isoel = []
for emod in emoduli:
where = isodata[:, 2] == emod
isoel.append(isodata[where])
# Add isoelastics to instance
self.add(isoel=isoel,
col1=meta["column 1"],
col2=meta["column 2"],
channel_width=meta["channel width [um]"],
flow_rate=meta["flow rate [ul/s]"],
viscosity=meta["viscosity [mPa*s]"],
method=meta["method"]) | [
"def",
"load_data",
"(",
"self",
",",
"path",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"path",
")",
".",
"resolve",
"(",
")",
"# Get metadata",
"meta",
"=",
"{",
"}",
"with",
"path",
".",
"open",
"(",
")",
"as",
"fd",
":",
"while",
"Tr... | Load isoelastics from a text file
The text file is loaded with `numpy.loadtxt` and must have
three columns, representing the two data columns and the
elastic modulus with units defined in `definitions.py`.
The file header must have a section defining meta data of the
content like so:
# [...]
#
# - column 1: area_um
# - column 2: deform
# - column 3: emodulus
# - channel width [um]: 20
# - flow rate [ul/s]: 0.04
# - viscosity [mPa*s]: 15
# - method: analytical
#
# [...]
Parameters
----------
path: str
Path to a isoelastics text file | [
"Load",
"isoelastics",
"from",
"a",
"text",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L348-L417 | train | 48,644 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/fl_crosstalk.py | get_compensation_matrix | def get_compensation_matrix(ct21, ct31, ct12, ct32, ct13, ct23):
"""Compute crosstalk inversion matrix
The spillover matrix is
| | c11 c12 c13 |
| | c21 c22 c23 |
| | c31 c32 c33 |
The diagonal elements are set to 1, i.e.
ct11 = c22 = c33 = 1
Parameters
----------
cij: float
Spill from channel i to channel j
Returns
-------
inv: np.ndarray
Compensation matrix (inverted spillover matrix)
"""
ct11 = 1
ct22 = 1
ct33 = 1
if ct21 < 0:
raise ValueError("ct21 matrix element must not be negative!")
if ct31 < 0:
raise ValueError("ct31 matrix element must not be negative!")
if ct12 < 0:
raise ValueError("ct12 matrix element must not be negative!")
if ct32 < 0:
raise ValueError("ct32 matrix element must not be negative!")
if ct13 < 0:
raise ValueError("ct13 matrix element must not be negative!")
if ct23 < 0:
raise ValueError("ct23 matrix element must not be negative!")
crosstalk = np.array([[ct11, ct12, ct13],
[ct21, ct22, ct23],
[ct31, ct32, ct33],
])
return np.linalg.inv(crosstalk) | python | def get_compensation_matrix(ct21, ct31, ct12, ct32, ct13, ct23):
"""Compute crosstalk inversion matrix
The spillover matrix is
| | c11 c12 c13 |
| | c21 c22 c23 |
| | c31 c32 c33 |
The diagonal elements are set to 1, i.e.
ct11 = c22 = c33 = 1
Parameters
----------
cij: float
Spill from channel i to channel j
Returns
-------
inv: np.ndarray
Compensation matrix (inverted spillover matrix)
"""
ct11 = 1
ct22 = 1
ct33 = 1
if ct21 < 0:
raise ValueError("ct21 matrix element must not be negative!")
if ct31 < 0:
raise ValueError("ct31 matrix element must not be negative!")
if ct12 < 0:
raise ValueError("ct12 matrix element must not be negative!")
if ct32 < 0:
raise ValueError("ct32 matrix element must not be negative!")
if ct13 < 0:
raise ValueError("ct13 matrix element must not be negative!")
if ct23 < 0:
raise ValueError("ct23 matrix element must not be negative!")
crosstalk = np.array([[ct11, ct12, ct13],
[ct21, ct22, ct23],
[ct31, ct32, ct33],
])
return np.linalg.inv(crosstalk) | [
"def",
"get_compensation_matrix",
"(",
"ct21",
",",
"ct31",
",",
"ct12",
",",
"ct32",
",",
"ct13",
",",
"ct23",
")",
":",
"ct11",
"=",
"1",
"ct22",
"=",
"1",
"ct33",
"=",
"1",
"if",
"ct21",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"ct21 matrix el... | Compute crosstalk inversion matrix
The spillover matrix is
| | c11 c12 c13 |
| | c21 c22 c23 |
| | c31 c32 c33 |
The diagonal elements are set to 1, i.e.
ct11 = c22 = c33 = 1
Parameters
----------
cij: float
Spill from channel i to channel j
Returns
-------
inv: np.ndarray
Compensation matrix (inverted spillover matrix) | [
"Compute",
"crosstalk",
"inversion",
"matrix"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/fl_crosstalk.py#L9-L58 | train | 48,645 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/fl_crosstalk.py | correct_crosstalk | def correct_crosstalk(fl1, fl2, fl3, fl_channel,
ct21=0, ct31=0, ct12=0, ct32=0, ct13=0, ct23=0):
"""Perform crosstalk correction
Parameters
----------
fli: int, float, or np.ndarray
Measured fluorescence signals
fl_channel: int (1, 2, or 3)
The channel number for which the crosstalk-corrected signal
should be computed
cij: float
Spill (crosstalk or bleed-through) from channel i to channel j
This spill is computed from the fluorescence signal of e.g.
single-stained positive control cells; It is defined by the
ratio of the fluorescence signals of the two channels, i.e
cij = flj / fli.
See Also
--------
get_compensation_matrix: compute the inverse crosstalk matrix
Notes
-----
If there are only two channels (e.g. fl1 and fl2), then the
crosstalk to and from the other channel (ct31, ct32, ct13, ct23)
should be set to zero.
"""
fl_channel = int(fl_channel)
if fl_channel not in [1, 2, 3]:
raise ValueError("`fl_channel` must be 1, 2, or 3!")
minv = get_compensation_matrix(ct21=ct21, ct31=ct31, ct12=ct12,
ct32=ct32, ct13=ct13, ct23=ct23)
col = minv[:, fl_channel - 1].flatten()
flout = col[0] * fl1 + col[1] * fl2 + col[2] * fl3
return flout | python | def correct_crosstalk(fl1, fl2, fl3, fl_channel,
ct21=0, ct31=0, ct12=0, ct32=0, ct13=0, ct23=0):
"""Perform crosstalk correction
Parameters
----------
fli: int, float, or np.ndarray
Measured fluorescence signals
fl_channel: int (1, 2, or 3)
The channel number for which the crosstalk-corrected signal
should be computed
cij: float
Spill (crosstalk or bleed-through) from channel i to channel j
This spill is computed from the fluorescence signal of e.g.
single-stained positive control cells; It is defined by the
ratio of the fluorescence signals of the two channels, i.e
cij = flj / fli.
See Also
--------
get_compensation_matrix: compute the inverse crosstalk matrix
Notes
-----
If there are only two channels (e.g. fl1 and fl2), then the
crosstalk to and from the other channel (ct31, ct32, ct13, ct23)
should be set to zero.
"""
fl_channel = int(fl_channel)
if fl_channel not in [1, 2, 3]:
raise ValueError("`fl_channel` must be 1, 2, or 3!")
minv = get_compensation_matrix(ct21=ct21, ct31=ct31, ct12=ct12,
ct32=ct32, ct13=ct13, ct23=ct23)
col = minv[:, fl_channel - 1].flatten()
flout = col[0] * fl1 + col[1] * fl2 + col[2] * fl3
return flout | [
"def",
"correct_crosstalk",
"(",
"fl1",
",",
"fl2",
",",
"fl3",
",",
"fl_channel",
",",
"ct21",
"=",
"0",
",",
"ct31",
"=",
"0",
",",
"ct12",
"=",
"0",
",",
"ct32",
"=",
"0",
",",
"ct13",
"=",
"0",
",",
"ct23",
"=",
"0",
")",
":",
"fl_channel",... | Perform crosstalk correction
Parameters
----------
fli: int, float, or np.ndarray
Measured fluorescence signals
fl_channel: int (1, 2, or 3)
The channel number for which the crosstalk-corrected signal
should be computed
cij: float
Spill (crosstalk or bleed-through) from channel i to channel j
This spill is computed from the fluorescence signal of e.g.
single-stained positive control cells; It is defined by the
ratio of the fluorescence signals of the two channels, i.e
cij = flj / fli.
See Also
--------
get_compensation_matrix: compute the inverse crosstalk matrix
Notes
-----
If there are only two channels (e.g. fl1 and fl2), then the
crosstalk to and from the other channel (ct31, ct32, ct13, ct23)
should be set to zero. | [
"Perform",
"crosstalk",
"correction"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/fl_crosstalk.py#L61-L98 | train | 48,646 |
def get_inert_ratio_cvx(cont):
    """Compute the inertia ratio of the convex hull of a contour

    The inertia ratio is computed from the central second order of
    moments along x (mu20) and y (mu02) via `sqrt(mu20/mu02)`.

    Parameters
    ----------
    cont: ndarray or list of ndarrays of shape (N,2)
        A 2D array that holds the contour of an event (in pixels)
        e.g. obtained using `mm.contour` where `mm` is an instance
        of `RTDCBase`. The first and second columns of `cont`
        correspond to the x- and y-coordinates of the contour.

    Returns
    -------
    inert_ratio_cvx: float or ndarray of size N
        The inertia ratio of the contour's convex hull; NaN where
        the convex hull could not be computed.

    Notes
    -----
    The contour moments mu20 and mu02 are computed the same way they
    are computed in OpenCV's `moments.cpp`.

    See Also
    --------
    get_inert_ratio_raw: Compute inertia ratio of a raw contour

    References
    ----------
    - `<https://en.wikipedia.org/wiki/Image_moment#Central_moments>`__
    """
    if isinstance(cont, np.ndarray):
        # If cont is an array, it is not a list of contours,
        # because contours can have different lengths.
        cont = [cont]
        ret_list = False
    else:
        ret_list = True

    length = len(cont)
    # np.full is the direct idiom for a NaN-initialized array
    # (previously np.zeros(...) * np.nan).
    inert_ratio_cvx = np.full(length, np.nan, dtype=float)

    for ii in range(length):
        try:
            chull = ssp.ConvexHull(cont[ii])
        except ssp.qhull.QhullError:
            # degenerate contour (e.g. collinear points); leave NaN
            pass
        else:
            hull = cont[ii][chull.vertices, :]
            inert_ratio_cvx[ii] = get_inert_ratio_raw(hull)

    if not ret_list:
        inert_ratio_cvx = inert_ratio_cvx[0]
    return inert_ratio_cvx
"""Compute the inertia ratio of the convex hull of a contour
The inertia ratio is computed from the central second order of moments
along x (mu20) and y (mu02) via `sqrt(mu20/mu02)`.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
inert_ratio_cvx: float or ndarray of size N
The inertia ratio of the contour's convex hull
Notes
-----
The contour moments mu20 and mu02 are computed the same way they
are computed in OpenCV's `moments.cpp`.
See Also
--------
get_inert_ratio_raw: Compute inertia ratio of a raw contour
References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Central_moments>`__
- `<https://github.com/opencv/opencv/blob/
f81370232a651bdac5042efe907bcaa50a66c487/modules/imgproc/src/
moments.cpp#L93>`__
"""
if isinstance(cont, np.ndarray):
# If cont is an array, it is not a list of contours,
# because contours can have different lengths.
cont = [cont]
ret_list = False
else:
ret_list = True
length = len(cont)
inert_ratio_cvx = np.zeros(length, dtype=float) * np.nan
for ii in range(length):
try:
chull = ssp.ConvexHull(cont[ii])
except ssp.qhull.QhullError:
pass
else:
hull = cont[ii][chull.vertices, :]
inert_ratio_cvx[ii] = get_inert_ratio_raw(hull)
if not ret_list:
inert_ratio_cvx = inert_ratio_cvx[0]
return inert_ratio_cvx | [
"def",
"get_inert_ratio_cvx",
"(",
"cont",
")",
":",
"if",
"isinstance",
"(",
"cont",
",",
"np",
".",
"ndarray",
")",
":",
"# If cont is an array, it is not a list of contours,",
"# because contours can have different lengths.",
"cont",
"=",
"[",
"cont",
"]",
"ret_list"... | Compute the inertia ratio of the convex hull of a contour
The inertia ratio is computed from the central second order of moments
along x (mu20) and y (mu02) via `sqrt(mu20/mu02)`.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
inert_ratio_cvx: float or ndarray of size N
The inertia ratio of the contour's convex hull
Notes
-----
The contour moments mu20 and mu02 are computed the same way they
are computed in OpenCV's `moments.cpp`.
See Also
--------
get_inert_ratio_raw: Compute inertia ratio of a raw contour
References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Central_moments>`__
- `<https://github.com/opencv/opencv/blob/
f81370232a651bdac5042efe907bcaa50a66c487/modules/imgproc/src/
moments.cpp#L93>`__ | [
"Compute",
"the",
"inertia",
"ratio",
"of",
"the",
"convex",
"hull",
"of",
"a",
"contour"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/inert_ratio.py#L117-L178 | train | 48,647 |
def get_inert_ratio_prnc(cont):
    """Compute principal inertia ratio of a contour

    The principal inertia ratio is rotation-invariant, which
    makes it applicable to reservoir measurements where e.g.
    cells are not aligned with the channel.

    Parameters
    ----------
    cont: ndarray or list of ndarrays of shape (N,2)
        A 2D array that holds the contour of an event (in pixels)
        e.g. obtained using `mm.contour` where `mm` is an instance
        of `RTDCBase`. The first and second columns of `cont`
        correspond to the x- and y-coordinates of the contour.

    Returns
    -------
    inert_ratio_prnc: float or ndarray of size N
        The principal inertia ratio of the contour; NaN where the
        contour moments could not be computed.
    """
    if isinstance(cont, np.ndarray):
        # If cont is an array, it is not a list of contours,
        # because contours can have different lengths.
        cont = [cont]
        ret_list = False
    else:
        ret_list = True

    length = len(cont)
    inert_ratio_prnc = np.full(length, np.nan, dtype=float)

    for ii in range(length):
        moments = cont_moments_cv(cont[ii])
        if moments is None:
            continue
        # orientation of the contour
        orient = 0.5 * np.arctan2(2 * moments['mu11'],
                                  moments['mu02'] - moments['mu20'])
        # Work on a float *copy*: the rotation below is done in-place
        # and must never modify the caller's contour data. (The old
        # `np.array(..., copy=False)` mutated float inputs and raises
        # under NumPy 2.0 when a copy is required.)
        cc = np.array(cont[ii], dtype=float)
        # rotate contour so that its principal axis is vertical
        rho = np.sqrt(cc[:, 0]**2 + cc[:, 1]**2)
        phi = np.arctan2(cc[:, 1], cc[:, 0]) + orient + np.pi / 2
        cc[:, 0] = rho * np.cos(phi)
        cc[:, 1] = rho * np.sin(phi)
        # compute inertia ratio of rotated contour
        mprnc = cont_moments_cv(cc)
        inert_ratio_prnc[ii] = np.sqrt(mprnc["mu20"] / mprnc["mu02"])

    if not ret_list:
        inert_ratio_prnc = inert_ratio_prnc[0]
    return inert_ratio_prnc
"""Compute principal inertia ratio of a contour
The principal inertia ratio is rotation-invariant, which
makes it applicable to reservoir measurements where e.g.
cells are not aligned with the channel.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
inert_ratio_prnc: float or ndarray of size N
The principal inertia ratio of the contour
"""
if isinstance(cont, np.ndarray):
# If cont is an array, it is not a list of contours,
# because contours can have different lengths.
cont = [cont]
ret_list = False
else:
ret_list = True
length = len(cont)
inert_ratio_prnc = np.zeros(length, dtype=float) * np.nan
for ii in range(length):
moments = cont_moments_cv(cont[ii])
if moments is not None:
# orientation of the contour
orient = 0.5 * np.arctan2(2 * moments['mu11'],
moments['mu02'] - moments['mu20'])
# require floating point array (only copy if necessary)
cc = np.array(cont[ii], dtype=float, copy=False)
# rotate contour
rho = np.sqrt(cc[:, 0]**2 + cc[:, 1]**2)
phi = np.arctan2(cc[:, 1], cc[:, 0]) + orient + np.pi / 2
cc[:, 0] = rho * np.cos(phi)
cc[:, 1] = rho * np.sin(phi)
# compute inertia ratio of rotated contour
mprnc = cont_moments_cv(cc)
inert_ratio_prnc[ii] = np.sqrt(mprnc["mu20"] / mprnc["mu02"])
if not ret_list:
inert_ratio_prnc = inert_ratio_prnc[0]
return inert_ratio_prnc | [
"def",
"get_inert_ratio_prnc",
"(",
"cont",
")",
":",
"if",
"isinstance",
"(",
"cont",
",",
"np",
".",
"ndarray",
")",
":",
"# If cont is an array, it is not a list of contours,",
"# because contours can have different lengths.",
"cont",
"=",
"[",
"cont",
"]",
"ret_list... | Compute principal inertia ratio of a contour
The principal inertia ratio is rotation-invariant, which
makes it applicable to reservoir measurements where e.g.
cells are not aligned with the channel.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
inert_ratio_prnc: float or ndarray of size N
The principal inertia ratio of the contour | [
"Compute",
"principal",
"inertia",
"ratio",
"of",
"a",
"contour"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/inert_ratio.py#L181-L233 | train | 48,648 |
def get_inert_ratio_raw(cont):
    """Compute the inertia ratio of a contour

    The inertia ratio is computed from the central second order of
    moments along x (mu20) and y (mu02) via `sqrt(mu20/mu02)`.

    Parameters
    ----------
    cont: ndarray or list of ndarrays of shape (N,2)
        A 2D array that holds the contour of an event (in pixels)
        e.g. obtained using `mm.contour` where `mm` is an instance
        of `RTDCBase`. The first and second columns of `cont`
        correspond to the x- and y-coordinates of the contour.

    Returns
    -------
    inert_ratio_raw: float or ndarray of size N
        The inertia ratio of the contour; NaN where the contour
        moments could not be computed.

    Notes
    -----
    The contour moments mu20 and mu02 are computed the same way they
    are computed in OpenCV's `moments.cpp`.

    See Also
    --------
    get_inert_ratio_cvx: Compute inertia ratio of the convex hull of
                         a contour

    References
    ----------
    - `<https://en.wikipedia.org/wiki/Image_moment#Central_moments>`__
    """
    if isinstance(cont, np.ndarray):
        # If cont is an array, it is not a list of contours,
        # because contours can have different lengths.
        cont = [cont]
        ret_list = False
    else:
        ret_list = True

    length = len(cont)
    # np.full is the direct idiom for a NaN-initialized array
    # (previously np.zeros(...) * np.nan).
    inert_ratio_raw = np.full(length, np.nan, dtype=float)

    for ii in range(length):
        moments = cont_moments_cv(cont[ii])
        if moments is not None:
            inert_ratio_raw[ii] = np.sqrt(moments["mu20"] / moments["mu02"])

    if not ret_list:
        inert_ratio_raw = inert_ratio_raw[0]
    return inert_ratio_raw
"""Compute the inertia ratio of a contour
The inertia ratio is computed from the central second order of moments
along x (mu20) and y (mu02) via `sqrt(mu20/mu02)`.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
inert_ratio_raw: float or ndarray of size N
The inertia ratio of the contour
Notes
-----
The contour moments mu20 and mu02 are computed the same way they
are computed in OpenCV's `moments.cpp`.
See Also
--------
get_inert_ratio_cvx: Compute inertia ratio of the convex hull of
a contour
References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Central_moments>`__
- `<https://github.com/opencv/opencv/blob/
f81370232a651bdac5042efe907bcaa50a66c487/modules/imgproc/src/
moments.cpp#L93>`__
"""
if isinstance(cont, np.ndarray):
# If cont is an array, it is not a list of contours,
# because contours can have different lengths.
cont = [cont]
ret_list = False
else:
ret_list = True
length = len(cont)
inert_ratio_raw = np.zeros(length, dtype=float) * np.nan
for ii in range(length):
moments = cont_moments_cv(cont[ii])
if moments is not None:
inert_ratio_raw[ii] = np.sqrt(moments["mu20"]/moments["mu02"])
if not ret_list:
inert_ratio_raw = inert_ratio_raw[0]
return inert_ratio_raw | [
"def",
"get_inert_ratio_raw",
"(",
"cont",
")",
":",
"if",
"isinstance",
"(",
"cont",
",",
"np",
".",
"ndarray",
")",
":",
"# If cont is an array, it is not a list of contours,",
"# because contours can have different lengths.",
"cont",
"=",
"[",
"cont",
"]",
"ret_list"... | Compute the inertia ratio of a contour
The inertia ratio is computed from the central second order of moments
along x (mu20) and y (mu02) via `sqrt(mu20/mu02)`.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
inert_ratio_raw: float or ndarray of size N
The inertia ratio of the contour
Notes
-----
The contour moments mu20 and mu02 are computed the same way they
are computed in OpenCV's `moments.cpp`.
See Also
--------
get_inert_ratio_cvx: Compute inertia ratio of the convex hull of
a contour
References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Central_moments>`__
- `<https://github.com/opencv/opencv/blob/
f81370232a651bdac5042efe907bcaa50a66c487/modules/imgproc/src/
moments.cpp#L93>`__ | [
"Compute",
"the",
"inertia",
"ratio",
"of",
"a",
"contour"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/inert_ratio.py#L236-L292 | train | 48,649 |
def get_tilt(cont):
    """Compute tilt of raw contour relative to channel axis

    Parameters
    ----------
    cont: ndarray or list of ndarrays of shape (N,2)
        A 2D array that holds the contour of an event (in pixels)
        e.g. obtained using `mm.contour` where `mm` is an instance
        of `RTDCBase`. The first and second columns of `cont`
        correspond to the x- and y-coordinates of the contour.

    Returns
    -------
    tilt: float or ndarray of size N
        Tilt of the contour in the interval [0, PI/2]; NaN where
        the contour moments could not be computed.

    References
    ----------
    - `<https://en.wikipedia.org/wiki/Image_moment#Examples_2>`__
    """
    if isinstance(cont, np.ndarray):
        # If cont is an array, it is not a list of contours,
        # because contours can have different lengths.
        cont = [cont]
        ret_list = False
    else:
        ret_list = True

    length = len(cont)
    # np.full is the direct idiom for a NaN-initialized array
    # (previously np.zeros(...) * np.nan).
    tilt = np.full(length, np.nan, dtype=float)

    for ii in range(length):
        moments = cont_moments_cv(cont[ii])
        if moments is not None:
            # orientation of the contour
            oii = 0.5 * np.arctan2(2 * moments['mu11'],
                                   moments['mu02'] - moments['mu20'])
            # +PI/2 because relative to channel axis
            tilt[ii] = oii + np.pi / 2

    # restrict to interval [0, PI/2]
    tilt = np.mod(tilt, np.pi)
    tilt[tilt > np.pi / 2] -= np.pi
    tilt = np.abs(tilt)

    if not ret_list:
        tilt = tilt[0]
    return tilt
"""Compute tilt of raw contour relative to channel axis
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
tilt: float or ndarray of size N
Tilt of the contour in the interval [0, PI/2]
References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Examples_2>`__
"""
if isinstance(cont, np.ndarray):
# If cont is an array, it is not a list of contours,
# because contours can have different lengths.
cont = [cont]
ret_list = False
else:
ret_list = True
length = len(cont)
tilt = np.zeros(length, dtype=float) * np.nan
for ii in range(length):
moments = cont_moments_cv(cont[ii])
if moments is not None:
# orientation of the contour
oii = 0.5 * np.arctan2(2 * moments['mu11'],
moments['mu02'] - moments['mu20'])
# +PI/2 because relative to channel axis
tilt[ii] = oii + np.pi/2
# restrict to interval [0,PI/2]
tilt = np.mod(tilt, np.pi)
tilt[tilt > np.pi/2] -= np.pi
tilt = np.abs(tilt)
if not ret_list:
tilt = tilt[0]
return tilt | [
"def",
"get_tilt",
"(",
"cont",
")",
":",
"if",
"isinstance",
"(",
"cont",
",",
"np",
".",
"ndarray",
")",
":",
"# If cont is an array, it is not a list of contours,",
"# because contours can have different lengths.",
"cont",
"=",
"[",
"cont",
"]",
"ret_list",
"=",
... | Compute tilt of raw contour relative to channel axis
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
Returns
-------
tilt: float or ndarray of size N
Tilt of the contour in the interval [0, PI/2]
References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Examples_2>`__ | [
"Compute",
"tilt",
"of",
"raw",
"contour",
"relative",
"to",
"channel",
"axis"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/inert_ratio.py#L295-L344 | train | 48,650 |
def tag(*tags):
    '''
    Build a decorator that attaches the given string @tags to a
    function. The accumulated tags are exposed on the decorated
    function as the set ``fn.tags``, merged with any tags that
    were attached by earlier decorators.
    '''
    def decorator(fn):
        collected = getattr(fn, 'tags', set())
        collected.update(tags)
        fn.tags = collected
        return fn
    return decorator
'''
Constructs a decorator that tags a function with specified
strings (@tags). The tags on the decorated function are
available via fn.tags
'''
def dfn(fn):
_tags = getattr(fn, 'tags', set())
_tags.update(tags)
fn.tags = _tags
return fn
return dfn | [
"def",
"tag",
"(",
"*",
"tags",
")",
":",
"def",
"dfn",
"(",
"fn",
")",
":",
"_tags",
"=",
"getattr",
"(",
"fn",
",",
"'tags'",
",",
"set",
"(",
")",
")",
"_tags",
".",
"update",
"(",
"tags",
")",
"fn",
".",
"tags",
"=",
"_tags",
"return",
"f... | Constructs a decorator that tags a function with specified
strings (@tags). The tags on the decorated function are
available via fn.tags | [
"Constructs",
"a",
"decorator",
"that",
"tags",
"a",
"function",
"with",
"specified",
"strings",
"("
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L42-L53 | train | 48,651 |
def raw(mime='application/octet-stream'):
    '''
    Build a decorator that marks a function as producing a raw
    response: adds the 'raw' tag to ``fn.tags`` and records the
    response mime type on ``fn.mime`` (an already-present mime
    attribute is kept as-is).
    '''
    def decorator(fn):
        fn_tags = getattr(fn, 'tags', set())
        fn_tags.add('raw')
        fn.tags = fn_tags
        fn.mime = getattr(fn, 'mime', mime)
        return fn
    return decorator
'''
Constructs a decorator that marks the fn
as raw response format
'''
def dfn(fn):
tags = getattr(fn, 'tags', set())
tags.add('raw')
fn.tags = tags
fn.mime = getattr(fn, 'mime', mime)
return fn
return dfn | [
"def",
"raw",
"(",
"mime",
"=",
"'application/octet-stream'",
")",
":",
"def",
"dfn",
"(",
"fn",
")",
":",
"tags",
"=",
"getattr",
"(",
"fn",
",",
"'tags'",
",",
"set",
"(",
")",
")",
"tags",
".",
"add",
"(",
"'raw'",
")",
"fn",
".",
"tags",
"=",... | Constructs a decorator that marks the fn
as raw response format | [
"Constructs",
"a",
"decorator",
"that",
"marks",
"the",
"fn",
"as",
"raw",
"response",
"format"
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L69-L80 | train | 48,652 |
def open(self, pysession_id):
    '''
    Called when a client opens a websocket connection.
    Per-connection initialization: remembers the python session id
    and registers this connection (keyed by its object id) in the
    funcserver's websocket registry.
    '''
    self.id = id(self)
    self.funcserver = self.application.funcserver
    self.pysession_id = pysession_id
    # register this connection with the server node
    state = {'id': self.id, 'sock': self}
    self.funcserver.websocks[self.id] = state
    self.state = state
'''
Called when client opens connection. Initialization
is done here.
'''
self.id = id(self)
self.funcserver = self.application.funcserver
self.pysession_id = pysession_id
# register this connection with node
self.state = self.funcserver.websocks[self.id] = {'id': self.id, 'sock': self} | [
"def",
"open",
"(",
"self",
",",
"pysession_id",
")",
":",
"self",
".",
"id",
"=",
"id",
"(",
"self",
")",
"self",
".",
"funcserver",
"=",
"self",
".",
"application",
".",
"funcserver",
"self",
".",
"pysession_id",
"=",
"pysession_id",
"# register this con... | Called when client opens connection. Initialization
is done here. | [
"Called",
"when",
"client",
"opens",
"connection",
".",
"Initialization",
"is",
"done",
"here",
"."
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L115-L125 | train | 48,653 |
def on_message(self, msg):
    '''
    Called when the client sends a message over the websocket.

    Implements the "eval" part of the read-eval-print loop backing
    the python debugging console: looks up (or creates) the
    interpreter session for this connection, executes the received
    code while capturing stdout, and sends the captured output back
    tagged with the request id. Currently the only console client
    is the WebUI; a terminal based console is planned.
    '''
    request = json.loads(msg)

    # find or create the interpreter session for this connection
    session = self.funcserver.pysessions.get(self.pysession_id, None)
    if session is None:
        interpreter = PyInterpreter(self.funcserver.define_python_namespace())
        session = dict(interpreter=interpreter, socks=set([self.id]))
        self.funcserver.pysessions[self.pysession_id] = session
    else:
        interpreter = session['interpreter']
        session['socks'].add(self.id)

    code = request['code']
    msg_id = request['id']

    # run the code with stdout captured; always restore stdout
    real_stdout = sys.stdout
    try:
        sys.stdout = cStringIO.StringIO()
        interpreter.runsource(code)
        output = sys.stdout.getvalue() or interpreter.output
        if isinstance(output, list):
            output = ''.join(output)
        interpreter.output = []
    finally:
        sys.stdout = real_stdout

    self.send_message({'type': MSG_TYPE_CONSOLE, 'id': msg_id, 'data': output})
'''
Called when client sends a message.
Supports a python debugging console. This forms
the "eval" part of a standard read-eval-print loop.
Currently the only implementation of the python
console is in the WebUI but the implementation
of a terminal based console is planned.
'''
msg = json.loads(msg)
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession is None:
interpreter = PyInterpreter(self.funcserver.define_python_namespace())
psession = dict(interpreter=interpreter, socks=set([self.id]))
self.funcserver.pysessions[self.pysession_id] = psession
else:
interpreter = psession['interpreter']
psession['socks'].add(self.id)
code = msg['code']
msg_id = msg['id']
stdout = sys.stdout
try:
sys.stdout = cStringIO.StringIO()
interpreter.runsource(code)
output = sys.stdout.getvalue() or interpreter.output
if isinstance(output, list): output = ''.join(output)
interpreter.output = []
finally:
sys.stdout = stdout
msg = {'type': MSG_TYPE_CONSOLE, 'id': msg_id, 'data': output}
self.send_message(msg) | [
"def",
"on_message",
"(",
"self",
",",
"msg",
")",
":",
"msg",
"=",
"json",
".",
"loads",
"(",
"msg",
")",
"psession",
"=",
"self",
".",
"funcserver",
".",
"pysessions",
".",
"get",
"(",
"self",
".",
"pysession_id",
",",
"None",
")",
"if",
"psession"... | Called when client sends a message.
Supports a python debugging console. This forms
the "eval" part of a standard read-eval-print loop.
Currently the only implementation of the python
console is in the WebUI but the implementation
of a terminal based console is planned. | [
"Called",
"when",
"client",
"sends",
"a",
"message",
"."
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L127-L164 | train | 48,654 |
def on_close(self):
    '''
    Called when the client closes this websocket connection.
    Marks the connection dead in the registry (actual removal is
    deferred to the ioloop) and detaches it from its python
    interpreter session, dropping the session once no connections
    reference it.
    '''
    if self.id in self.funcserver.websocks:
        # mark as dead first; pop later from within the ioloop
        self.funcserver.websocks[self.id] = None
        ioloop = tornado.ioloop.IOLoop.instance()
        ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))

    session = self.funcserver.pysessions.get(self.pysession_id, None)
    if session:
        session['socks'].remove(self.id)
        if not session['socks']:
            del self.funcserver.pysessions[self.pysession_id]
'''
Called when client closes this connection. Cleanup
is done here.
'''
if self.id in self.funcserver.websocks:
self.funcserver.websocks[self.id] = None
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession:
psession['socks'].remove(self.id)
if not psession['socks']:
del self.funcserver.pysessions[self.pysession_id] | [
"def",
"on_close",
"(",
"self",
")",
":",
"if",
"self",
".",
"id",
"in",
"self",
".",
"funcserver",
".",
"websocks",
":",
"self",
".",
"funcserver",
".",
"websocks",
"[",
"self",
".",
"id",
"]",
"=",
"None",
"ioloop",
"=",
"tornado",
".",
"ioloop",
... | Called when client closes this connection. Cleanup
is done here. | [
"Called",
"when",
"client",
"closes",
"this",
"connection",
".",
"Cleanup",
"is",
"done",
"here",
"."
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L166-L181 | train | 48,655 |
deep-compute/funcserver | funcserver/funcserver.py | RPCHandler._clean_kwargs | def _clean_kwargs(self, kwargs, fn):
'''
Remove unexpected keyword arguments from the
set of received keyword arguments.
'''
# Do not do the cleaning if server config
# doesnt ask to ignore
if not self.server.IGNORE_UNEXPECTED_KWARGS:
return kwargs
expected_kwargs = set(inspect.getargspec(fn).args)
got_kwargs = set(kwargs.keys())
unexpected_kwargs = got_kwargs - expected_kwargs
for k in unexpected_kwargs:
del kwargs[k]
return kwargs | python | def _clean_kwargs(self, kwargs, fn):
'''
Remove unexpected keyword arguments from the
set of received keyword arguments.
'''
# Do not do the cleaning if server config
# doesnt ask to ignore
if not self.server.IGNORE_UNEXPECTED_KWARGS:
return kwargs
expected_kwargs = set(inspect.getargspec(fn).args)
got_kwargs = set(kwargs.keys())
unexpected_kwargs = got_kwargs - expected_kwargs
for k in unexpected_kwargs:
del kwargs[k]
return kwargs | [
"def",
"_clean_kwargs",
"(",
"self",
",",
"kwargs",
",",
"fn",
")",
":",
"# Do not do the cleaning if server config",
"# doesnt ask to ignore",
"if",
"not",
"self",
".",
"server",
".",
"IGNORE_UNEXPECTED_KWARGS",
":",
"return",
"kwargs",
"expected_kwargs",
"=",
"set",... | Remove unexpected keyword arguments from the
set of received keyword arguments. | [
"Remove",
"unexpected",
"keyword",
"arguments",
"from",
"the",
"set",
"of",
"received",
"keyword",
"arguments",
"."
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L301-L317 | train | 48,656 |
def dump_stacks(self):
    '''
    Return a text dump of the stack of every live thread. Meant
    for debugging; useful when a deadlock happens.

    borrowed from: http://blog.ziade.org/2012/05/25/zmq-and-gevent-debugging-nightmares/
    '''
    # map thread id -> thread name for all currently known threads
    names = dict((t.ident, t.name) for t in threading.enumerate())

    parts = []
    for ident, frame in sys._current_frames().items():
        if ident not in names:
            continue
        parts.append('Thread 0x%x (%s)\n' % (ident, names[ident]))
        parts.append(''.join(traceback.format_stack(frame)))
        parts.append('\n')

    return ''.join(parts)
'''
Dumps the stack of all threads. This function
is meant for debugging. Useful when a deadlock happens.
borrowed from: http://blog.ziade.org/2012/05/25/zmq-and-gevent-debugging-nightmares/
'''
dump = []
# threads
threads = dict([(th.ident, th.name)
for th in threading.enumerate()])
for thread, frame in sys._current_frames().items():
if thread not in threads: continue
dump.append('Thread 0x%x (%s)\n' % (thread, threads[thread]))
dump.append(''.join(traceback.format_stack(frame)))
dump.append('\n')
return ''.join(dump) | [
"def",
"dump_stacks",
"(",
"self",
")",
":",
"dump",
"=",
"[",
"]",
"# threads",
"threads",
"=",
"dict",
"(",
"[",
"(",
"th",
".",
"ident",
",",
"th",
".",
"name",
")",
"for",
"th",
"in",
"threading",
".",
"enumerate",
"(",
")",
"]",
")",
"for",
... | Dumps the stack of all threads. This function
is meant for debugging. Useful when a deadlock happens.
borrowed from: http://blog.ziade.org/2012/05/25/zmq-and-gevent-debugging-nightmares/ | [
"Dumps",
"the",
"stack",
"of",
"all",
"threads",
".",
"This",
"function",
"is",
"meant",
"for",
"debugging",
".",
"Useful",
"when",
"a",
"deadlock",
"happens",
"."
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L522-L542 | train | 48,657 |
def define_log_pre_format_hooks(self):
    """
    Extend the inherited pre-format log hooks: when the `run`
    command was selected, also stream log lines to connected
    websockets.

    NOTE: the websocket hook is enabled only in debug mode.
    """
    hooks = super(Server, self).define_log_pre_format_hooks()
    running_in_debug = self.args.func == self.run and self.args.debug
    if running_in_debug:
        hooks.append(self._send_log_to_ws)
    return hooks
"""
adds a hook to send to websocket if the run command was selected
"""
hooks = super(Server, self).define_log_pre_format_hooks()
# NOTE enabling logs only on debug mode
if self.args.func == self.run and self.args.debug:
hooks.append(self._send_log_to_ws)
return hooks | [
"def",
"define_log_pre_format_hooks",
"(",
"self",
")",
":",
"hooks",
"=",
"super",
"(",
"Server",
",",
"self",
")",
".",
"define_log_pre_format_hooks",
"(",
")",
"# NOTE enabling logs only on debug mode",
"if",
"self",
".",
"args",
".",
"func",
"==",
"self",
".... | adds a hook to send to websocket if the run command was selected | [
"adds",
"a",
"hook",
"to",
"send",
"to",
"websocket",
"if",
"the",
"run",
"command",
"was",
"selected"
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L563-L572 | train | 48,658 |
def run(self):
    """ prepares the api and starts the tornado funcserver """
    self.log_id = 0
    self.websocks = {}    # all active websockets and their state
    self.pysessions = {}  # all active python interpreter sessions

    if self.DISABLE_REQUESTS_DEBUG_LOGS:
        disable_requests_debug_logs()

    self.threadpool = ThreadPool(self.THREADPOOL_WORKERS)
    self.api = None

    # url handlers that make up the tornado app object
    core_handlers = self.prepare_base_handlers()
    extra_handlers = self.prepare_handlers()

    # template loader (subclasses may substitute their own)
    self.template_loader = TemplateLoader([resolve_path(self.TEMPLATE_PATH)])
    custom_loader = self.prepare_template_loader(self.template_loader)
    if custom_loader is not None:
        self.template_loader = custom_loader

    # static file handling (subclasses may extend the search paths)
    handler_cls = CustomStaticFileHandler
    handler_cls.PATHS.append(resolve_path(self.STATIC_PATH))
    custom_paths = self.prepare_static_paths(handler_cls.PATHS)
    if custom_paths is not None:
        handler_cls.PATHS = custom_paths
    self.static_handler_class = handler_cls

    # navigation tabs for the web ui; debug mode adds dev tools
    self.nav_tabs = [('Home', '/')]
    if self.args.debug:
        self.nav_tabs += [('Console', '/console'), ('Logs', '/logs')]
    self.nav_tabs = self.prepare_nav_tabs(self.nav_tabs)

    settings = {
        'static_path': '<DUMMY-INEXISTENT-PATH>',
        'static_handler_class': self.static_handler_class,
        'template_loader': self.template_loader,
        'compress_response': True,
        'debug': self.args.debug,
    }

    self.app = self.APP_CLASS(**settings)
    self.app.add_handlers(self.VIRTUAL_HOST, extra_handlers + core_handlers)
    sys.funcserver = self.app.funcserver = self

    self.api = self.prepare_api()
    if self.api is not None and not hasattr(self.api, 'log'):
        self.api.log = self.log

    if self.args.port != 0:
        self.app.listen(self.args.port)
    tornado.ioloop.IOLoop.instance().start()
""" prepares the api and starts the tornado funcserver """
self.log_id = 0
# all active websockets and their state
self.websocks = {}
# all active python interpreter sessions
self.pysessions = {}
if self.DISABLE_REQUESTS_DEBUG_LOGS:
disable_requests_debug_logs()
self.threadpool = ThreadPool(self.THREADPOOL_WORKERS)
self.api = None
# tornado app object
base_handlers = self.prepare_base_handlers()
handlers = self.prepare_handlers()
self.template_loader = TemplateLoader([resolve_path(self.TEMPLATE_PATH)])
_ = self.prepare_template_loader(self.template_loader)
if _ is not None: self.template_loader = _
shclass = CustomStaticFileHandler
shclass.PATHS.append(resolve_path(self.STATIC_PATH))
_ = self.prepare_static_paths(shclass.PATHS)
if _ is not None: shclass.PATHS = _
self.static_handler_class = shclass
self.nav_tabs = [('Home', '/')]
if self.args.debug:
self.nav_tabs += [('Console', '/console'), ('Logs', '/logs')]
self.nav_tabs = self.prepare_nav_tabs(self.nav_tabs)
settings = {
'static_path': '<DUMMY-INEXISTENT-PATH>',
'static_handler_class': self.static_handler_class,
'template_loader': self.template_loader,
'compress_response': True,
'debug': self.args.debug,
}
all_handlers = handlers + base_handlers
self.app = self.APP_CLASS(**settings)
self.app.add_handlers(self.VIRTUAL_HOST, all_handlers)
sys.funcserver = self.app.funcserver = self
self.api = self.prepare_api()
if self.api is not None and not hasattr(self.api, 'log'):
self.api.log = self.log
if self.args.port != 0:
self.app.listen(self.args.port)
tornado.ioloop.IOLoop.instance().start() | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"log_id",
"=",
"0",
"# all active websockets and their state",
"self",
".",
"websocks",
"=",
"{",
"}",
"# all active python interpreter sessions",
"self",
".",
"pysessions",
"=",
"{",
"}",
"if",
"self",
".",
"D... | prepares the api and starts the tornado funcserver | [
"prepares",
"the",
"api",
"and",
"starts",
"the",
"tornado",
"funcserver"
] | ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23 | https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L656-L713 | train | 48,659 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | _get_sql | def _get_sql(filename):
"""Returns the contents of the sql file from the given ``filename``."""
with open(os.path.join(SQL_DIR, filename), 'r') as f:
return f.read() | python | def _get_sql(filename):
"""Returns the contents of the sql file from the given ``filename``."""
with open(os.path.join(SQL_DIR, filename), 'r') as f:
return f.read() | [
"def",
"_get_sql",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"SQL_DIR",
",",
"filename",
")",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] | Returns the contents of the sql file from the given ``filename``. | [
"Returns",
"the",
"contents",
"of",
"the",
"sql",
"file",
"from",
"the",
"given",
"filename",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L32-L35 | train | 48,660 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | verify_id_n_version | def verify_id_n_version(id, version):
"""Given an ``id`` and ``version``, verify the identified content exists.
"""
stmt = _get_sql('verify-id-and-version.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
valid = cursor.fetchone()[0]
except TypeError:
raise NotFound(join_ident_hash(id, version))
return True | python | def verify_id_n_version(id, version):
"""Given an ``id`` and ``version``, verify the identified content exists.
"""
stmt = _get_sql('verify-id-and-version.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
valid = cursor.fetchone()[0]
except TypeError:
raise NotFound(join_ident_hash(id, version))
return True | [
"def",
"verify_id_n_version",
"(",
"id",
",",
"version",
")",
":",
"stmt",
"=",
"_get_sql",
"(",
"'verify-id-and-version.sql'",
")",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"version",
"=",
"version",
")",
"with",
"db_connect",
"(",
")",
"as",
"db... | Given an ``id`` and ``version``, verify the identified content exists. | [
"Given",
"an",
"id",
"and",
"version",
"verify",
"the",
"identified",
"content",
"exists",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L38-L52 | train | 48,661 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_id_n_version | def get_id_n_version(ident_hash):
"""From the given ``ident_hash`` return the id and version."""
try:
id, version = split_ident_hash(ident_hash)
except IdentHashMissingVersion:
# XXX Don't import from views... And don't use httpexceptions
from pyramid.httpexceptions import HTTPNotFound
from cnxarchive.views.helpers import get_latest_version
try:
version = get_latest_version(ident_hash)
except HTTPNotFound:
raise NotFound(ident_hash)
id, version = split_ident_hash(join_ident_hash(ident_hash, version))
else:
verify_id_n_version(id, version)
return id, version | python | def get_id_n_version(ident_hash):
"""From the given ``ident_hash`` return the id and version."""
try:
id, version = split_ident_hash(ident_hash)
except IdentHashMissingVersion:
# XXX Don't import from views... And don't use httpexceptions
from pyramid.httpexceptions import HTTPNotFound
from cnxarchive.views.helpers import get_latest_version
try:
version = get_latest_version(ident_hash)
except HTTPNotFound:
raise NotFound(ident_hash)
id, version = split_ident_hash(join_ident_hash(ident_hash, version))
else:
verify_id_n_version(id, version)
return id, version | [
"def",
"get_id_n_version",
"(",
"ident_hash",
")",
":",
"try",
":",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"ident_hash",
")",
"except",
"IdentHashMissingVersion",
":",
"# XXX Don't import from views... And don't use httpexceptions",
"from",
"pyramid",
".",
... | From the given ``ident_hash`` return the id and version. | [
"From",
"the",
"given",
"ident_hash",
"return",
"the",
"id",
"and",
"version",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L55-L71 | train | 48,662 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_type | def get_type(ident_hash):
"""Return the database type for the given ``ident_hash``
As of now, this could either be a 'Module' or 'Collection'.
"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-type.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
type = cursor.fetchone()[0]
return type | python | def get_type(ident_hash):
"""Return the database type for the given ``ident_hash``
As of now, this could either be a 'Module' or 'Collection'.
"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-type.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
type = cursor.fetchone()[0]
return type | [
"def",
"get_type",
"(",
"ident_hash",
")",
":",
"id",
",",
"version",
"=",
"get_id_n_version",
"(",
"ident_hash",
")",
"stmt",
"=",
"_get_sql",
"(",
"'get-type.sql'",
")",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"version",
"=",
"version",
")",
... | Return the database type for the given ``ident_hash``
As of now, this could either be a 'Module' or 'Collection'. | [
"Return",
"the",
"database",
"type",
"for",
"the",
"given",
"ident_hash",
"As",
"of",
"now",
"this",
"could",
"either",
"be",
"a",
"Module",
"or",
"Collection",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L74-L88 | train | 48,663 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_metadata | def get_metadata(ident_hash):
"""Return the dictionary of metadata from the database.
This data is keyed using the cnx-epub data structure.
"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-metadata.sql')
args = dict(id=id, version=version)
# FIXME The license_url and license_text metadata attributes need to
# change to a License structure similar to what is used in cnx-authoring.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
metadata = cursor.fetchone()[0]
except TypeError:
raise NotFound(ident_hash)
return metadata | python | def get_metadata(ident_hash):
"""Return the dictionary of metadata from the database.
This data is keyed using the cnx-epub data structure.
"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-metadata.sql')
args = dict(id=id, version=version)
# FIXME The license_url and license_text metadata attributes need to
# change to a License structure similar to what is used in cnx-authoring.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
metadata = cursor.fetchone()[0]
except TypeError:
raise NotFound(ident_hash)
return metadata | [
"def",
"get_metadata",
"(",
"ident_hash",
")",
":",
"id",
",",
"version",
"=",
"get_id_n_version",
"(",
"ident_hash",
")",
"stmt",
"=",
"_get_sql",
"(",
"'get-metadata.sql'",
")",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"version",
"=",
"version",
... | Return the dictionary of metadata from the database.
This data is keyed using the cnx-epub data structure. | [
"Return",
"the",
"dictionary",
"of",
"metadata",
"from",
"the",
"database",
".",
"This",
"data",
"is",
"keyed",
"using",
"the",
"cnx",
"-",
"epub",
"data",
"structure",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L91-L111 | train | 48,664 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_content | def get_content(ident_hash, context=None):
"""Returns the content for the given ``ident_hash``.
``context`` is optionally ident-hash used to find the content
within the context of a Collection ident_hash.
"""
id, version = get_id_n_version(ident_hash)
filename = 'index.cnxml.html'
if context is not None:
stmt = _get_sql('get-baked-content.sql')
args = dict(id=id, version=version, context=context)
else:
stmt = _get_sql('get-content.sql')
args = dict(id=id, version=version, filename=filename)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
content, _ = cursor.fetchone()
except TypeError:
raise ContentNotFound(ident_hash, context, filename)
return content[:] | python | def get_content(ident_hash, context=None):
"""Returns the content for the given ``ident_hash``.
``context`` is optionally ident-hash used to find the content
within the context of a Collection ident_hash.
"""
id, version = get_id_n_version(ident_hash)
filename = 'index.cnxml.html'
if context is not None:
stmt = _get_sql('get-baked-content.sql')
args = dict(id=id, version=version, context=context)
else:
stmt = _get_sql('get-content.sql')
args = dict(id=id, version=version, filename=filename)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
content, _ = cursor.fetchone()
except TypeError:
raise ContentNotFound(ident_hash, context, filename)
return content[:] | [
"def",
"get_content",
"(",
"ident_hash",
",",
"context",
"=",
"None",
")",
":",
"id",
",",
"version",
"=",
"get_id_n_version",
"(",
"ident_hash",
")",
"filename",
"=",
"'index.cnxml.html'",
"if",
"context",
"is",
"not",
"None",
":",
"stmt",
"=",
"_get_sql",
... | Returns the content for the given ``ident_hash``.
``context`` is optionally ident-hash used to find the content
within the context of a Collection ident_hash. | [
"Returns",
"the",
"content",
"for",
"the",
"given",
"ident_hash",
".",
"context",
"is",
"optionally",
"ident",
"-",
"hash",
"used",
"to",
"find",
"the",
"content",
"within",
"the",
"context",
"of",
"a",
"Collection",
"ident_hash",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L114-L137 | train | 48,665 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_file | def get_file(hash):
"""Return the contents of the file as a ``memoryview``."""
stmt = _get_sql('get-file.sql')
args = dict(hash=hash)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
file, _ = cursor.fetchone()
except TypeError:
raise FileNotFound(hash)
return memoryview(file[:]) | python | def get_file(hash):
"""Return the contents of the file as a ``memoryview``."""
stmt = _get_sql('get-file.sql')
args = dict(hash=hash)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
file, _ = cursor.fetchone()
except TypeError:
raise FileNotFound(hash)
return memoryview(file[:]) | [
"def",
"get_file",
"(",
"hash",
")",
":",
"stmt",
"=",
"_get_sql",
"(",
"'get-file.sql'",
")",
"args",
"=",
"dict",
"(",
"hash",
"=",
"hash",
")",
"with",
"db_connect",
"(",
")",
"as",
"db_conn",
":",
"with",
"db_conn",
".",
"cursor",
"(",
")",
"as",... | Return the contents of the file as a ``memoryview``. | [
"Return",
"the",
"contents",
"of",
"the",
"file",
"as",
"a",
"memoryview",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L164-L176 | train | 48,666 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_registered_files | def get_registered_files(ident_hash):
"""Returns a list SHA1 hashes for registered file entries
identified by the given module ``ident_hash``.
Note, it's possible for a module to reference a file without having
a registered file entry for it.
Note, all files are included, including the raw form of the content.
"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-registered-files-info.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
rows = cursor.fetchall()
if rows is None:
rows = []
hashes = list(set([sha1 for sha1, _, __ in rows]))
return hashes | python | def get_registered_files(ident_hash):
"""Returns a list SHA1 hashes for registered file entries
identified by the given module ``ident_hash``.
Note, it's possible for a module to reference a file without having
a registered file entry for it.
Note, all files are included, including the raw form of the content.
"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-registered-files-info.sql')
args = dict(id=id, version=version)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
rows = cursor.fetchall()
if rows is None:
rows = []
hashes = list(set([sha1 for sha1, _, __ in rows]))
return hashes | [
"def",
"get_registered_files",
"(",
"ident_hash",
")",
":",
"id",
",",
"version",
"=",
"get_id_n_version",
"(",
"ident_hash",
")",
"stmt",
"=",
"_get_sql",
"(",
"'get-registered-files-info.sql'",
")",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"version",
... | Returns a list SHA1 hashes for registered file entries
identified by the given module ``ident_hash``.
Note, it's possible for a module to reference a file without having
a registered file entry for it.
Note, all files are included, including the raw form of the content. | [
"Returns",
"a",
"list",
"SHA1",
"hashes",
"for",
"registered",
"file",
"entries",
"identified",
"by",
"the",
"given",
"module",
"ident_hash",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L179-L201 | train | 48,667 |
openstax/cnx-archive | cnxarchive/scripts/export_epub/db.py | get_tree | def get_tree(ident_hash, baked=False):
"""Return a tree structure of the Collection"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-tree.sql')
args = dict(id=id, version=version, baked=baked)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
tree = cursor.fetchone()[0]
except TypeError:
raise NotFound(ident_hash)
if tree is None:
raise NotFound(ident_hash)
return tree | python | def get_tree(ident_hash, baked=False):
"""Return a tree structure of the Collection"""
id, version = get_id_n_version(ident_hash)
stmt = _get_sql('get-tree.sql')
args = dict(id=id, version=version, baked=baked)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
tree = cursor.fetchone()[0]
except TypeError:
raise NotFound(ident_hash)
if tree is None:
raise NotFound(ident_hash)
return tree | [
"def",
"get_tree",
"(",
"ident_hash",
",",
"baked",
"=",
"False",
")",
":",
"id",
",",
"version",
"=",
"get_id_n_version",
"(",
"ident_hash",
")",
"stmt",
"=",
"_get_sql",
"(",
"'get-tree.sql'",
")",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"ver... | Return a tree structure of the Collection | [
"Return",
"a",
"tree",
"structure",
"of",
"the",
"Collection"
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/export_epub/db.py#L204-L221 | train | 48,668 |
openstax/cnx-archive | cnxarchive/scripts/inject_resource.py | guess_media_type | def guess_media_type(filepath):
"""Returns the media-type of the file at the given ``filepath``"""
o = subprocess.check_output(['file', '--mime-type', '-Lb', filepath])
o = o.strip()
return o | python | def guess_media_type(filepath):
"""Returns the media-type of the file at the given ``filepath``"""
o = subprocess.check_output(['file', '--mime-type', '-Lb', filepath])
o = o.strip()
return o | [
"def",
"guess_media_type",
"(",
"filepath",
")",
":",
"o",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'file'",
",",
"'--mime-type'",
",",
"'-Lb'",
",",
"filepath",
"]",
")",
"o",
"=",
"o",
".",
"strip",
"(",
")",
"return",
"o"
] | Returns the media-type of the file at the given ``filepath`` | [
"Returns",
"the",
"media",
"-",
"type",
"of",
"the",
"file",
"at",
"the",
"given",
"filepath"
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/inject_resource.py#L29-L33 | train | 48,669 |
openstax/cnx-archive | cnxarchive/scripts/inject_resource.py | lookup_module_ident | def lookup_module_ident(id, version):
"""Return the ``module_ident`` for the given ``id`` &
major and minor version as a tuple.
"""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(
"SELECT module_ident FROM modules "
"WHERE uuid = %s "
"AND CONCAT_WS('.', major_version, minor_version) = %s",
(id, version))
try:
mident = cursor.fetchone()[0]
except (IndexError, TypeError):
ident_hash = join_ident_hash(id, version)
raise RuntimeError("Content at {} does not exist."
.format(ident_hash))
return mident | python | def lookup_module_ident(id, version):
"""Return the ``module_ident`` for the given ``id`` &
major and minor version as a tuple.
"""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(
"SELECT module_ident FROM modules "
"WHERE uuid = %s "
"AND CONCAT_WS('.', major_version, minor_version) = %s",
(id, version))
try:
mident = cursor.fetchone()[0]
except (IndexError, TypeError):
ident_hash = join_ident_hash(id, version)
raise RuntimeError("Content at {} does not exist."
.format(ident_hash))
return mident | [
"def",
"lookup_module_ident",
"(",
"id",
",",
"version",
")",
":",
"with",
"db_connect",
"(",
")",
"as",
"db_conn",
":",
"with",
"db_conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"\"SELECT module_ident FROM modules \"",
... | Return the ``module_ident`` for the given ``id`` &
major and minor version as a tuple. | [
"Return",
"the",
"module_ident",
"for",
"the",
"given",
"id",
"&",
"major",
"and",
"minor",
"version",
"as",
"a",
"tuple",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/inject_resource.py#L47-L65 | train | 48,670 |
openstax/cnx-archive | cnxarchive/scripts/inject_resource.py | insert_file | def insert_file(file, media_type):
"""Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
"""
resource_hash = get_file_sha1(file)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
(resource_hash,))
try:
fileid = cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO files (file, media_type) "
"VALUES (%s, %s)"
"RETURNING fileid",
(psycopg2.Binary(file.read()), media_type,))
fileid = cursor.fetchone()[0]
return fileid, resource_hash | python | def insert_file(file, media_type):
"""Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
"""
resource_hash = get_file_sha1(file)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
(resource_hash,))
try:
fileid = cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO files (file, media_type) "
"VALUES (%s, %s)"
"RETURNING fileid",
(psycopg2.Binary(file.read()), media_type,))
fileid = cursor.fetchone()[0]
return fileid, resource_hash | [
"def",
"insert_file",
"(",
"file",
",",
"media_type",
")",
":",
"resource_hash",
"=",
"get_file_sha1",
"(",
"file",
")",
"with",
"db_connect",
"(",
")",
"as",
"db_conn",
":",
"with",
"db_conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",... | Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file. | [
"Upsert",
"the",
"file",
"and",
"media_type",
"into",
"the",
"files",
"table",
".",
"Returns",
"the",
"fileid",
"and",
"sha1",
"of",
"the",
"upserted",
"file",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/inject_resource.py#L68-L86 | train | 48,671 |
openstax/cnx-archive | cnxarchive/scripts/inject_resource.py | upsert_module_file | def upsert_module_file(module_ident, fileid, filename):
"""Upsert a file associated with ``fileid`` with ``filename``
as a module_files entry associated with content at ``module_ident``.
"""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("SELECT true FROM module_files "
"WHERE module_ident = %s "
"AND filename = %s",
(module_ident, filename,))
try:
cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO module_files "
"(module_ident, fileid, filename) "
"VALUES (%s, %s, %s)",
(module_ident, fileid, filename,))
else:
cursor.execute("UPDATE module_files "
"SET (fileid) = (%s) "
"WHERE module_ident = %s AND filename = %s",
(fileid, module_ident, filename,)) | python | def upsert_module_file(module_ident, fileid, filename):
"""Upsert a file associated with ``fileid`` with ``filename``
as a module_files entry associated with content at ``module_ident``.
"""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("SELECT true FROM module_files "
"WHERE module_ident = %s "
"AND filename = %s",
(module_ident, filename,))
try:
cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO module_files "
"(module_ident, fileid, filename) "
"VALUES (%s, %s, %s)",
(module_ident, fileid, filename,))
else:
cursor.execute("UPDATE module_files "
"SET (fileid) = (%s) "
"WHERE module_ident = %s AND filename = %s",
(fileid, module_ident, filename,)) | [
"def",
"upsert_module_file",
"(",
"module_ident",
",",
"fileid",
",",
"filename",
")",
":",
"with",
"db_connect",
"(",
")",
"as",
"db_conn",
":",
"with",
"db_conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"\"SELECT true... | Upsert a file associated with ``fileid`` with ``filename``
as a module_files entry associated with content at ``module_ident``. | [
"Upsert",
"a",
"file",
"associated",
"with",
"fileid",
"with",
"filename",
"as",
"a",
"module_files",
"entry",
"associated",
"with",
"content",
"at",
"module_ident",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/inject_resource.py#L89-L111 | train | 48,672 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/contour.py | get_contour | def get_contour(mask):
"""Compute the image contour from a mask
The contour is computed in a very inefficient way using scikit-image
and a conversion of float coordinates to pixel coordinates.
Parameters
----------
mask: binary ndarray of shape (M,N) or (K,M,N)
The mask outlining the pixel positions of the event.
If a 3d array is given, then `K` indexes the individual
contours.
Returns
-------
cont: ndarray or list of K ndarrays of shape (J,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
"""
if isinstance(mask, np.ndarray) and len(mask.shape) == 2:
mask = [mask]
ret_list = False
else:
ret_list = True
contours = []
for mi in mask:
c0 = find_contours(mi.transpose(),
level=.9999,
positive_orientation="low",
fully_connected="high")[0]
# round all coordinates to pixel values
c1 = np.asarray(np.round(c0), int)
# remove duplicates
c2 = remove_duplicates(c1)
contours.append(c2)
if ret_list:
return contours
else:
return contours[0] | python | def get_contour(mask):
"""Compute the image contour from a mask
The contour is computed in a very inefficient way using scikit-image
and a conversion of float coordinates to pixel coordinates.
Parameters
----------
mask: binary ndarray of shape (M,N) or (K,M,N)
The mask outlining the pixel positions of the event.
If a 3d array is given, then `K` indexes the individual
contours.
Returns
-------
cont: ndarray or list of K ndarrays of shape (J,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
"""
if isinstance(mask, np.ndarray) and len(mask.shape) == 2:
mask = [mask]
ret_list = False
else:
ret_list = True
contours = []
for mi in mask:
c0 = find_contours(mi.transpose(),
level=.9999,
positive_orientation="low",
fully_connected="high")[0]
# round all coordinates to pixel values
c1 = np.asarray(np.round(c0), int)
# remove duplicates
c2 = remove_duplicates(c1)
contours.append(c2)
if ret_list:
return contours
else:
return contours[0] | [
"def",
"get_contour",
"(",
"mask",
")",
":",
"if",
"isinstance",
"(",
"mask",
",",
"np",
".",
"ndarray",
")",
"and",
"len",
"(",
"mask",
".",
"shape",
")",
"==",
"2",
":",
"mask",
"=",
"[",
"mask",
"]",
"ret_list",
"=",
"False",
"else",
":",
"ret... | Compute the image contour from a mask
The contour is computed in a very inefficient way using scikit-image
and a conversion of float coordinates to pixel coordinates.
Parameters
----------
mask: binary ndarray of shape (M,N) or (K,M,N)
The mask outlining the pixel positions of the event.
If a 3d array is given, then `K` indexes the individual
contours.
Returns
-------
cont: ndarray or list of K ndarrays of shape (J,2)
A 2D array that holds the contour of an event (in pixels)
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour. | [
"Compute",
"the",
"image",
"contour",
"from",
"a",
"mask"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/contour.py#L13-L54 | train | 48,673 |
simse/pymitv | pymitv/discover.py | Discover.scan | def scan(self, stop_on_first=True, base_ip=0):
"""Scans the local network for TVs."""
tvs = []
# Check if base_ip has been passed
if base_ip == 0:
# Find IP address of computer pymitv is running on
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("8.8.8.8", 80))
ip = sock.getsockname()[0]
sock.close()
# Get IP and compose a base like 192.168.1.xxx
ip_parts = ip.split('.')
base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2]
# Loop through every IP and check if TV is alive
for ip_suffix in range(2, 256):
ip_check = '{}.{}'.format(base_ip, ip_suffix)
if self.check_ip(ip_check):
tvs.append(ip_check)
if stop_on_first:
break
return tvs | python | def scan(self, stop_on_first=True, base_ip=0):
"""Scans the local network for TVs."""
tvs = []
# Check if base_ip has been passed
if base_ip == 0:
# Find IP address of computer pymitv is running on
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("8.8.8.8", 80))
ip = sock.getsockname()[0]
sock.close()
# Get IP and compose a base like 192.168.1.xxx
ip_parts = ip.split('.')
base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2]
# Loop through every IP and check if TV is alive
for ip_suffix in range(2, 256):
ip_check = '{}.{}'.format(base_ip, ip_suffix)
if self.check_ip(ip_check):
tvs.append(ip_check)
if stop_on_first:
break
return tvs | [
"def",
"scan",
"(",
"self",
",",
"stop_on_first",
"=",
"True",
",",
"base_ip",
"=",
"0",
")",
":",
"tvs",
"=",
"[",
"]",
"# Check if base_ip has been passed\r",
"if",
"base_ip",
"==",
"0",
":",
"# Find IP address of computer pymitv is running on\r",
"sock",
"=",
... | Scans the local network for TVs. | [
"Scans",
"the",
"local",
"network",
"for",
"TVs",
"."
] | 03213f591d70fbf90ba2b6af372e474c9bfb99f6 | https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/discover.py#L13-L39 | train | 48,674 |
simse/pymitv | pymitv/discover.py | Discover.check_ip | def check_ip(ip, log=False):
"""Attempts a connection to the TV and checks if there really is a TV."""
if log:
print('Checking ip: {}...'.format(ip))
request_timeout = 0.1
try:
tv_url = 'http://{}:6095/request?action=isalive'.format(ip)
request = requests.get(tv_url, timeout=request_timeout)
except requests.exceptions.ConnectTimeout:
return False
return request.status_code == 200 | python | def check_ip(ip, log=False):
"""Attempts a connection to the TV and checks if there really is a TV."""
if log:
print('Checking ip: {}...'.format(ip))
request_timeout = 0.1
try:
tv_url = 'http://{}:6095/request?action=isalive'.format(ip)
request = requests.get(tv_url, timeout=request_timeout)
except requests.exceptions.ConnectTimeout:
return False
return request.status_code == 200 | [
"def",
"check_ip",
"(",
"ip",
",",
"log",
"=",
"False",
")",
":",
"if",
"log",
":",
"print",
"(",
"'Checking ip: {}...'",
".",
"format",
"(",
"ip",
")",
")",
"request_timeout",
"=",
"0.1",
"try",
":",
"tv_url",
"=",
"'http://{}:6095/request?action=isalive'",... | Attempts a connection to the TV and checks if there really is a TV. | [
"Attempts",
"a",
"connection",
"to",
"the",
"TV",
"and",
"checks",
"if",
"there",
"really",
"is",
"a",
"TV",
"."
] | 03213f591d70fbf90ba2b6af372e474c9bfb99f6 | https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/discover.py#L42-L55 | train | 48,675 |
xenon-middleware/pyxenon | xenon/oop.py | get_field_type | def get_field_type(f):
"""Obtain the type name of a GRPC Message field."""
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types) | python | def get_field_type(f):
"""Obtain the type name of a GRPC Message field."""
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types) | [
"def",
"get_field_type",
"(",
"f",
")",
":",
"types",
"=",
"(",
"t",
"[",
"5",
":",
"]",
"for",
"t",
"in",
"dir",
"(",
"f",
")",
"if",
"t",
"[",
":",
"4",
"]",
"==",
"'TYPE'",
"and",
"getattr",
"(",
"f",
",",
"t",
")",
"==",
"f",
".",
"ty... | Obtain the type name of a GRPC Message field. | [
"Obtain",
"the",
"type",
"name",
"of",
"a",
"GRPC",
"Message",
"field",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L48-L52 | train | 48,676 |
xenon-middleware/pyxenon | xenon/oop.py | get_field_description | def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower() | python | def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower() | [
"def",
"get_field_description",
"(",
"f",
")",
":",
"type_name",
"=",
"get_field_type",
"(",
"f",
")",
"if",
"type_name",
"==",
"'MESSAGE'",
"and",
"{",
"sf",
".",
"name",
"for",
"sf",
"in",
"f",
".",
"message_type",
".",
"fields",
"}",
"==",
"{",
"'ke... | Get the type description of a GRPC Message field. | [
"Get",
"the",
"type",
"description",
"of",
"a",
"GRPC",
"Message",
"field",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L55-L66 | train | 48,677 |
xenon-middleware/pyxenon | xenon/oop.py | make_static_request | def make_static_request(method, *args, **kwargs):
"""Creates a request from a static method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs) | python | def make_static_request(method, *args, **kwargs):
"""Creates a request from a static method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs) | [
"def",
"make_static_request",
"(",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
"and",
"not",
"use_signature",
":",
"raise",
"NotImplementedError",
"(",
"\"Only keyword arguments allowed in Python2\"",
")",
"if",
"use_signature",
":... | Creates a request from a static method function call. | [
"Creates",
"a",
"request",
"from",
"a",
"static",
"method",
"function",
"call",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L188-L209 | train | 48,678 |
xenon-middleware/pyxenon | xenon/oop.py | make_request | def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs) | python | def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs) | [
"def",
"make_request",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
"and",
"not",
"use_signature",
":",
"raise",
"NotImplementedError",
"(",
"\"Only keyword arguments allowed in Python2\"",
")",
"new_kwargs",
"="... | Creates a request from a method function call. | [
"Creates",
"a",
"request",
"from",
"a",
"method",
"function",
"call",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L212-L248 | train | 48,679 |
xenon-middleware/pyxenon | xenon/oop.py | method_wrapper | def method_wrapper(m):
"""Generates a method from a `GrpcMethod` definition."""
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method | python | def method_wrapper(m):
"""Generates a method from a `GrpcMethod` definition."""
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method | [
"def",
"method_wrapper",
"(",
"m",
")",
":",
"if",
"m",
".",
"is_simple",
":",
"def",
"simple_method",
"(",
"self",
")",
":",
"\"\"\"TODO: no docstring!\"\"\"",
"return",
"apply_transform",
"(",
"self",
".",
"__service__",
",",
"m",
".",
"output_transform",
",... | Generates a method from a `GrpcMethod` definition. | [
"Generates",
"a",
"method",
"from",
"a",
"GrpcMethod",
"definition",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L276-L316 | train | 48,680 |
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.request_name | def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request" | python | def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request" | [
"def",
"request_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"static",
"and",
"not",
"self",
".",
"uses_request",
":",
"return",
"'Empty'",
"if",
"not",
"self",
".",
"uses_request",
":",
"return",
"None",
"if",
"isinstance",
"(",
"self",
".",
"uses_r... | Generate the name of the request. | [
"Generate",
"the",
"name",
"of",
"the",
"request",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L104-L115 | train | 48,681 |
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.request_type | def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name) | python | def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name) | [
"def",
"request_type",
"(",
"self",
")",
":",
"if",
"self",
".",
"static",
"and",
"not",
"self",
".",
"uses_request",
":",
"return",
"getattr",
"(",
"xenon_pb2",
",",
"'Empty'",
")",
"if",
"not",
"self",
".",
"uses_request",
":",
"return",
"None",
"retur... | Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`. | [
"Retrieve",
"the",
"type",
"of",
"the",
"request",
"by",
"fetching",
"it",
"from",
"xenon",
".",
"proto",
".",
"xenon_pb2",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L118-L127 | train | 48,682 |
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.signature | def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters) | python | def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters) | [
"def",
"signature",
"(",
"self",
")",
":",
"if",
"not",
"use_signature",
":",
"raise",
"NotImplementedError",
"(",
"\"Python 3 only.\"",
")",
"if",
"self",
".",
"static",
":",
"parameters",
"=",
"(",
"Parameter",
"(",
"name",
"=",
"'cls'",
",",
"kind",
"="... | Create a signature for this method, only in Python > 3.4 | [
"Create",
"a",
"signature",
"for",
"this",
"method",
"only",
"in",
"Python",
">",
"3",
".",
"4"
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L131-L161 | train | 48,683 |
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.docstring | def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s | python | def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s | [
"def",
"docstring",
"(",
"self",
",",
"servicer",
")",
":",
"s",
"=",
"getattr",
"(",
"servicer",
",",
"to_lower_camel_case",
"(",
"self",
".",
"name",
")",
")",
".",
"__doc__",
"or",
"\"TODO: no docstring in .proto file\"",
"if",
"self",
".",
"uses_request",
... | Generate a doc-string. | [
"Generate",
"a",
"doc",
"-",
"string",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L164-L178 | train | 48,684 |
openstax/cnx-archive | cnxarchive/utils/text.py | slugify | def slugify(string):
"""Return a slug for the unicode_string.
(lowercase, only letters and numbers, hyphens replace spaces)
"""
filtered_string = []
if isinstance(string, str):
string = unicode(string, 'utf-8')
for i in unicodedata.normalize('NFKC', string):
cat = unicodedata.category(i)[0]
# filter out all the non letter and non number characters from the
# input (L is letter and N is number)
if cat in 'LN' or i in '-_':
filtered_string.append(i)
elif cat in 'Z':
filtered_string.append(' ')
return re.sub('\s+', '-', ''.join(filtered_string)).lower() | python | def slugify(string):
"""Return a slug for the unicode_string.
(lowercase, only letters and numbers, hyphens replace spaces)
"""
filtered_string = []
if isinstance(string, str):
string = unicode(string, 'utf-8')
for i in unicodedata.normalize('NFKC', string):
cat = unicodedata.category(i)[0]
# filter out all the non letter and non number characters from the
# input (L is letter and N is number)
if cat in 'LN' or i in '-_':
filtered_string.append(i)
elif cat in 'Z':
filtered_string.append(' ')
return re.sub('\s+', '-', ''.join(filtered_string)).lower() | [
"def",
"slugify",
"(",
"string",
")",
":",
"filtered_string",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"string",
",",
"str",
")",
":",
"string",
"=",
"unicode",
"(",
"string",
",",
"'utf-8'",
")",
"for",
"i",
"in",
"unicodedata",
".",
"normalize",
"(",
... | Return a slug for the unicode_string.
(lowercase, only letters and numbers, hyphens replace spaces) | [
"Return",
"a",
"slug",
"for",
"the",
"unicode_string",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/utils/text.py#L16-L32 | train | 48,685 |
ZELLMECHANIK-DRESDEN/dclab | dclab/external/skimage/_find_contours.py | find_contours | def find_contours(array, level,
fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])]
"""
array = np.asarray(array, dtype=np.double)
if array.ndim != 2:
raise ValueError('Only 2D arrays are supported.')
level = float(level)
if (fully_connected not in _param_options or
positive_orientation not in _param_options):
raise ValueError('Parameters "fully_connected" and'
' "positive_orientation" must be either "high" or'
' "low".')
point_list = _find_contours_cy.iterate_and_store(array, level,
fully_connected == 'high')
contours = _assemble_contours(_take_2(point_list))
if positive_orientation == 'high':
contours = [c[::-1] for c in contours]
return contours | python | def find_contours(array, level,
fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])]
"""
array = np.asarray(array, dtype=np.double)
if array.ndim != 2:
raise ValueError('Only 2D arrays are supported.')
level = float(level)
if (fully_connected not in _param_options or
positive_orientation not in _param_options):
raise ValueError('Parameters "fully_connected" and'
' "positive_orientation" must be either "high" or'
' "low".')
point_list = _find_contours_cy.iterate_and_store(array, level,
fully_connected == 'high')
contours = _assemble_contours(_take_2(point_list))
if positive_orientation == 'high':
contours = [c[::-1] for c in contours]
return contours | [
"def",
"find_contours",
"(",
"array",
",",
"level",
",",
"fully_connected",
"=",
"'low'",
",",
"positive_orientation",
"=",
"'low'",
")",
":",
"array",
"=",
"np",
".",
"asarray",
"(",
"array",
",",
"dtype",
"=",
"np",
".",
"double",
")",
"if",
"array",
... | Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])] | [
"Find",
"iso",
"-",
"valued",
"contours",
"in",
"a",
"2D",
"array",
"for",
"a",
"given",
"level",
"value",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/skimage/_find_contours.py#L9-L124 | train | 48,686 |
openstax/cnx-archive | cnxarchive/database.py | get_module_ident_from_ident_hash | def get_module_ident_from_ident_hash(ident_hash, cursor):
"""Return the moduleid for a given ``ident_hash``."""
try:
uuid, (mj_ver, mn_ver) = split_ident_hash(
ident_hash, split_version=True)
except IdentHashMissingVersion as e:
uuid, mj_ver, mn_ver = e.id, None, None
args = [uuid]
stmt = "SELECT module_ident FROM {} WHERE uuid = %s"
table_name = 'modules'
if mj_ver is None:
table_name = 'latest_modules'
else:
args.append(mj_ver)
stmt += " AND major_version = %s"
if mn_ver is not None:
args.append(mn_ver)
stmt += " AND minor_version = %s"
stmt = stmt.format(table_name)
cursor.execute(stmt, args)
try:
module_ident = cursor.fetchone()[0]
except TypeError: # NoneType
module_ident = None
return module_ident | python | def get_module_ident_from_ident_hash(ident_hash, cursor):
"""Return the moduleid for a given ``ident_hash``."""
try:
uuid, (mj_ver, mn_ver) = split_ident_hash(
ident_hash, split_version=True)
except IdentHashMissingVersion as e:
uuid, mj_ver, mn_ver = e.id, None, None
args = [uuid]
stmt = "SELECT module_ident FROM {} WHERE uuid = %s"
table_name = 'modules'
if mj_ver is None:
table_name = 'latest_modules'
else:
args.append(mj_ver)
stmt += " AND major_version = %s"
if mn_ver is not None:
args.append(mn_ver)
stmt += " AND minor_version = %s"
stmt = stmt.format(table_name)
cursor.execute(stmt, args)
try:
module_ident = cursor.fetchone()[0]
except TypeError: # NoneType
module_ident = None
return module_ident | [
"def",
"get_module_ident_from_ident_hash",
"(",
"ident_hash",
",",
"cursor",
")",
":",
"try",
":",
"uuid",
",",
"(",
"mj_ver",
",",
"mn_ver",
")",
"=",
"split_ident_hash",
"(",
"ident_hash",
",",
"split_version",
"=",
"True",
")",
"except",
"IdentHashMissingVers... | Return the moduleid for a given ``ident_hash``. | [
"Return",
"the",
"moduleid",
"for",
"a",
"given",
"ident_hash",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L98-L123 | train | 48,687 |
openstax/cnx-archive | cnxarchive/database.py | get_tree | def get_tree(ident_hash, cursor, as_collated=False):
"""Return a JSON representation of the binder tree for ``ident_hash``."""
uuid, version = split_ident_hash(ident_hash)
cursor.execute(SQL['get-tree-by-uuid-n-version'],
(uuid, version, as_collated,))
try:
tree = cursor.fetchone()[0]
except TypeError: # NoneType
raise ContentNotFound()
if type(tree) in (type(''), type(u'')):
return json.loads(tree)
else:
return tree | python | def get_tree(ident_hash, cursor, as_collated=False):
"""Return a JSON representation of the binder tree for ``ident_hash``."""
uuid, version = split_ident_hash(ident_hash)
cursor.execute(SQL['get-tree-by-uuid-n-version'],
(uuid, version, as_collated,))
try:
tree = cursor.fetchone()[0]
except TypeError: # NoneType
raise ContentNotFound()
if type(tree) in (type(''), type(u'')):
return json.loads(tree)
else:
return tree | [
"def",
"get_tree",
"(",
"ident_hash",
",",
"cursor",
",",
"as_collated",
"=",
"False",
")",
":",
"uuid",
",",
"version",
"=",
"split_ident_hash",
"(",
"ident_hash",
")",
"cursor",
".",
"execute",
"(",
"SQL",
"[",
"'get-tree-by-uuid-n-version'",
"]",
",",
"("... | Return a JSON representation of the binder tree for ``ident_hash``. | [
"Return",
"a",
"JSON",
"representation",
"of",
"the",
"binder",
"tree",
"for",
"ident_hash",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L126-L138 | train | 48,688 |
openstax/cnx-archive | cnxarchive/database.py | get_collated_content | def get_collated_content(ident_hash, context_ident_hash, cursor):
"""Return collated content for ``ident_hash``."""
cursor.execute(SQL['get-collated-content'],
(ident_hash, context_ident_hash,))
try:
return cursor.fetchone()[0]
except TypeError: # NoneType
return | python | def get_collated_content(ident_hash, context_ident_hash, cursor):
"""Return collated content for ``ident_hash``."""
cursor.execute(SQL['get-collated-content'],
(ident_hash, context_ident_hash,))
try:
return cursor.fetchone()[0]
except TypeError: # NoneType
return | [
"def",
"get_collated_content",
"(",
"ident_hash",
",",
"context_ident_hash",
",",
"cursor",
")",
":",
"cursor",
".",
"execute",
"(",
"SQL",
"[",
"'get-collated-content'",
"]",
",",
"(",
"ident_hash",
",",
"context_ident_hash",
",",
")",
")",
"try",
":",
"retur... | Return collated content for ``ident_hash``. | [
"Return",
"collated",
"content",
"for",
"ident_hash",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L141-L148 | train | 48,689 |
openstax/cnx-archive | cnxarchive/database.py | get_module_uuid | def get_module_uuid(plpy, moduleid):
"""Retrieve page uuid from legacy moduleid."""
plan = plpy.prepare("SELECT uuid FROM modules WHERE moduleid = $1;",
('text',))
result = plpy.execute(plan, (moduleid,), 1)
if result:
return result[0]['uuid'] | python | def get_module_uuid(plpy, moduleid):
"""Retrieve page uuid from legacy moduleid."""
plan = plpy.prepare("SELECT uuid FROM modules WHERE moduleid = $1;",
('text',))
result = plpy.execute(plan, (moduleid,), 1)
if result:
return result[0]['uuid'] | [
"def",
"get_module_uuid",
"(",
"plpy",
",",
"moduleid",
")",
":",
"plan",
"=",
"plpy",
".",
"prepare",
"(",
"\"SELECT uuid FROM modules WHERE moduleid = $1;\"",
",",
"(",
"'text'",
",",
")",
")",
"result",
"=",
"plpy",
".",
"execute",
"(",
"plan",
",",
"(",
... | Retrieve page uuid from legacy moduleid. | [
"Retrieve",
"page",
"uuid",
"from",
"legacy",
"moduleid",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L151-L157 | train | 48,690 |
openstax/cnx-archive | cnxarchive/database.py | set_version | def set_version(portal_type, legacy_version, td):
"""Set the major_version and minor_version if they are not set."""
modified = 'OK'
legacy_major, legacy_minor = legacy_version.split('.')
if portal_type == 'Collection':
# For collections, both major and minor needs to be set
modified = 'MODIFY'
td['new']['major_version'] = int(legacy_minor)
if td['new']['minor_version'] is None:
td['new']['minor_version'] = 1
elif portal_type == 'Module':
# For modules, major should be set and minor should be None
# N.B. a very few older modules had major=2 and minor zero-based.
# The odd math below adds one to the minor for those
modified = 'MODIFY'
td['new']['major_version'] = int(legacy_minor)+(int(legacy_major)-1)
td['new']['minor_version'] = None
return modified | python | def set_version(portal_type, legacy_version, td):
"""Set the major_version and minor_version if they are not set."""
modified = 'OK'
legacy_major, legacy_minor = legacy_version.split('.')
if portal_type == 'Collection':
# For collections, both major and minor needs to be set
modified = 'MODIFY'
td['new']['major_version'] = int(legacy_minor)
if td['new']['minor_version'] is None:
td['new']['minor_version'] = 1
elif portal_type == 'Module':
# For modules, major should be set and minor should be None
# N.B. a very few older modules had major=2 and minor zero-based.
# The odd math below adds one to the minor for those
modified = 'MODIFY'
td['new']['major_version'] = int(legacy_minor)+(int(legacy_major)-1)
td['new']['minor_version'] = None
return modified | [
"def",
"set_version",
"(",
"portal_type",
",",
"legacy_version",
",",
"td",
")",
":",
"modified",
"=",
"'OK'",
"legacy_major",
",",
"legacy_minor",
"=",
"legacy_version",
".",
"split",
"(",
"'.'",
")",
"if",
"portal_type",
"==",
"'Collection'",
":",
"# For col... | Set the major_version and minor_version if they are not set. | [
"Set",
"the",
"major_version",
"and",
"minor_version",
"if",
"they",
"are",
"not",
"set",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L362-L382 | train | 48,691 |
openstax/cnx-archive | cnxarchive/database.py | republish_module | def republish_module(td, plpy):
"""When a module is republished, create new minor versions of collections.
All collections (including subcollections) that this module is contained
in part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains a chapter sc1 v2.1,
which contains a module m1 v3. When m1 is updated, we will have a new row
in the modules table with m1 v4.
This trigger will create increment the minor versions of c1 and sc1, so
we'll have c1 v2.2, and sc1 v2.2. However, another chapter sc2 will stay
at v2.1.
We need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and sc1 v2.2 and c1 v2.2
instead of sc1 2.1 and c1 v2.1
"""
portal_type = td['new']['portal_type']
modified = 'OK'
moduleid = td['new']['moduleid']
legacy_version = td['new']['version']
submitter = td['new']['submitter']
submitlog = td['new']['submitlog']
modified = set_version(portal_type, legacy_version, td)
current_module_ident = get_current_module_ident(moduleid, plpy)
if current_module_ident:
# need to overide autogen uuid to keep it constant per moduleid
uuid = get_module_uuid(plpy, moduleid)
td['new']['uuid'] = uuid
modified = 'MODIFY'
else:
# nothing to do if the module/collection is new
return modified
if portal_type != 'Module':
# nothing else to do if something else is being published
return modified
# Module is republished
replace_map = {current_module_ident: td['new']['module_ident']}
# find the nested subcollections the module is in, and
# republish them, as well, adding to map, for all collections
# Note that map is for all subcollections, regardless of what
# collection they are contained in.
for sub_id in get_subcols(current_module_ident, plpy):
minor = next_version(sub_id, plpy)
new_subcol_ident = republish_collection(submitter, submitlog,
minor, sub_id, plpy)
replace_map[sub_id] = new_subcol_ident
# Now do each collection that contains this module
for collection_id in get_collections(current_module_ident, plpy):
minor = next_version(collection_id, plpy)
new_ident = republish_collection(submitter, submitlog, minor,
collection_id, plpy)
replace_map[collection_id] = new_ident
rebuild_collection_tree(collection_id, replace_map, plpy)
return modified | python | def republish_module(td, plpy):
"""When a module is republished, create new minor versions of collections.
All collections (including subcollections) that this module is contained
in part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains a chapter sc1 v2.1,
which contains a module m1 v3. When m1 is updated, we will have a new row
in the modules table with m1 v4.
This trigger will create increment the minor versions of c1 and sc1, so
we'll have c1 v2.2, and sc1 v2.2. However, another chapter sc2 will stay
at v2.1.
We need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and sc1 v2.2 and c1 v2.2
instead of sc1 2.1 and c1 v2.1
"""
portal_type = td['new']['portal_type']
modified = 'OK'
moduleid = td['new']['moduleid']
legacy_version = td['new']['version']
submitter = td['new']['submitter']
submitlog = td['new']['submitlog']
modified = set_version(portal_type, legacy_version, td)
current_module_ident = get_current_module_ident(moduleid, plpy)
if current_module_ident:
# need to overide autogen uuid to keep it constant per moduleid
uuid = get_module_uuid(plpy, moduleid)
td['new']['uuid'] = uuid
modified = 'MODIFY'
else:
# nothing to do if the module/collection is new
return modified
if portal_type != 'Module':
# nothing else to do if something else is being published
return modified
# Module is republished
replace_map = {current_module_ident: td['new']['module_ident']}
# find the nested subcollections the module is in, and
# republish them, as well, adding to map, for all collections
# Note that map is for all subcollections, regardless of what
# collection they are contained in.
for sub_id in get_subcols(current_module_ident, plpy):
minor = next_version(sub_id, plpy)
new_subcol_ident = republish_collection(submitter, submitlog,
minor, sub_id, plpy)
replace_map[sub_id] = new_subcol_ident
# Now do each collection that contains this module
for collection_id in get_collections(current_module_ident, plpy):
minor = next_version(collection_id, plpy)
new_ident = republish_collection(submitter, submitlog, minor,
collection_id, plpy)
replace_map[collection_id] = new_ident
rebuild_collection_tree(collection_id, replace_map, plpy)
return modified | [
"def",
"republish_module",
"(",
"td",
",",
"plpy",
")",
":",
"portal_type",
"=",
"td",
"[",
"'new'",
"]",
"[",
"'portal_type'",
"]",
"modified",
"=",
"'OK'",
"moduleid",
"=",
"td",
"[",
"'new'",
"]",
"[",
"'moduleid'",
"]",
"legacy_version",
"=",
"td",
... | When a module is republished, create new minor versions of collections.
All collections (including subcollections) that this module is contained
in part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains a chapter sc1 v2.1,
which contains a module m1 v3. When m1 is updated, we will have a new row
in the modules table with m1 v4.
This trigger will create increment the minor versions of c1 and sc1, so
we'll have c1 v2.2, and sc1 v2.2. However, another chapter sc2 will stay
at v2.1.
We need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and sc1 v2.2 and c1 v2.2
instead of sc1 2.1 and c1 v2.1 | [
"When",
"a",
"module",
"is",
"republished",
"create",
"new",
"minor",
"versions",
"of",
"collections",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L385-L446 | train | 48,692 |
openstax/cnx-archive | cnxarchive/database.py | republish_module_trigger | def republish_module_trigger(plpy, td):
"""Trigger called from postgres database when republishing a module.
When a module is republished, the versions of the collections that it is
part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains module m1 v3
m1 is updated, we have a new row in the modules table with m1 v4
this trigger will create increment the minor version of c1, so we'll have
c1 v2.2
we need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and c1 v2.2 instead of c1 v2.2
"""
# Is this an insert from legacy? Legacy always supplies the version.
is_legacy_publication = td['new']['version'] is not None
if not is_legacy_publication:
# Bail out, because this trigger only applies to legacy publications.
return "OK"
plpy.log('Trigger fired on %s' % (td['new']['moduleid'],))
modified = republish_module(td, plpy)
plpy.log('modified: {}'.format(modified))
plpy.log('insert values:\n{}\n'.format('\n'.join([
'{}: {}'.format(key, value)
for key, value in td['new'].items()])))
return modified | python | def republish_module_trigger(plpy, td):
"""Trigger called from postgres database when republishing a module.
When a module is republished, the versions of the collections that it is
part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains module m1 v3
m1 is updated, we have a new row in the modules table with m1 v4
this trigger will create increment the minor version of c1, so we'll have
c1 v2.2
we need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and c1 v2.2 instead of c1 v2.2
"""
# Is this an insert from legacy? Legacy always supplies the version.
is_legacy_publication = td['new']['version'] is not None
if not is_legacy_publication:
# Bail out, because this trigger only applies to legacy publications.
return "OK"
plpy.log('Trigger fired on %s' % (td['new']['moduleid'],))
modified = republish_module(td, plpy)
plpy.log('modified: {}'.format(modified))
plpy.log('insert values:\n{}\n'.format('\n'.join([
'{}: {}'.format(key, value)
for key, value in td['new'].items()])))
return modified | [
"def",
"republish_module_trigger",
"(",
"plpy",
",",
"td",
")",
":",
"# Is this an insert from legacy? Legacy always supplies the version.",
"is_legacy_publication",
"=",
"td",
"[",
"'new'",
"]",
"[",
"'version'",
"]",
"is",
"not",
"None",
"if",
"not",
"is_legacy_public... | Trigger called from postgres database when republishing a module.
When a module is republished, the versions of the collections that it is
part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains module m1 v3
m1 is updated, we have a new row in the modules table with m1 v4
this trigger will create increment the minor version of c1, so we'll have
c1 v2.2
we need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and c1 v2.2 instead of c1 v2.2 | [
"Trigger",
"called",
"from",
"postgres",
"database",
"when",
"republishing",
"a",
"module",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L449-L480 | train | 48,693 |
openstax/cnx-archive | cnxarchive/database.py | assign_version_default_trigger | def assign_version_default_trigger(plpy, td):
"""Trigger to fill in legacy data fields.
A compatibilty trigger to fill in legacy data fields that are not
populated when inserting publications from cnx-publishing.
If this is not a legacy publication the ``version`` will be set
based on the ``major_version`` value.
"""
modified_state = "OK"
portal_type = td['new']['portal_type']
version = td['new']['version']
minor_version = td['new']['minor_version']
# Set the minor version on collections, because by default it is
# None/Null, which is the correct default for modules.
if minor_version is None and portal_type in ('Collection',
'SubCollection'):
modified_state = "MODIFY"
td['new']['minor_version'] = 1
# Set the legacy version field based on the major version.
if version is None:
major_version = td['new']['major_version']
version = "1.{}".format(major_version)
modified_state = "MODIFY"
td['new']['version'] = version
return modified_state | python | def assign_version_default_trigger(plpy, td):
"""Trigger to fill in legacy data fields.
A compatibilty trigger to fill in legacy data fields that are not
populated when inserting publications from cnx-publishing.
If this is not a legacy publication the ``version`` will be set
based on the ``major_version`` value.
"""
modified_state = "OK"
portal_type = td['new']['portal_type']
version = td['new']['version']
minor_version = td['new']['minor_version']
# Set the minor version on collections, because by default it is
# None/Null, which is the correct default for modules.
if minor_version is None and portal_type in ('Collection',
'SubCollection'):
modified_state = "MODIFY"
td['new']['minor_version'] = 1
# Set the legacy version field based on the major version.
if version is None:
major_version = td['new']['major_version']
version = "1.{}".format(major_version)
modified_state = "MODIFY"
td['new']['version'] = version
return modified_state | [
"def",
"assign_version_default_trigger",
"(",
"plpy",
",",
"td",
")",
":",
"modified_state",
"=",
"\"OK\"",
"portal_type",
"=",
"td",
"[",
"'new'",
"]",
"[",
"'portal_type'",
"]",
"version",
"=",
"td",
"[",
"'new'",
"]",
"[",
"'version'",
"]",
"minor_version... | Trigger to fill in legacy data fields.
A compatibilty trigger to fill in legacy data fields that are not
populated when inserting publications from cnx-publishing.
If this is not a legacy publication the ``version`` will be set
based on the ``major_version`` value. | [
"Trigger",
"to",
"fill",
"in",
"legacy",
"data",
"fields",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L544-L572 | train | 48,694 |
openstax/cnx-archive | cnxarchive/views/exports.py | get_export | def get_export(request):
"""Retrieve an export file."""
settings = get_current_registry().settings
exports_dirs = settings['exports-directories'].split()
args = request.matchdict
ident_hash, type = args['ident_hash'], args['type']
id, version = split_ident_hash(ident_hash)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
try:
results = get_export_files(cursor, id, version, [type],
exports_dirs, read_file=True)
if not results:
raise httpexceptions.HTTPNotFound()
filename, mimetype, size, modtime, state, file_content \
= results[0]
except ExportError as e:
logger.debug(str(e))
raise httpexceptions.HTTPNotFound()
if state == 'missing':
raise httpexceptions.HTTPNotFound()
encoded_filename = urllib.quote(filename.encode('utf-8'))
resp = request.response
resp.status = "200 OK"
resp.content_type = mimetype
# Need both filename and filename* below for various browsers
# See: https://fastmail.blog/2011/06/24/download-non-english-filenames/
resp.content_disposition = "attachment; filename={fname};" \
" filename*=UTF-8''{fname}".format(
fname=encoded_filename)
resp.body = file_content
# Remove version and extension from filename, to recover title slug
slug_title = '-'.join(encoded_filename.split('-')[:-1])
resp.headerlist.append(
('Link', '<https://{}/contents/{}/{}> ;rel="Canonical"'.format(
request.host, id, slug_title)))
return resp | python | def get_export(request):
"""Retrieve an export file."""
settings = get_current_registry().settings
exports_dirs = settings['exports-directories'].split()
args = request.matchdict
ident_hash, type = args['ident_hash'], args['type']
id, version = split_ident_hash(ident_hash)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
try:
results = get_export_files(cursor, id, version, [type],
exports_dirs, read_file=True)
if not results:
raise httpexceptions.HTTPNotFound()
filename, mimetype, size, modtime, state, file_content \
= results[0]
except ExportError as e:
logger.debug(str(e))
raise httpexceptions.HTTPNotFound()
if state == 'missing':
raise httpexceptions.HTTPNotFound()
encoded_filename = urllib.quote(filename.encode('utf-8'))
resp = request.response
resp.status = "200 OK"
resp.content_type = mimetype
# Need both filename and filename* below for various browsers
# See: https://fastmail.blog/2011/06/24/download-non-english-filenames/
resp.content_disposition = "attachment; filename={fname};" \
" filename*=UTF-8''{fname}".format(
fname=encoded_filename)
resp.body = file_content
# Remove version and extension from filename, to recover title slug
slug_title = '-'.join(encoded_filename.split('-')[:-1])
resp.headerlist.append(
('Link', '<https://{}/contents/{}/{}> ;rel="Canonical"'.format(
request.host, id, slug_title)))
return resp | [
"def",
"get_export",
"(",
"request",
")",
":",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"exports_dirs",
"=",
"settings",
"[",
"'exports-directories'",
"]",
".",
"split",
"(",
")",
"args",
"=",
"request",
".",
"matchdict",
"ident_hash... | Retrieve an export file. | [
"Retrieve",
"an",
"export",
"file",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/exports.py#L50-L89 | train | 48,695 |
openstax/cnx-archive | cnxarchive/views/exports.py | get_export_files | def get_export_files(cursor, id, version, types, exports_dirs, read_file=True):
"""Retrieve files associated with document."""
request = get_current_request()
type_info = dict(request.registry.settings['_type_info'])
metadata = get_content_metadata(id, version, cursor)
legacy_id = metadata['legacy_id']
legacy_version = metadata['legacy_version']
reachable_dirs = [dir for dir in exports_dirs if safe_stat(dir)]
# 1 result per type, in the same order as the given types
results = []
for type in list(types):
if type not in type_info:
raise ExportError("invalid type '{}' requested.".format(type))
file_extension = type_info[type]['file_extension']
# skip module PDFs
if metadata['mediaType'] == MODULE_MIMETYPE and \
file_extension == 'pdf':
continue
mimetype = type_info[type]['mimetype']
filename = '{}@{}.{}'.format(id, version, file_extension)
legacy_filenames = [
'{}-{}.{}'.format(legacy_id, legacy_version, ext)
for ext in LEGACY_EXTENSION_MAP[file_extension]
]
slugify_title_filename = u'{}-{}.{}'.format(slugify(metadata['title']),
version, file_extension)
for dir in reachable_dirs:
filepath = os.path.join(dir, filename)
try:
if read_file:
with open(filepath, 'r') as file:
stats = os.fstat(file.fileno())
contents = file.read()
else:
stats = os.stat(filepath)
contents = None
modtime = fromtimestamp(int(stats.st_mtime))
results.append((slugify_title_filename, mimetype,
stats.st_size, modtime, 'good', contents))
break
except EnvironmentError:
pass
else:
# Let's see if the legacy file's there and make the new link
legacy_file_found = False
for dir in reachable_dirs:
filepath = os.path.join(dir, filename)
legacy_filepaths = [os.path.join(dir, fn)
for fn in legacy_filenames]
for legacy_filepath in legacy_filepaths:
try:
if read_file:
with open(legacy_filepath, 'r') as file:
stats = os.fstat(file.fileno())
contents = file.read()
else:
stats = os.stat(legacy_filepath)
contents = None
modtime = fromtimestamp(stats.st_mtime)
os.link(legacy_filepath, filepath)
results.append((slugify_title_filename, mimetype,
stats.st_size, modtime, 'good',
contents))
legacy_file_found = True
break
except EnvironmentError:
pass
if legacy_file_found:
break
else:
filenames = [filename] + legacy_filenames
log_formatted_filenames = '\n'.join([' - {}'.format(x)
for x in filenames])
logger.error("Could not find a file for '{}' at version '{}' "
"with any of the following file names:\n{}"
.format(id, version, log_formatted_filenames))
# No file, return "missing" state
results.append((slugify_title_filename, mimetype,
0, None, 'missing', None))
return results | python | def get_export_files(cursor, id, version, types, exports_dirs, read_file=True):
"""Retrieve files associated with document."""
request = get_current_request()
type_info = dict(request.registry.settings['_type_info'])
metadata = get_content_metadata(id, version, cursor)
legacy_id = metadata['legacy_id']
legacy_version = metadata['legacy_version']
reachable_dirs = [dir for dir in exports_dirs if safe_stat(dir)]
# 1 result per type, in the same order as the given types
results = []
for type in list(types):
if type not in type_info:
raise ExportError("invalid type '{}' requested.".format(type))
file_extension = type_info[type]['file_extension']
# skip module PDFs
if metadata['mediaType'] == MODULE_MIMETYPE and \
file_extension == 'pdf':
continue
mimetype = type_info[type]['mimetype']
filename = '{}@{}.{}'.format(id, version, file_extension)
legacy_filenames = [
'{}-{}.{}'.format(legacy_id, legacy_version, ext)
for ext in LEGACY_EXTENSION_MAP[file_extension]
]
slugify_title_filename = u'{}-{}.{}'.format(slugify(metadata['title']),
version, file_extension)
for dir in reachable_dirs:
filepath = os.path.join(dir, filename)
try:
if read_file:
with open(filepath, 'r') as file:
stats = os.fstat(file.fileno())
contents = file.read()
else:
stats = os.stat(filepath)
contents = None
modtime = fromtimestamp(int(stats.st_mtime))
results.append((slugify_title_filename, mimetype,
stats.st_size, modtime, 'good', contents))
break
except EnvironmentError:
pass
else:
# Let's see if the legacy file's there and make the new link
legacy_file_found = False
for dir in reachable_dirs:
filepath = os.path.join(dir, filename)
legacy_filepaths = [os.path.join(dir, fn)
for fn in legacy_filenames]
for legacy_filepath in legacy_filepaths:
try:
if read_file:
with open(legacy_filepath, 'r') as file:
stats = os.fstat(file.fileno())
contents = file.read()
else:
stats = os.stat(legacy_filepath)
contents = None
modtime = fromtimestamp(stats.st_mtime)
os.link(legacy_filepath, filepath)
results.append((slugify_title_filename, mimetype,
stats.st_size, modtime, 'good',
contents))
legacy_file_found = True
break
except EnvironmentError:
pass
if legacy_file_found:
break
else:
filenames = [filename] + legacy_filenames
log_formatted_filenames = '\n'.join([' - {}'.format(x)
for x in filenames])
logger.error("Could not find a file for '{}' at version '{}' "
"with any of the following file names:\n{}"
.format(id, version, log_formatted_filenames))
# No file, return "missing" state
results.append((slugify_title_filename, mimetype,
0, None, 'missing', None))
return results | [
"def",
"get_export_files",
"(",
"cursor",
",",
"id",
",",
"version",
",",
"types",
",",
"exports_dirs",
",",
"read_file",
"=",
"True",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"type_info",
"=",
"dict",
"(",
"request",
".",
"registry",
"."... | Retrieve files associated with document. | [
"Retrieve",
"files",
"associated",
"with",
"document",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/exports.py#L92-L178 | train | 48,696 |
def get_content_metadata(id, version, cursor):
    """Return metadata related to the content from the database."""
    params = dict(id=id, version=version)
    # FIXME Two round-trips are made here; ideally this collapses into
    # a single query.
    cursor.execute(SQL['get-module-metadata'], params)
    try:
        metadata = cursor.fetchone()[0]
        # The SQL exposes this value as ``current_version`` to dodge a
        # "column reference is ambiguous" error; rename it to the key
        # callers expect.
        metadata['version'] = metadata.pop('current_version')
        # FIXME Legacy 'portal_type' names still live in the database.
        # Future upgrades should replace the portal type with a mimetype
        # of 'application/vnd.org.cnx.(module|collection|folder|<etc>)'.
        # Until then, translate it here.
        metadata['mediaType'] = portaltype_to_mimetype(metadata['mediaType'])
    except (TypeError, IndexError,):  # None returned
        raise httpexceptions.HTTPNotFound()
    return metadata
"def",
"get_content_metadata",
"(",
"id",
",",
"version",
",",
"cursor",
")",
":",
"# Do the module lookup",
"args",
"=",
"dict",
"(",
"id",
"=",
"id",
",",
"version",
"=",
"version",
")",
"# FIXME We are doing two queries here that can hopefully be",
"# condens... | Return metadata related to the content from the database. | [
"Return",
"metadata",
"related",
"to",
"the",
"content",
"from",
"the",
"database",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/helpers.py#L61-L83 | train | 48,697 |
def find_contours_level(density, x, y, level, closed=False):
    """Find iso-valued density contours for a given level value

    Parameters
    ----------
    density: 2d ndarray of shape (M, N)
        Kernel density estimate for which to compute the contours
    x: 2d ndarray of shape (M, N) or 1d ndarray of size M
        X-values corresponding to `kde`
    y: 2d ndarray of shape (M, N) or 1d ndarray of size M
        Y-values corresponding to `kde`
    level: float between 0 and 1
        Value along which to find contours in `kde` relative
        to its maximum kde
    closed: bool
        Whether contours should be closed at the kde boundary

    Returns
    -------
    contours: list of ndarrays of shape (P, 2)
        Contours found for the given level value

    See Also
    --------
    skimage.measure.find_contours: Contour finding algorithm used
    """
    if level <= 0 or level >= 1:
        raise ValueError("`level` must be in (0,1), got '{}'!".format(level))
    # Turn the relative level into an absolute density value.
    abs_level = level * density.max()
    # Collapse meshgrid-style coordinate arrays down to 1d axes.
    if len(x.shape) == 2:
        assert np.all(x[:, 0] == x[:, 1])
        x = x[:, 0]
    if len(y.shape) == 2:
        assert np.all(y[0, :] == y[1, :])
        y = y[0, :]
    if closed:
        # Zero-padding forces every contour to close inside the grid.
        density = np.pad(density, ((1, 1), (1, 1)), mode="constant")
        shift = 1
    else:
        # Without padding, contours may terminate at the kde boundary.
        shift = 0
    contours = []
    for idx_cont in find_contours(density, abs_level):
        # Map fractional array indices back onto the x/y coordinates.
        cx = np.interp(x=idx_cont[:, 0] - shift,
                       xp=range(x.size),
                       fp=x)
        cy = np.interp(x=idx_cont[:, 1] - shift,
                       xp=range(y.size),
                       fp=y)
        contours.append(np.stack((cx, cy), axis=1))
    return contours
"def",
"find_contours_level",
"(",
"density",
",",
"x",
",",
"y",
",",
"level",
",",
"closed",
"=",
"False",
")",
":",
"if",
"level",
">=",
"1",
"or",
"level",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"`level` must be in (0,1), got '{}'!\"",
".",
"for... | Find iso-valued density contours for a given level value
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
level: float between 0 and 1
Value along which to find contours in `kde` relative
to its maximum kde
Returns
-------
contours: list of ndarrays of shape (P, 2)
Contours found for the given level value
See Also
--------
skimage.measure.find_contours: Contour finding algorithm used | [
"Find",
"iso",
"-",
"valued",
"density",
"contours",
"for",
"a",
"given",
"level",
"value"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_contours.py#L12-L67 | train | 48,698 |
def get_quantile_levels(density, x, y, xp, yp, q, normalize=True):
    """Compute density levels for given quantiles by interpolation

    For a given 2D density, compute the density levels at which
    the resulting contours contain the fraction `1-q` of all
    data points. E.g. for a measurement of 1000 events, all
    contours at the level corresponding to a quantile of
    `q=0.95` (95th percentile) contain 50 events (5%).

    Parameters
    ----------
    density: 2d ndarray of shape (M, N)
        Kernel density estimate for which to compute the contours
    x: 2d ndarray of shape (M, N) or 1d ndarray of size M
        X-values corresponding to `kde`
    y: 2d ndarray of shape (M, N) or 1d ndarray of size M
        Y-values corresponding to `kde`
    xp: 1d ndarray of size D
        Event x-data from which to compute the quantile
    yp: 1d ndarray of size D
        Event y-data from which to compute the quantile
    q: array_like or float between 0 and 1
        Quantile along which to find contours in `kde` relative
        to its maximum
    normalize: bool
        Whether output levels should be normalized to the maximum
        of `density`

    Returns
    -------
    level: float
        Contours level corresponding to the given quantile

    Notes
    -----
    NaN-values events in `xp` and `yp` are ignored.
    """
    # Collapse meshgrid-style coordinate arrays down to 1d axes.
    if len(x.shape) == 2:
        assert np.all(x[:, 0] == x[:, 1])
        x = x[:, 0]
    if len(y.shape) == 2:
        assert np.all(y[0, :] == y[1, :])
        y = y[0, :]
    # Drop events flagged as bad (NaN-valued coordinates).
    invalid = get_bad_vals(xp, yp)
    xp = xp[~invalid]
    yp = yp[~invalid]
    # Rescale both axes so that the grid spacing for x and y is
    # comparable during interpolation.
    scale_x = x.max()
    x = x / scale_x
    xp = xp / scale_x
    scale_y = y.max()
    y = y / scale_y
    yp = yp / scale_y
    # Evaluate the density at every remaining event position.
    dp = spint.interpn((x, y), density,
                       (xp, yp),
                       method='linear',
                       bounds_error=False,
                       fill_value=0)
    if normalize:
        dp /= density.max()
    if not np.isscalar(q):
        q = np.array(q)
    return np.nanpercentile(dp, q=q * 100)
"def",
"get_quantile_levels",
"(",
"density",
",",
"x",
",",
"y",
",",
"xp",
",",
"yp",
",",
"q",
",",
"normalize",
"=",
"True",
")",
":",
"# xy coordinates",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
":",
"assert",
"np",
".",
"all",
"... | Compute density levels for given quantiles by interpolation
For a given 2D density, compute the density levels at which
the resulting contours contain the fraction `1-q` of all
data points. E.g. for a measurement of 1000 events, all
contours at the level corresponding to a quantile of
`q=0.95` (95th percentile) contain 50 events (5%).
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
xp: 1d ndarray of size D
Event x-data from which to compute the quantile
yp: 1d ndarray of size D
Event y-data from which to compute the quantile
q: array_like or float between 0 and 1
Quantile along which to find contours in `kde` relative
to its maximum
normalize: bool
Whether output levels should be normalized to the maximum
of `density`
Returns
-------
level: float
Contours level corresponding to the given quantile
Notes
-----
NaN-values events in `xp` and `yp` are ignored. | [
"Compute",
"density",
"levels",
"for",
"given",
"quantiles",
"by",
"interpolation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_contours.py#L70-L143 | train | 48,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.