repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead._extract_alternative_fields | def _extract_alternative_fields(self, fields, default=None, field_type=float):
'''
Extract a value for a list of ordered fields.
Return the value of the first existed field in the list
'''
for field in fields:
if field in self.tags:
if field_type is float:
value = eval_frac(self.tags[field].values[0])
if field_type is str:
value = str(self.tags[field].values)
if field_type is int:
value = int(self.tags[field].values[0])
return value, field
return default, None | python | def _extract_alternative_fields(self, fields, default=None, field_type=float):
'''
Extract a value for a list of ordered fields.
Return the value of the first existed field in the list
'''
for field in fields:
if field in self.tags:
if field_type is float:
value = eval_frac(self.tags[field].values[0])
if field_type is str:
value = str(self.tags[field].values)
if field_type is int:
value = int(self.tags[field].values[0])
return value, field
return default, None | [
"def",
"_extract_alternative_fields",
"(",
"self",
",",
"fields",
",",
"default",
"=",
"None",
",",
"field_type",
"=",
"float",
")",
":",
"for",
"field",
"in",
"fields",
":",
"if",
"field",
"in",
"self",
".",
"tags",
":",
"if",
"field_type",
"is",
"float... | Extract a value for a list of ordered fields.
Return the value of the first existed field in the list | [
"Extract",
"a",
"value",
"for",
"a",
"list",
"of",
"ordered",
"fields",
".",
"Return",
"the",
"value",
"of",
"the",
"first",
"existed",
"field",
"in",
"the",
"list"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L89-L103 | train | 201,600 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_geo | def extract_geo(self):
'''
Extract geo-related information from exif
'''
altitude = self.extract_altitude()
dop = self.extract_dop()
lon, lat = self.extract_lon_lat()
d = {}
if lon is not None and lat is not None:
d['latitude'] = lat
d['longitude'] = lon
if altitude is not None:
d['altitude'] = altitude
if dop is not None:
d['dop'] = dop
return d | python | def extract_geo(self):
'''
Extract geo-related information from exif
'''
altitude = self.extract_altitude()
dop = self.extract_dop()
lon, lat = self.extract_lon_lat()
d = {}
if lon is not None and lat is not None:
d['latitude'] = lat
d['longitude'] = lon
if altitude is not None:
d['altitude'] = altitude
if dop is not None:
d['dop'] = dop
return d | [
"def",
"extract_geo",
"(",
"self",
")",
":",
"altitude",
"=",
"self",
".",
"extract_altitude",
"(",
")",
"dop",
"=",
"self",
".",
"extract_dop",
"(",
")",
"lon",
",",
"lat",
"=",
"self",
".",
"extract_lon_lat",
"(",
")",
"d",
"=",
"{",
"}",
"if",
"... | Extract geo-related information from exif | [
"Extract",
"geo",
"-",
"related",
"information",
"from",
"exif"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L209-L224 | train | 201,601 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_gps_time | def extract_gps_time(self):
'''
Extract timestamp from GPS field.
'''
gps_date_field = "GPS GPSDate"
gps_time_field = "GPS GPSTimeStamp"
gps_time = 0
if gps_date_field in self.tags and gps_time_field in self.tags:
date = str(self.tags[gps_date_field].values).split(":")
if int(date[0]) == 0 or int(date[1]) == 0 or int(date[2]) == 0:
return None
t = self.tags[gps_time_field]
gps_time = datetime.datetime(
year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
hour=int(eval_frac(t.values[0])),
minute=int(eval_frac(t.values[1])),
second=int(eval_frac(t.values[2])),
)
microseconds = datetime.timedelta(
microseconds=int((eval_frac(t.values[2]) % 1) * 1e6))
gps_time += microseconds
return gps_time | python | def extract_gps_time(self):
'''
Extract timestamp from GPS field.
'''
gps_date_field = "GPS GPSDate"
gps_time_field = "GPS GPSTimeStamp"
gps_time = 0
if gps_date_field in self.tags and gps_time_field in self.tags:
date = str(self.tags[gps_date_field].values).split(":")
if int(date[0]) == 0 or int(date[1]) == 0 or int(date[2]) == 0:
return None
t = self.tags[gps_time_field]
gps_time = datetime.datetime(
year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
hour=int(eval_frac(t.values[0])),
minute=int(eval_frac(t.values[1])),
second=int(eval_frac(t.values[2])),
)
microseconds = datetime.timedelta(
microseconds=int((eval_frac(t.values[2]) % 1) * 1e6))
gps_time += microseconds
return gps_time | [
"def",
"extract_gps_time",
"(",
"self",
")",
":",
"gps_date_field",
"=",
"\"GPS GPSDate\"",
"gps_time_field",
"=",
"\"GPS GPSTimeStamp\"",
"gps_time",
"=",
"0",
"if",
"gps_date_field",
"in",
"self",
".",
"tags",
"and",
"gps_time_field",
"in",
"self",
".",
"tags",
... | Extract timestamp from GPS field. | [
"Extract",
"timestamp",
"from",
"GPS",
"field",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L226-L249 | train | 201,602 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_exif | def extract_exif(self):
'''
Extract a list of exif infos
'''
width, height = self.extract_image_size()
make, model = self.extract_make(), self.extract_model()
orientation = self.extract_orientation()
geo = self.extract_geo()
capture = self.extract_capture_time()
direction = self.extract_direction()
d = {
'width': width,
'height': height,
'orientation': orientation,
'direction': direction,
'make': make,
'model': model,
'capture_time': capture
}
d['gps'] = geo
return d | python | def extract_exif(self):
'''
Extract a list of exif infos
'''
width, height = self.extract_image_size()
make, model = self.extract_make(), self.extract_model()
orientation = self.extract_orientation()
geo = self.extract_geo()
capture = self.extract_capture_time()
direction = self.extract_direction()
d = {
'width': width,
'height': height,
'orientation': orientation,
'direction': direction,
'make': make,
'model': model,
'capture_time': capture
}
d['gps'] = geo
return d | [
"def",
"extract_exif",
"(",
"self",
")",
":",
"width",
",",
"height",
"=",
"self",
".",
"extract_image_size",
"(",
")",
"make",
",",
"model",
"=",
"self",
".",
"extract_make",
"(",
")",
",",
"self",
".",
"extract_model",
"(",
")",
"orientation",
"=",
"... | Extract a list of exif infos | [
"Extract",
"a",
"list",
"of",
"exif",
"infos"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L251-L271 | train | 201,603 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_image_size | def extract_image_size(self):
'''
Extract image height and width
'''
width, _ = self._extract_alternative_fields(
['Image ImageWidth', 'EXIF ExifImageWidth'], -1, int)
height, _ = self._extract_alternative_fields(
['Image ImageLength', 'EXIF ExifImageLength'], -1, int)
return width, height | python | def extract_image_size(self):
'''
Extract image height and width
'''
width, _ = self._extract_alternative_fields(
['Image ImageWidth', 'EXIF ExifImageWidth'], -1, int)
height, _ = self._extract_alternative_fields(
['Image ImageLength', 'EXIF ExifImageLength'], -1, int)
return width, height | [
"def",
"extract_image_size",
"(",
"self",
")",
":",
"width",
",",
"_",
"=",
"self",
".",
"_extract_alternative_fields",
"(",
"[",
"'Image ImageWidth'",
",",
"'EXIF ExifImageWidth'",
"]",
",",
"-",
"1",
",",
"int",
")",
"height",
",",
"_",
"=",
"self",
".",... | Extract image height and width | [
"Extract",
"image",
"height",
"and",
"width"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L273-L281 | train | 201,604 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_make | def extract_make(self):
'''
Extract camera make
'''
fields = ['EXIF LensMake', 'Image Make']
make, _ = self._extract_alternative_fields(
fields, default='none', field_type=str)
return make | python | def extract_make(self):
'''
Extract camera make
'''
fields = ['EXIF LensMake', 'Image Make']
make, _ = self._extract_alternative_fields(
fields, default='none', field_type=str)
return make | [
"def",
"extract_make",
"(",
"self",
")",
":",
"fields",
"=",
"[",
"'EXIF LensMake'",
",",
"'Image Make'",
"]",
"make",
",",
"_",
"=",
"self",
".",
"_extract_alternative_fields",
"(",
"fields",
",",
"default",
"=",
"'none'",
",",
"field_type",
"=",
"str",
"... | Extract camera make | [
"Extract",
"camera",
"make"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L306-L313 | train | 201,605 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_model | def extract_model(self):
'''
Extract camera model
'''
fields = ['EXIF LensModel', 'Image Model']
model, _ = self._extract_alternative_fields(
fields, default='none', field_type=str)
return model | python | def extract_model(self):
'''
Extract camera model
'''
fields = ['EXIF LensModel', 'Image Model']
model, _ = self._extract_alternative_fields(
fields, default='none', field_type=str)
return model | [
"def",
"extract_model",
"(",
"self",
")",
":",
"fields",
"=",
"[",
"'EXIF LensModel'",
",",
"'Image Model'",
"]",
"model",
",",
"_",
"=",
"self",
".",
"_extract_alternative_fields",
"(",
"fields",
",",
"default",
"=",
"'none'",
",",
"field_type",
"=",
"str",... | Extract camera model | [
"Extract",
"camera",
"model"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L315-L322 | train | 201,606 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.extract_orientation | def extract_orientation(self):
'''
Extract image orientation
'''
fields = ['Image Orientation']
orientation, _ = self._extract_alternative_fields(
fields, default=1, field_type=int)
if orientation not in range(1, 9):
return 1
return orientation | python | def extract_orientation(self):
'''
Extract image orientation
'''
fields = ['Image Orientation']
orientation, _ = self._extract_alternative_fields(
fields, default=1, field_type=int)
if orientation not in range(1, 9):
return 1
return orientation | [
"def",
"extract_orientation",
"(",
"self",
")",
":",
"fields",
"=",
"[",
"'Image Orientation'",
"]",
"orientation",
",",
"_",
"=",
"self",
".",
"_extract_alternative_fields",
"(",
"fields",
",",
"default",
"=",
"1",
",",
"field_type",
"=",
"int",
")",
"if",
... | Extract image orientation | [
"Extract",
"image",
"orientation"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L333-L342 | train | 201,607 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.fields_exist | def fields_exist(self, fields):
'''
Check existence of a list fields in exif
'''
for rexif in fields:
vflag = False
for subrexif in rexif:
if subrexif in self.tags:
vflag = True
if not vflag:
print("Missing required EXIF tag: {0} for image {1}".format(
rexif[0], self.filename))
return False
return True | python | def fields_exist(self, fields):
'''
Check existence of a list fields in exif
'''
for rexif in fields:
vflag = False
for subrexif in rexif:
if subrexif in self.tags:
vflag = True
if not vflag:
print("Missing required EXIF tag: {0} for image {1}".format(
rexif[0], self.filename))
return False
return True | [
"def",
"fields_exist",
"(",
"self",
",",
"fields",
")",
":",
"for",
"rexif",
"in",
"fields",
":",
"vflag",
"=",
"False",
"for",
"subrexif",
"in",
"rexif",
":",
"if",
"subrexif",
"in",
"self",
".",
"tags",
":",
"vflag",
"=",
"True",
"if",
"not",
"vfla... | Check existence of a list fields in exif | [
"Check",
"existence",
"of",
"a",
"list",
"fields",
"in",
"exif"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L360-L373 | train | 201,608 |
mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.mapillary_tag_exists | def mapillary_tag_exists(self):
'''
Check existence of required Mapillary tags
'''
description_tag = "Image ImageDescription"
if description_tag not in self.tags:
return False
for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
return False
return True | python | def mapillary_tag_exists(self):
'''
Check existence of required Mapillary tags
'''
description_tag = "Image ImageDescription"
if description_tag not in self.tags:
return False
for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
return False
return True | [
"def",
"mapillary_tag_exists",
"(",
"self",
")",
":",
"description_tag",
"=",
"\"Image ImageDescription\"",
"if",
"description_tag",
"not",
"in",
"self",
".",
"tags",
":",
"return",
"False",
"for",
"requirement",
"in",
"[",
"\"MAPSequenceUUID\"",
",",
"\"MAPSettings... | Check existence of required Mapillary tags | [
"Check",
"existence",
"of",
"required",
"Mapillary",
"tags"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L375-L385 | train | 201,609 |
mapillary/mapillary_tools | bin/download_images.py | query_search_api | def query_search_api(min_lat, max_lat, min_lon, max_lon, max_results):
'''
Send query to the search API and get dict with image data.
'''
# Create URL
params = urllib.urlencode(zip(
['client_id', 'bbox', 'per_page'],
[CLIENT_ID, ','.join([str(min_lon), str(min_lat), str(
max_lon), str(max_lat)]), str(max_results)]
))
print(MAPILLARY_API_IM_SEARCH_URL + params)
# Get data from server, then parse JSON
query = urllib2.urlopen(MAPILLARY_API_IM_SEARCH_URL + params).read()
query = json.loads(query)['features']
print("Result: {0} images in area.".format(len(query)))
return query | python | def query_search_api(min_lat, max_lat, min_lon, max_lon, max_results):
'''
Send query to the search API and get dict with image data.
'''
# Create URL
params = urllib.urlencode(zip(
['client_id', 'bbox', 'per_page'],
[CLIENT_ID, ','.join([str(min_lon), str(min_lat), str(
max_lon), str(max_lat)]), str(max_results)]
))
print(MAPILLARY_API_IM_SEARCH_URL + params)
# Get data from server, then parse JSON
query = urllib2.urlopen(MAPILLARY_API_IM_SEARCH_URL + params).read()
query = json.loads(query)['features']
print("Result: {0} images in area.".format(len(query)))
return query | [
"def",
"query_search_api",
"(",
"min_lat",
",",
"max_lat",
",",
"min_lon",
",",
"max_lon",
",",
"max_results",
")",
":",
"# Create URL",
"params",
"=",
"urllib",
".",
"urlencode",
"(",
"zip",
"(",
"[",
"'client_id'",
",",
"'bbox'",
",",
"'per_page'",
"]",
... | Send query to the search API and get dict with image data. | [
"Send",
"query",
"to",
"the",
"search",
"API",
"and",
"get",
"dict",
"with",
"image",
"data",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/bin/download_images.py#L31-L49 | train | 201,610 |
mapillary/mapillary_tools | bin/download_images.py | download_images | def download_images(query, path, size=1024):
'''
Download images in query result to path.
Return list of downloaded images with lat,lon.
There are four sizes available: 320, 640, 1024 (default), or 2048.
'''
im_size = "thumb-{0}.jpg".format(size)
im_list = []
for im in query:
# Use key to create url to download from and filename to save into
key = im['properties']['key']
url = MAPILLARY_API_IM_RETRIEVE_URL + key + '/' + im_size
filename = key + ".jpg"
try:
# Get image and save to disk
image = urllib.URLopener()
image.retrieve(url, path + filename)
# Log filename and GPS location
coords = ",".join(map(str, im['geometry']['coordinates']))
im_list.append([filename, coords])
print("Successfully downloaded: {0}".format(filename))
except KeyboardInterrupt:
break
except Exception as e:
print("Failed to download: {} due to {}".format(filename, e))
return im_list | python | def download_images(query, path, size=1024):
'''
Download images in query result to path.
Return list of downloaded images with lat,lon.
There are four sizes available: 320, 640, 1024 (default), or 2048.
'''
im_size = "thumb-{0}.jpg".format(size)
im_list = []
for im in query:
# Use key to create url to download from and filename to save into
key = im['properties']['key']
url = MAPILLARY_API_IM_RETRIEVE_URL + key + '/' + im_size
filename = key + ".jpg"
try:
# Get image and save to disk
image = urllib.URLopener()
image.retrieve(url, path + filename)
# Log filename and GPS location
coords = ",".join(map(str, im['geometry']['coordinates']))
im_list.append([filename, coords])
print("Successfully downloaded: {0}".format(filename))
except KeyboardInterrupt:
break
except Exception as e:
print("Failed to download: {} due to {}".format(filename, e))
return im_list | [
"def",
"download_images",
"(",
"query",
",",
"path",
",",
"size",
"=",
"1024",
")",
":",
"im_size",
"=",
"\"thumb-{0}.jpg\"",
".",
"format",
"(",
"size",
")",
"im_list",
"=",
"[",
"]",
"for",
"im",
"in",
"query",
":",
"# Use key to create url to download fro... | Download images in query result to path.
Return list of downloaded images with lat,lon.
There are four sizes available: 320, 640, 1024 (default), or 2048. | [
"Download",
"images",
"in",
"query",
"result",
"to",
"path",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/bin/download_images.py#L52-L83 | train | 201,611 |
mapillary/mapillary_tools | mapillary_tools/gps_parser.py | get_lat_lon_time_from_gpx | def get_lat_lon_time_from_gpx(gpx_file, local_time=True):
'''
Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lon).
GPX stores time in UTC, by default we assume your camera used the local time
and convert accordingly.
'''
with open(gpx_file, 'r') as f:
gpx = gpxpy.parse(f)
points = []
if len(gpx.tracks) > 0:
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
t = utc_to_localtime(point.time) if local_time else point.time
points.append((t, point.latitude, point.longitude, point.elevation))
if len(gpx.waypoints) > 0:
for point in gpx.waypoints:
t = utc_to_localtime(point.time) if local_time else point.time
points.append((t, point.latitude, point.longitude, point.elevation))
# sort by time just in case
points.sort()
return points | python | def get_lat_lon_time_from_gpx(gpx_file, local_time=True):
'''
Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lon).
GPX stores time in UTC, by default we assume your camera used the local time
and convert accordingly.
'''
with open(gpx_file, 'r') as f:
gpx = gpxpy.parse(f)
points = []
if len(gpx.tracks) > 0:
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
t = utc_to_localtime(point.time) if local_time else point.time
points.append((t, point.latitude, point.longitude, point.elevation))
if len(gpx.waypoints) > 0:
for point in gpx.waypoints:
t = utc_to_localtime(point.time) if local_time else point.time
points.append((t, point.latitude, point.longitude, point.elevation))
# sort by time just in case
points.sort()
return points | [
"def",
"get_lat_lon_time_from_gpx",
"(",
"gpx_file",
",",
"local_time",
"=",
"True",
")",
":",
"with",
"open",
"(",
"gpx_file",
",",
"'r'",
")",
"as",
"f",
":",
"gpx",
"=",
"gpxpy",
".",
"parse",
"(",
"f",
")",
"points",
"=",
"[",
"]",
"if",
"len",
... | Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lon).
GPX stores time in UTC, by default we assume your camera used the local time
and convert accordingly. | [
"Read",
"location",
"and",
"time",
"stamps",
"from",
"a",
"track",
"in",
"a",
"GPX",
"file",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/gps_parser.py#L20-L47 | train | 201,612 |
mapillary/mapillary_tools | mapillary_tools/gps_parser.py | get_lat_lon_time_from_nmea | def get_lat_lon_time_from_nmea(nmea_file, local_time=True):
'''
Read location and time stamps from a track in a NMEA file.
Returns a list of tuples (time, lat, lon).
GPX stores time in UTC, by default we assume your camera used the local time
and convert accordingly.
'''
with open(nmea_file, "r") as f:
lines = f.readlines()
lines = [l.rstrip("\n\r") for l in lines]
# Get initial date
for l in lines:
if "GPRMC" in l:
data = pynmea2.parse(l)
date = data.datetime.date()
break
# Parse GPS trace
points = []
for l in lines:
if "GPRMC" in l:
data = pynmea2.parse(l)
date = data.datetime.date()
if "$GPGGA" in l:
data = pynmea2.parse(l)
timestamp = datetime.datetime.combine(date, data.timestamp)
lat, lon, alt = data.latitude, data.longitude, data.altitude
points.append((timestamp, lat, lon, alt))
points.sort()
return points | python | def get_lat_lon_time_from_nmea(nmea_file, local_time=True):
'''
Read location and time stamps from a track in a NMEA file.
Returns a list of tuples (time, lat, lon).
GPX stores time in UTC, by default we assume your camera used the local time
and convert accordingly.
'''
with open(nmea_file, "r") as f:
lines = f.readlines()
lines = [l.rstrip("\n\r") for l in lines]
# Get initial date
for l in lines:
if "GPRMC" in l:
data = pynmea2.parse(l)
date = data.datetime.date()
break
# Parse GPS trace
points = []
for l in lines:
if "GPRMC" in l:
data = pynmea2.parse(l)
date = data.datetime.date()
if "$GPGGA" in l:
data = pynmea2.parse(l)
timestamp = datetime.datetime.combine(date, data.timestamp)
lat, lon, alt = data.latitude, data.longitude, data.altitude
points.append((timestamp, lat, lon, alt))
points.sort()
return points | [
"def",
"get_lat_lon_time_from_nmea",
"(",
"nmea_file",
",",
"local_time",
"=",
"True",
")",
":",
"with",
"open",
"(",
"nmea_file",
",",
"\"r\"",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"lines",
"=",
"[",
"l",
".",
"rstrip",
... | Read location and time stamps from a track in a NMEA file.
Returns a list of tuples (time, lat, lon).
GPX stores time in UTC, by default we assume your camera used the local time
and convert accordingly. | [
"Read",
"location",
"and",
"time",
"stamps",
"from",
"a",
"track",
"in",
"a",
"NMEA",
"file",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/gps_parser.py#L50-L84 | train | 201,613 |
mapillary/mapillary_tools | mapillary_tools/geo.py | ecef_from_lla | def ecef_from_lla(lat, lon, alt):
'''
Compute ECEF XYZ from latitude, longitude and altitude.
All using the WGS94 model.
Altitude is the distance to the WGS94 ellipsoid.
Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm
'''
a2 = WGS84_a ** 2
b2 = WGS84_b ** 2
lat = math.radians(lat)
lon = math.radians(lon)
L = 1.0 / math.sqrt(a2 * math.cos(lat) ** 2 + b2 * math.sin(lat) ** 2)
x = (a2 * L + alt) * math.cos(lat) * math.cos(lon)
y = (a2 * L + alt) * math.cos(lat) * math.sin(lon)
z = (b2 * L + alt) * math.sin(lat)
return x, y, z | python | def ecef_from_lla(lat, lon, alt):
'''
Compute ECEF XYZ from latitude, longitude and altitude.
All using the WGS94 model.
Altitude is the distance to the WGS94 ellipsoid.
Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm
'''
a2 = WGS84_a ** 2
b2 = WGS84_b ** 2
lat = math.radians(lat)
lon = math.radians(lon)
L = 1.0 / math.sqrt(a2 * math.cos(lat) ** 2 + b2 * math.sin(lat) ** 2)
x = (a2 * L + alt) * math.cos(lat) * math.cos(lon)
y = (a2 * L + alt) * math.cos(lat) * math.sin(lon)
z = (b2 * L + alt) * math.sin(lat)
return x, y, z | [
"def",
"ecef_from_lla",
"(",
"lat",
",",
"lon",
",",
"alt",
")",
":",
"a2",
"=",
"WGS84_a",
"**",
"2",
"b2",
"=",
"WGS84_b",
"**",
"2",
"lat",
"=",
"math",
".",
"radians",
"(",
"lat",
")",
"lon",
"=",
"math",
".",
"radians",
"(",
"lon",
")",
"L... | Compute ECEF XYZ from latitude, longitude and altitude.
All using the WGS94 model.
Altitude is the distance to the WGS94 ellipsoid.
Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm | [
"Compute",
"ECEF",
"XYZ",
"from",
"latitude",
"longitude",
"and",
"altitude",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L13-L30 | train | 201,614 |
mapillary/mapillary_tools | mapillary_tools/geo.py | get_max_distance_from_start | def get_max_distance_from_start(latlon_track):
'''
Returns the radius of an entire GPS track. Used to calculate whether or not the entire sequence was just stationary video
Takes a sequence of points as input
'''
latlon_list = []
# Remove timestamps from list
for idx, point in enumerate(latlon_track):
lat = latlon_track[idx][1]
lon = latlon_track[idx][2]
alt = latlon_track[idx][3]
latlon_list.append([lat, lon, alt])
start_position = latlon_list[0]
max_distance = 0
for position in latlon_list:
distance = gps_distance(start_position, position)
if distance > max_distance:
max_distance = distance
return max_distance | python | def get_max_distance_from_start(latlon_track):
'''
Returns the radius of an entire GPS track. Used to calculate whether or not the entire sequence was just stationary video
Takes a sequence of points as input
'''
latlon_list = []
# Remove timestamps from list
for idx, point in enumerate(latlon_track):
lat = latlon_track[idx][1]
lon = latlon_track[idx][2]
alt = latlon_track[idx][3]
latlon_list.append([lat, lon, alt])
start_position = latlon_list[0]
max_distance = 0
for position in latlon_list:
distance = gps_distance(start_position, position)
if distance > max_distance:
max_distance = distance
return max_distance | [
"def",
"get_max_distance_from_start",
"(",
"latlon_track",
")",
":",
"latlon_list",
"=",
"[",
"]",
"# Remove timestamps from list",
"for",
"idx",
",",
"point",
"in",
"enumerate",
"(",
"latlon_track",
")",
":",
"lat",
"=",
"latlon_track",
"[",
"idx",
"]",
"[",
... | Returns the radius of an entire GPS track. Used to calculate whether or not the entire sequence was just stationary video
Takes a sequence of points as input | [
"Returns",
"the",
"radius",
"of",
"an",
"entire",
"GPS",
"track",
".",
"Used",
"to",
"calculate",
"whether",
"or",
"not",
"the",
"entire",
"sequence",
"was",
"just",
"stationary",
"video",
"Takes",
"a",
"sequence",
"of",
"points",
"as",
"input"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L50-L69 | train | 201,615 |
mapillary/mapillary_tools | mapillary_tools/geo.py | get_total_distance_traveled | def get_total_distance_traveled(latlon_track):
'''
Returns the total distance traveled of a GPS track. Used to calculate whether or not the entire sequence was just stationary video
Takes a sequence of points as input
'''
latlon_list = []
# Remove timestamps from list
for idx, point in enumerate(latlon_track):
lat = latlon_track[idx][1]
lon = latlon_track[idx][2]
alt = latlon_track[idx][3]
latlon_list.append([lat, lon, alt])
total_distance = 0
last_position = latlon_list[0]
for position in latlon_list:
total_distance += gps_distance(last_position, position)
last_position = position
return total_distance | python | def get_total_distance_traveled(latlon_track):
'''
Returns the total distance traveled of a GPS track. Used to calculate whether or not the entire sequence was just stationary video
Takes a sequence of points as input
'''
latlon_list = []
# Remove timestamps from list
for idx, point in enumerate(latlon_track):
lat = latlon_track[idx][1]
lon = latlon_track[idx][2]
alt = latlon_track[idx][3]
latlon_list.append([lat, lon, alt])
total_distance = 0
last_position = latlon_list[0]
for position in latlon_list:
total_distance += gps_distance(last_position, position)
last_position = position
return total_distance | [
"def",
"get_total_distance_traveled",
"(",
"latlon_track",
")",
":",
"latlon_list",
"=",
"[",
"]",
"# Remove timestamps from list",
"for",
"idx",
",",
"point",
"in",
"enumerate",
"(",
"latlon_track",
")",
":",
"lat",
"=",
"latlon_track",
"[",
"idx",
"]",
"[",
... | Returns the total distance traveled of a GPS track. Used to calculate whether or not the entire sequence was just stationary video
Takes a sequence of points as input | [
"Returns",
"the",
"total",
"distance",
"traveled",
"of",
"a",
"GPS",
"track",
".",
"Used",
"to",
"calculate",
"whether",
"or",
"not",
"the",
"entire",
"sequence",
"was",
"just",
"stationary",
"video",
"Takes",
"a",
"sequence",
"of",
"points",
"as",
"input"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L72-L90 | train | 201,616 |
mapillary/mapillary_tools | mapillary_tools/geo.py | dms_to_decimal | def dms_to_decimal(degrees, minutes, seconds, hemisphere):
'''
Convert from degrees, minutes, seconds to decimal degrees.
@author: mprins
'''
dms = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
if hemisphere in "WwSs":
dms = -1 * dms
return dms | python | def dms_to_decimal(degrees, minutes, seconds, hemisphere):
'''
Convert from degrees, minutes, seconds to decimal degrees.
@author: mprins
'''
dms = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
if hemisphere in "WwSs":
dms = -1 * dms
return dms | [
"def",
"dms_to_decimal",
"(",
"degrees",
",",
"minutes",
",",
"seconds",
",",
"hemisphere",
")",
":",
"dms",
"=",
"float",
"(",
"degrees",
")",
"+",
"float",
"(",
"minutes",
")",
"/",
"60",
"+",
"float",
"(",
"seconds",
")",
"/",
"3600",
"if",
"hemis... | Convert from degrees, minutes, seconds to decimal degrees.
@author: mprins | [
"Convert",
"from",
"degrees",
"minutes",
"seconds",
"to",
"decimal",
"degrees",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L102-L111 | train | 201,617 |
mapillary/mapillary_tools | mapillary_tools/geo.py | decimal_to_dms | def decimal_to_dms(value, precision):
'''
Convert decimal position to degrees, minutes, seconds in a fromat supported by EXIF
'''
deg = math.floor(value)
min = math.floor((value - deg) * 60)
sec = math.floor((value - deg - min / 60) * 3600 * precision)
return ((deg, 1), (min, 1), (sec, precision)) | python | def decimal_to_dms(value, precision):
'''
Convert decimal position to degrees, minutes, seconds in a fromat supported by EXIF
'''
deg = math.floor(value)
min = math.floor((value - deg) * 60)
sec = math.floor((value - deg - min / 60) * 3600 * precision)
return ((deg, 1), (min, 1), (sec, precision)) | [
"def",
"decimal_to_dms",
"(",
"value",
",",
"precision",
")",
":",
"deg",
"=",
"math",
".",
"floor",
"(",
"value",
")",
"min",
"=",
"math",
".",
"floor",
"(",
"(",
"value",
"-",
"deg",
")",
"*",
"60",
")",
"sec",
"=",
"math",
".",
"floor",
"(",
... | Convert decimal position to degrees, minutes, seconds in a fromat supported by EXIF | [
"Convert",
"decimal",
"position",
"to",
"degrees",
"minutes",
"seconds",
"in",
"a",
"fromat",
"supported",
"by",
"EXIF"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L114-L122 | train | 201,618 |
mapillary/mapillary_tools | mapillary_tools/geo.py | compute_bearing | def compute_bearing(start_lat, start_lon, end_lat, end_lon):
'''
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
'''
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lon = math.radians(start_lon)
end_lat = math.radians(end_lat)
end_lon = math.radians(end_lon)
dLong = end_lon - start_lon
dPhi = math.log(math.tan(end_lat / 2.0 + math.pi / 4.0) /
math.tan(start_lat / 2.0 + math.pi / 4.0))
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = (2.0 * math.pi + dLong)
y = math.sin(dLong) * math.cos(end_lat)
x = math.cos(start_lat) * math.sin(end_lat) - \
math.sin(start_lat) * math.cos(end_lat) * math.cos(dLong)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing | python | def compute_bearing(start_lat, start_lon, end_lat, end_lon):
'''
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
'''
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lon = math.radians(start_lon)
end_lat = math.radians(end_lat)
end_lon = math.radians(end_lon)
dLong = end_lon - start_lon
dPhi = math.log(math.tan(end_lat / 2.0 + math.pi / 4.0) /
math.tan(start_lat / 2.0 + math.pi / 4.0))
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = (2.0 * math.pi + dLong)
y = math.sin(dLong) * math.cos(end_lat)
x = math.cos(start_lat) * math.sin(end_lat) - \
math.sin(start_lat) * math.cos(end_lat) * math.cos(dLong)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing | [
"def",
"compute_bearing",
"(",
"start_lat",
",",
"start_lon",
",",
"end_lat",
",",
"end_lon",
")",
":",
"# make sure everything is in radians",
"start_lat",
"=",
"math",
".",
"radians",
"(",
"start_lat",
")",
"start_lon",
"=",
"math",
".",
"radians",
"(",
"start... | Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html | [
"Get",
"the",
"compass",
"bearing",
"from",
"start",
"to",
"end",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L143-L171 | train | 201,619 |
mapillary/mapillary_tools | mapillary_tools/geo.py | diff_bearing | def diff_bearing(b1, b2):
'''
Compute difference between two bearings
'''
d = abs(b2 - b1)
d = 360 - d if d > 180 else d
return d | python | def diff_bearing(b1, b2):
'''
Compute difference between two bearings
'''
d = abs(b2 - b1)
d = 360 - d if d > 180 else d
return d | [
"def",
"diff_bearing",
"(",
"b1",
",",
"b2",
")",
":",
"d",
"=",
"abs",
"(",
"b2",
"-",
"b1",
")",
"d",
"=",
"360",
"-",
"d",
"if",
"d",
">",
"180",
"else",
"d",
"return",
"d"
] | Compute difference between two bearings | [
"Compute",
"difference",
"between",
"two",
"bearings"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L174-L180 | train | 201,620 |
mapillary/mapillary_tools | mapillary_tools/geo.py | normalize_bearing | def normalize_bearing(bearing, check_hex=False):
'''
Normalize bearing and convert from hex if
'''
if bearing > 360 and check_hex:
# fix negative value wrongly parsed in exifread
# -360 degree -> 4294966935 when converting from hex
bearing = bin(int(bearing))[2:]
bearing = ''.join([str(int(int(a) == 0)) for a in bearing])
bearing = -float(int(bearing, 2))
bearing %= 360
return bearing | python | def normalize_bearing(bearing, check_hex=False):
'''
Normalize bearing and convert from hex if
'''
if bearing > 360 and check_hex:
# fix negative value wrongly parsed in exifread
# -360 degree -> 4294966935 when converting from hex
bearing = bin(int(bearing))[2:]
bearing = ''.join([str(int(int(a) == 0)) for a in bearing])
bearing = -float(int(bearing, 2))
bearing %= 360
return bearing | [
"def",
"normalize_bearing",
"(",
"bearing",
",",
"check_hex",
"=",
"False",
")",
":",
"if",
"bearing",
">",
"360",
"and",
"check_hex",
":",
"# fix negative value wrongly parsed in exifread",
"# -360 degree -> 4294966935 when converting from hex",
"bearing",
"=",
"bin",
"(... | Normalize bearing and convert from hex if | [
"Normalize",
"bearing",
"and",
"convert",
"from",
"hex",
"if"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L191-L202 | train | 201,621 |
mapillary/mapillary_tools | mapillary_tools/geo.py | interpolate_lat_lon | def interpolate_lat_lon(points, t, max_dt=1):
'''
Return interpolated lat, lon and compass bearing for time t.
Points is a list of tuples (time, lat, lon, elevation), t a datetime object.
'''
# find the enclosing points in sorted list
if (t <= points[0][0]) or (t >= points[-1][0]):
if t <= points[0][0]:
dt = abs((points[0][0] - t).total_seconds())
else:
dt = (t - points[-1][0]).total_seconds()
if dt > max_dt:
raise ValueError(
"time t not in scope of gpx file by {} seconds".format(dt))
else:
print(
"time t not in scope of gpx file by {} seconds, extrapolating...".format(dt))
if t < points[0][0]:
before = points[0]
after = points[1]
else:
before = points[-2]
after = points[-1]
bearing = compute_bearing(before[1], before[2], after[1], after[2])
if t == points[0][0]:
x = points[0]
return (x[1], x[2], bearing, x[3])
if t == points[-1][0]:
x = points[-1]
return (x[1], x[2], bearing, x[3])
else:
for i, point in enumerate(points):
if t < point[0]:
if i > 0:
before = points[i - 1]
else:
before = points[i]
after = points[i]
break
# weight based on time
weight = (t - before[0]).total_seconds() / \
(after[0] - before[0]).total_seconds()
# simple linear interpolation in case points are not the same
if before[1] == after[1]:
lat = before[1]
else:
lat = before[1] - weight * before[1] + weight * after[1]
if before[2] == after[2]:
lon = before[2]
else:
lon = before[2] - weight * before[2] + weight * after[2]
# camera angle
bearing = compute_bearing(before[1], before[2], after[1], after[2])
# altitude
if before[3] is not None:
ele = before[3] - weight * before[3] + weight * after[3]
else:
ele = None
return lat, lon, bearing, ele | python | def interpolate_lat_lon(points, t, max_dt=1):
'''
Return interpolated lat, lon and compass bearing for time t.
Points is a list of tuples (time, lat, lon, elevation), t a datetime object.
'''
# find the enclosing points in sorted list
if (t <= points[0][0]) or (t >= points[-1][0]):
if t <= points[0][0]:
dt = abs((points[0][0] - t).total_seconds())
else:
dt = (t - points[-1][0]).total_seconds()
if dt > max_dt:
raise ValueError(
"time t not in scope of gpx file by {} seconds".format(dt))
else:
print(
"time t not in scope of gpx file by {} seconds, extrapolating...".format(dt))
if t < points[0][0]:
before = points[0]
after = points[1]
else:
before = points[-2]
after = points[-1]
bearing = compute_bearing(before[1], before[2], after[1], after[2])
if t == points[0][0]:
x = points[0]
return (x[1], x[2], bearing, x[3])
if t == points[-1][0]:
x = points[-1]
return (x[1], x[2], bearing, x[3])
else:
for i, point in enumerate(points):
if t < point[0]:
if i > 0:
before = points[i - 1]
else:
before = points[i]
after = points[i]
break
# weight based on time
weight = (t - before[0]).total_seconds() / \
(after[0] - before[0]).total_seconds()
# simple linear interpolation in case points are not the same
if before[1] == after[1]:
lat = before[1]
else:
lat = before[1] - weight * before[1] + weight * after[1]
if before[2] == after[2]:
lon = before[2]
else:
lon = before[2] - weight * before[2] + weight * after[2]
# camera angle
bearing = compute_bearing(before[1], before[2], after[1], after[2])
# altitude
if before[3] is not None:
ele = before[3] - weight * before[3] + weight * after[3]
else:
ele = None
return lat, lon, bearing, ele | [
"def",
"interpolate_lat_lon",
"(",
"points",
",",
"t",
",",
"max_dt",
"=",
"1",
")",
":",
"# find the enclosing points in sorted list",
"if",
"(",
"t",
"<=",
"points",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"or",
"(",
"t",
">=",
"points",
"[",
"-",
"1",
... | Return interpolated lat, lon and compass bearing for time t.
Points is a list of tuples (time, lat, lon, elevation), t a datetime object. | [
"Return",
"interpolated",
"lat",
"lon",
"and",
"compass",
"bearing",
"for",
"time",
"t",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L205-L273 | train | 201,622 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.add_image_description | def add_image_description(self, dict):
"""Add a dict to image description."""
if self._ef is not None:
self._ef['0th'][piexif.ImageIFD.ImageDescription] = json.dumps(
dict) | python | def add_image_description(self, dict):
"""Add a dict to image description."""
if self._ef is not None:
self._ef['0th'][piexif.ImageIFD.ImageDescription] = json.dumps(
dict) | [
"def",
"add_image_description",
"(",
"self",
",",
"dict",
")",
":",
"if",
"self",
".",
"_ef",
"is",
"not",
"None",
":",
"self",
".",
"_ef",
"[",
"'0th'",
"]",
"[",
"piexif",
".",
"ImageIFD",
".",
"ImageDescription",
"]",
"=",
"json",
".",
"dumps",
"(... | Add a dict to image description. | [
"Add",
"a",
"dict",
"to",
"image",
"description",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L25-L29 | train | 201,623 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.add_orientation | def add_orientation(self, orientation):
"""Add image orientation to image."""
if not orientation in range(1, 9):
print_error(
"Error value for orientation, value must be in range(1,9), setting to default 1")
self._ef['0th'][piexif.ImageIFD.Orientation] = 1
else:
self._ef['0th'][piexif.ImageIFD.Orientation] = orientation | python | def add_orientation(self, orientation):
"""Add image orientation to image."""
if not orientation in range(1, 9):
print_error(
"Error value for orientation, value must be in range(1,9), setting to default 1")
self._ef['0th'][piexif.ImageIFD.Orientation] = 1
else:
self._ef['0th'][piexif.ImageIFD.Orientation] = orientation | [
"def",
"add_orientation",
"(",
"self",
",",
"orientation",
")",
":",
"if",
"not",
"orientation",
"in",
"range",
"(",
"1",
",",
"9",
")",
":",
"print_error",
"(",
"\"Error value for orientation, value must be in range(1,9), setting to default 1\"",
")",
"self",
".",
... | Add image orientation to image. | [
"Add",
"image",
"orientation",
"to",
"image",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L31-L38 | train | 201,624 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.add_date_time_original | def add_date_time_original(self, date_time, time_format='%Y:%m:%d %H:%M:%S.%f'):
"""Add date time original."""
try:
DateTimeOriginal = date_time.strftime(time_format)[:-3]
self._ef['Exif'][piexif.ExifIFD.DateTimeOriginal] = DateTimeOriginal
except Exception as e:
print_error("Error writing DateTimeOriginal, due to " + str(e)) | python | def add_date_time_original(self, date_time, time_format='%Y:%m:%d %H:%M:%S.%f'):
"""Add date time original."""
try:
DateTimeOriginal = date_time.strftime(time_format)[:-3]
self._ef['Exif'][piexif.ExifIFD.DateTimeOriginal] = DateTimeOriginal
except Exception as e:
print_error("Error writing DateTimeOriginal, due to " + str(e)) | [
"def",
"add_date_time_original",
"(",
"self",
",",
"date_time",
",",
"time_format",
"=",
"'%Y:%m:%d %H:%M:%S.%f'",
")",
":",
"try",
":",
"DateTimeOriginal",
"=",
"date_time",
".",
"strftime",
"(",
"time_format",
")",
"[",
":",
"-",
"3",
"]",
"self",
".",
"_e... | Add date time original. | [
"Add",
"date",
"time",
"original",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L40-L46 | train | 201,625 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.add_image_history | def add_image_history(self, data):
"""Add arbitrary string to ImageHistory tag."""
self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data) | python | def add_image_history(self, data):
"""Add arbitrary string to ImageHistory tag."""
self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data) | [
"def",
"add_image_history",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_ef",
"[",
"'0th'",
"]",
"[",
"piexif",
".",
"ImageIFD",
".",
"ImageHistory",
"]",
"=",
"json",
".",
"dumps",
"(",
"data",
")"
] | Add arbitrary string to ImageHistory tag. | [
"Add",
"arbitrary",
"string",
"to",
"ImageHistory",
"tag",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L57-L59 | train | 201,626 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.add_camera_make_model | def add_camera_make_model(self, make, model):
''' Add camera make and model.'''
self._ef['0th'][piexif.ImageIFD.Make] = make
self._ef['0th'][piexif.ImageIFD.Model] = model | python | def add_camera_make_model(self, make, model):
''' Add camera make and model.'''
self._ef['0th'][piexif.ImageIFD.Make] = make
self._ef['0th'][piexif.ImageIFD.Model] = model | [
"def",
"add_camera_make_model",
"(",
"self",
",",
"make",
",",
"model",
")",
":",
"self",
".",
"_ef",
"[",
"'0th'",
"]",
"[",
"piexif",
".",
"ImageIFD",
".",
"Make",
"]",
"=",
"make",
"self",
".",
"_ef",
"[",
"'0th'",
"]",
"[",
"piexif",
".",
"Imag... | Add camera make and model. | [
"Add",
"camera",
"make",
"and",
"model",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L61-L64 | train | 201,627 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.add_direction | def add_direction(self, direction, ref="T", precision=100):
"""Add image direction."""
# normalize direction
direction = direction % 360.0
self._ef["GPS"][piexif.GPSIFD.GPSImgDirection] = (
int(abs(direction) * precision), precision)
self._ef["GPS"][piexif.GPSIFD.GPSImgDirectionRef] = ref | python | def add_direction(self, direction, ref="T", precision=100):
"""Add image direction."""
# normalize direction
direction = direction % 360.0
self._ef["GPS"][piexif.GPSIFD.GPSImgDirection] = (
int(abs(direction) * precision), precision)
self._ef["GPS"][piexif.GPSIFD.GPSImgDirectionRef] = ref | [
"def",
"add_direction",
"(",
"self",
",",
"direction",
",",
"ref",
"=",
"\"T\"",
",",
"precision",
"=",
"100",
")",
":",
"# normalize direction",
"direction",
"=",
"direction",
"%",
"360.0",
"self",
".",
"_ef",
"[",
"\"GPS\"",
"]",
"[",
"piexif",
".",
"G... | Add image direction. | [
"Add",
"image",
"direction",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L78-L84 | train | 201,628 |
mapillary/mapillary_tools | mapillary_tools/exif_write.py | ExifEdit.write | def write(self, filename=None):
"""Save exif data to file."""
if filename is None:
filename = self._filename
exif_bytes = piexif.dump(self._ef)
with open(self._filename, "rb") as fin:
img = fin.read()
try:
piexif.insert(exif_bytes, img, filename)
except IOError:
type, value, traceback = sys.exc_info()
print >> sys.stderr, "Error saving file:", value | python | def write(self, filename=None):
"""Save exif data to file."""
if filename is None:
filename = self._filename
exif_bytes = piexif.dump(self._ef)
with open(self._filename, "rb") as fin:
img = fin.read()
try:
piexif.insert(exif_bytes, img, filename)
except IOError:
type, value, traceback = sys.exc_info()
print >> sys.stderr, "Error saving file:", value | [
"def",
"write",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"_filename",
"exif_bytes",
"=",
"piexif",
".",
"dump",
"(",
"self",
".",
"_ef",
")",
"with",
"open",
"(",
"self",
... | Save exif data to file. | [
"Save",
"exif",
"data",
"to",
"file",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L97-L111 | train | 201,629 |
mapillary/mapillary_tools | mapillary_tools/processing.py | estimate_sub_second_time | def estimate_sub_second_time(files, interval=0.0):
'''
Estimate the capture time of a sequence with sub-second precision
EXIF times are only given up to a second of precision. This function
uses the given interval between shots to estimate the time inside that
second that each picture was taken.
'''
if interval <= 0.0:
return [exif_time(f) for f in tqdm(files, desc="Reading image capture time")]
onesecond = datetime.timedelta(seconds=1.0)
T = datetime.timedelta(seconds=interval)
for i, f in tqdm(enumerate(files), desc="Estimating subsecond time"):
m = exif_time(f)
if not m:
pass
if i == 0:
smin = m
smax = m + onesecond
else:
m0 = m - T * i
smin = max(smin, m0)
smax = min(smax, m0 + onesecond)
if not smin or not smax:
return None
if smin > smax:
# ERROR LOG
print('Interval not compatible with EXIF times')
return None
else:
s = smin + (smax - smin) / 2
return [s + T * i for i in range(len(files))] | python | def estimate_sub_second_time(files, interval=0.0):
'''
Estimate the capture time of a sequence with sub-second precision
EXIF times are only given up to a second of precision. This function
uses the given interval between shots to estimate the time inside that
second that each picture was taken.
'''
if interval <= 0.0:
return [exif_time(f) for f in tqdm(files, desc="Reading image capture time")]
onesecond = datetime.timedelta(seconds=1.0)
T = datetime.timedelta(seconds=interval)
for i, f in tqdm(enumerate(files), desc="Estimating subsecond time"):
m = exif_time(f)
if not m:
pass
if i == 0:
smin = m
smax = m + onesecond
else:
m0 = m - T * i
smin = max(smin, m0)
smax = min(smax, m0 + onesecond)
if not smin or not smax:
return None
if smin > smax:
# ERROR LOG
print('Interval not compatible with EXIF times')
return None
else:
s = smin + (smax - smin) / 2
return [s + T * i for i in range(len(files))] | [
"def",
"estimate_sub_second_time",
"(",
"files",
",",
"interval",
"=",
"0.0",
")",
":",
"if",
"interval",
"<=",
"0.0",
":",
"return",
"[",
"exif_time",
"(",
"f",
")",
"for",
"f",
"in",
"tqdm",
"(",
"files",
",",
"desc",
"=",
"\"Reading image capture time\"... | Estimate the capture time of a sequence with sub-second precision
EXIF times are only given up to a second of precision. This function
uses the given interval between shots to estimate the time inside that
second that each picture was taken. | [
"Estimate",
"the",
"capture",
"time",
"of",
"a",
"sequence",
"with",
"sub",
"-",
"second",
"precision",
"EXIF",
"times",
"are",
"only",
"given",
"up",
"to",
"a",
"second",
"of",
"precision",
".",
"This",
"function",
"uses",
"the",
"given",
"interval",
"bet... | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/processing.py#L43-L74 | train | 201,630 |
mapillary/mapillary_tools | mapillary_tools/processing.py | interpolate_timestamp | def interpolate_timestamp(capture_times):
'''
Interpolate time stamps in case of identical timestamps
'''
timestamps = []
num_file = len(capture_times)
time_dict = OrderedDict()
if num_file < 2:
return capture_times
# trace identical timestamps (always assume capture_times is sorted)
time_dict = OrderedDict()
for i, t in enumerate(capture_times):
if t not in time_dict:
time_dict[t] = {
"count": 0,
"pointer": 0
}
interval = 0
if i != 0:
interval = (t - capture_times[i - 1]).total_seconds()
time_dict[capture_times[i - 1]]["interval"] = interval
time_dict[t]["count"] += 1
if len(time_dict) >= 2:
# set time interval as the last available time interval
time_dict[time_dict.keys()[-1]
]["interval"] = time_dict[time_dict.keys()[-2]]["interval"]
else:
# set time interval assuming capture interval is 1 second
time_dict[time_dict.keys()[0]]["interval"] = time_dict[time_dict.keys()[
0]]["count"] * 1.
# interpolate timestamps
for t in capture_times:
d = time_dict[t]
s = datetime.timedelta(
seconds=d["pointer"] * d["interval"] / float(d["count"]))
updated_time = t + s
time_dict[t]["pointer"] += 1
timestamps.append(updated_time)
return timestamps | python | def interpolate_timestamp(capture_times):
'''
Interpolate time stamps in case of identical timestamps
'''
timestamps = []
num_file = len(capture_times)
time_dict = OrderedDict()
if num_file < 2:
return capture_times
# trace identical timestamps (always assume capture_times is sorted)
time_dict = OrderedDict()
for i, t in enumerate(capture_times):
if t not in time_dict:
time_dict[t] = {
"count": 0,
"pointer": 0
}
interval = 0
if i != 0:
interval = (t - capture_times[i - 1]).total_seconds()
time_dict[capture_times[i - 1]]["interval"] = interval
time_dict[t]["count"] += 1
if len(time_dict) >= 2:
# set time interval as the last available time interval
time_dict[time_dict.keys()[-1]
]["interval"] = time_dict[time_dict.keys()[-2]]["interval"]
else:
# set time interval assuming capture interval is 1 second
time_dict[time_dict.keys()[0]]["interval"] = time_dict[time_dict.keys()[
0]]["count"] * 1.
# interpolate timestamps
for t in capture_times:
d = time_dict[t]
s = datetime.timedelta(
seconds=d["pointer"] * d["interval"] / float(d["count"]))
updated_time = t + s
time_dict[t]["pointer"] += 1
timestamps.append(updated_time)
return timestamps | [
"def",
"interpolate_timestamp",
"(",
"capture_times",
")",
":",
"timestamps",
"=",
"[",
"]",
"num_file",
"=",
"len",
"(",
"capture_times",
")",
"time_dict",
"=",
"OrderedDict",
"(",
")",
"if",
"num_file",
"<",
"2",
":",
"return",
"capture_times",
"# trace iden... | Interpolate time stamps in case of identical timestamps | [
"Interpolate",
"time",
"stamps",
"in",
"case",
"of",
"identical",
"timestamps"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/processing.py#L1076-L1122 | train | 201,631 |
mapillary/mapillary_tools | mapillary_tools/ffmpeg.py | extract_stream | def extract_stream(source, dest, stream_id):
'''
Get the data out of the file using ffmpeg
@param filename: mp4 filename
'''
if not os.path.isfile(source):
raise IOError('No such file: ' + source)
subprocess.check_output([
'ffmpeg',
'-i', source,
'-y', # overwrite - potentially dangerous
'-nostats',
'-loglevel', '0',
'-codec', 'copy',
'-map', '0:' + str(stream_id),
'-f', 'rawvideo',
dest,
]) | python | def extract_stream(source, dest, stream_id):
'''
Get the data out of the file using ffmpeg
@param filename: mp4 filename
'''
if not os.path.isfile(source):
raise IOError('No such file: ' + source)
subprocess.check_output([
'ffmpeg',
'-i', source,
'-y', # overwrite - potentially dangerous
'-nostats',
'-loglevel', '0',
'-codec', 'copy',
'-map', '0:' + str(stream_id),
'-f', 'rawvideo',
dest,
]) | [
"def",
"extract_stream",
"(",
"source",
",",
"dest",
",",
"stream_id",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"source",
")",
":",
"raise",
"IOError",
"(",
"'No such file: '",
"+",
"source",
")",
"subprocess",
".",
"check_output",
"... | Get the data out of the file using ffmpeg
@param filename: mp4 filename | [
"Get",
"the",
"data",
"out",
"of",
"the",
"file",
"using",
"ffmpeg"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffmpeg.py#L42-L60 | train | 201,632 |
mapillary/mapillary_tools | mapillary_tools/exif_aux.py | verify_exif | def verify_exif(filename):
'''
Check that image file has the required EXIF fields.
Incompatible files will be ignored server side.
'''
# required tags in IFD name convention
required_exif = required_fields()
exif = ExifRead(filename)
required_exif_exist = exif.fields_exist(required_exif)
return required_exif_exist | python | def verify_exif(filename):
'''
Check that image file has the required EXIF fields.
Incompatible files will be ignored server side.
'''
# required tags in IFD name convention
required_exif = required_fields()
exif = ExifRead(filename)
required_exif_exist = exif.fields_exist(required_exif)
return required_exif_exist | [
"def",
"verify_exif",
"(",
"filename",
")",
":",
"# required tags in IFD name convention",
"required_exif",
"=",
"required_fields",
"(",
")",
"exif",
"=",
"ExifRead",
"(",
"filename",
")",
"required_exif_exist",
"=",
"exif",
".",
"fields_exist",
"(",
"required_exif",
... | Check that image file has the required EXIF fields.
Incompatible files will be ignored server side. | [
"Check",
"that",
"image",
"file",
"has",
"the",
"required",
"EXIF",
"fields",
".",
"Incompatible",
"files",
"will",
"be",
"ignored",
"server",
"side",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_aux.py#L35-L44 | train | 201,633 |
mapillary/mapillary_tools | mapillary_tools/exif_aux.py | verify_mapillary_tag | def verify_mapillary_tag(filepath):
filepath_keep_original = processing.processed_images_rootpath(filepath)
if os.path.isfile(filepath_keep_original):
filepath = filepath_keep_original
'''
Check that image file has the required Mapillary tag
'''
return ExifRead(filepath).mapillary_tag_exists() | python | def verify_mapillary_tag(filepath):
filepath_keep_original = processing.processed_images_rootpath(filepath)
if os.path.isfile(filepath_keep_original):
filepath = filepath_keep_original
'''
Check that image file has the required Mapillary tag
'''
return ExifRead(filepath).mapillary_tag_exists() | [
"def",
"verify_mapillary_tag",
"(",
"filepath",
")",
":",
"filepath_keep_original",
"=",
"processing",
".",
"processed_images_rootpath",
"(",
"filepath",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath_keep_original",
")",
":",
"filepath",
"=",
"filepa... | Check that image file has the required Mapillary tag | [
"Check",
"that",
"image",
"file",
"has",
"the",
"required",
"Mapillary",
"tag"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_aux.py#L47-L54 | train | 201,634 |
mapillary/mapillary_tools | mapillary_tools/ffprobe.py | FFStream.isAudio | def isAudio(self):
"""
Is this stream labelled as an audio stream?
"""
val=False
if self.__dict__['codec_type']:
if str(self.__dict__['codec_type']) == 'audio':
val=True
return val | python | def isAudio(self):
"""
Is this stream labelled as an audio stream?
"""
val=False
if self.__dict__['codec_type']:
if str(self.__dict__['codec_type']) == 'audio':
val=True
return val | [
"def",
"isAudio",
"(",
"self",
")",
":",
"val",
"=",
"False",
"if",
"self",
".",
"__dict__",
"[",
"'codec_type'",
"]",
":",
"if",
"str",
"(",
"self",
".",
"__dict__",
"[",
"'codec_type'",
"]",
")",
"==",
"'audio'",
":",
"val",
"=",
"True",
"return",
... | Is this stream labelled as an audio stream? | [
"Is",
"this",
"stream",
"labelled",
"as",
"an",
"audio",
"stream?"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffprobe.py#L88-L96 | train | 201,635 |
mapillary/mapillary_tools | mapillary_tools/ffprobe.py | FFStream.isVideo | def isVideo(self):
"""
Is the stream labelled as a video stream.
"""
val=False
if self.__dict__['codec_type']:
if self.codec_type == 'video':
val=True
return val | python | def isVideo(self):
"""
Is the stream labelled as a video stream.
"""
val=False
if self.__dict__['codec_type']:
if self.codec_type == 'video':
val=True
return val | [
"def",
"isVideo",
"(",
"self",
")",
":",
"val",
"=",
"False",
"if",
"self",
".",
"__dict__",
"[",
"'codec_type'",
"]",
":",
"if",
"self",
".",
"codec_type",
"==",
"'video'",
":",
"val",
"=",
"True",
"return",
"val"
] | Is the stream labelled as a video stream. | [
"Is",
"the",
"stream",
"labelled",
"as",
"a",
"video",
"stream",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffprobe.py#L98-L106 | train | 201,636 |
mapillary/mapillary_tools | mapillary_tools/ffprobe.py | FFStream.isSubtitle | def isSubtitle(self):
"""
Is the stream labelled as a subtitle stream.
"""
val=False
if self.__dict__['codec_type']:
if str(self.codec_type)=='subtitle':
val=True
return val | python | def isSubtitle(self):
"""
Is the stream labelled as a subtitle stream.
"""
val=False
if self.__dict__['codec_type']:
if str(self.codec_type)=='subtitle':
val=True
return val | [
"def",
"isSubtitle",
"(",
"self",
")",
":",
"val",
"=",
"False",
"if",
"self",
".",
"__dict__",
"[",
"'codec_type'",
"]",
":",
"if",
"str",
"(",
"self",
".",
"codec_type",
")",
"==",
"'subtitle'",
":",
"val",
"=",
"True",
"return",
"val"
] | Is the stream labelled as a subtitle stream. | [
"Is",
"the",
"stream",
"labelled",
"as",
"a",
"subtitle",
"stream",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffprobe.py#L108-L116 | train | 201,637 |
mapillary/mapillary_tools | mapillary_tools/ffprobe.py | FFStream.frames | def frames(self):
"""
Returns the length of a video stream in frames. Returns 0 if not a video stream.
"""
f=0
if self.isVideo() or self.isAudio():
if self.__dict__['nb_frames']:
try:
f=int(self.__dict__['nb_frames'])
except Exception as e:
print "None integer frame count"
return f | python | def frames(self):
"""
Returns the length of a video stream in frames. Returns 0 if not a video stream.
"""
f=0
if self.isVideo() or self.isAudio():
if self.__dict__['nb_frames']:
try:
f=int(self.__dict__['nb_frames'])
except Exception as e:
print "None integer frame count"
return f | [
"def",
"frames",
"(",
"self",
")",
":",
"f",
"=",
"0",
"if",
"self",
".",
"isVideo",
"(",
")",
"or",
"self",
".",
"isAudio",
"(",
")",
":",
"if",
"self",
".",
"__dict__",
"[",
"'nb_frames'",
"]",
":",
"try",
":",
"f",
"=",
"int",
"(",
"self",
... | Returns the length of a video stream in frames. Returns 0 if not a video stream. | [
"Returns",
"the",
"length",
"of",
"a",
"video",
"stream",
"in",
"frames",
".",
"Returns",
"0",
"if",
"not",
"a",
"video",
"stream",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffprobe.py#L144-L155 | train | 201,638 |
mapillary/mapillary_tools | mapillary_tools/ffprobe.py | FFStream.durationSeconds | def durationSeconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
f=0.0
if self.isVideo() or self.isAudio():
if self.__dict__['duration']:
try:
f=float(self.__dict__['duration'])
except Exception as e:
print "None numeric duration"
return f | python | def durationSeconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
f=0.0
if self.isVideo() or self.isAudio():
if self.__dict__['duration']:
try:
f=float(self.__dict__['duration'])
except Exception as e:
print "None numeric duration"
return f | [
"def",
"durationSeconds",
"(",
"self",
")",
":",
"f",
"=",
"0.0",
"if",
"self",
".",
"isVideo",
"(",
")",
"or",
"self",
".",
"isAudio",
"(",
")",
":",
"if",
"self",
".",
"__dict__",
"[",
"'duration'",
"]",
":",
"try",
":",
"f",
"=",
"float",
"(",... | Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream. | [
"Returns",
"the",
"runtime",
"duration",
"of",
"the",
"video",
"stream",
"as",
"a",
"floating",
"point",
"number",
"of",
"seconds",
".",
"Returns",
"0",
".",
"0",
"if",
"not",
"a",
"video",
"stream",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffprobe.py#L157-L169 | train | 201,639 |
mapillary/mapillary_tools | mapillary_tools/ffprobe.py | FFStream.bitrate | def bitrate(self):
"""
Returns bitrate as an integer in bps
"""
b=0
if self.__dict__['bit_rate']:
try:
b=int(self.__dict__['bit_rate'])
except Exception as e:
print "None integer bitrate"
return b | python | def bitrate(self):
"""
Returns bitrate as an integer in bps
"""
b=0
if self.__dict__['bit_rate']:
try:
b=int(self.__dict__['bit_rate'])
except Exception as e:
print "None integer bitrate"
return b | [
"def",
"bitrate",
"(",
"self",
")",
":",
"b",
"=",
"0",
"if",
"self",
".",
"__dict__",
"[",
"'bit_rate'",
"]",
":",
"try",
":",
"b",
"=",
"int",
"(",
"self",
".",
"__dict__",
"[",
"'bit_rate'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"pr... | Returns bitrate as an integer in bps | [
"Returns",
"bitrate",
"as",
"an",
"integer",
"in",
"bps"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/ffprobe.py#L207-L217 | train | 201,640 |
mapillary/mapillary_tools | mapillary_tools/uploader.py | get_upload_url | def get_upload_url(credentials):
'''
Returns upload URL using new upload API
'''
request_url = "https://a.mapillary.com/v3/users/{}/upload_secrets?client_id={}".format(
credentials["MAPSettingsUserKey"], CLIENT_ID)
request = urllib2.Request(request_url)
request.add_header('Authorization', 'Bearer {}'.format(
credentials["user_upload_token"]))
try:
response = json.loads(urllib2.urlopen(request).read())
except requests.exceptions.HTTPError as e:
print("Error getting upload parameters, upload could not start")
sys.exit(1)
return response | python | def get_upload_url(credentials):
'''
Returns upload URL using new upload API
'''
request_url = "https://a.mapillary.com/v3/users/{}/upload_secrets?client_id={}".format(
credentials["MAPSettingsUserKey"], CLIENT_ID)
request = urllib2.Request(request_url)
request.add_header('Authorization', 'Bearer {}'.format(
credentials["user_upload_token"]))
try:
response = json.loads(urllib2.urlopen(request).read())
except requests.exceptions.HTTPError as e:
print("Error getting upload parameters, upload could not start")
sys.exit(1)
return response | [
"def",
"get_upload_url",
"(",
"credentials",
")",
":",
"request_url",
"=",
"\"https://a.mapillary.com/v3/users/{}/upload_secrets?client_id={}\"",
".",
"format",
"(",
"credentials",
"[",
"\"MAPSettingsUserKey\"",
"]",
",",
"CLIENT_ID",
")",
"request",
"=",
"urllib2",
".",
... | Returns upload URL using new upload API | [
"Returns",
"upload",
"URL",
"using",
"new",
"upload",
"API"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/uploader.py#L157-L171 | train | 201,641 |
mapillary/mapillary_tools | mapillary_tools/uploader.py | get_upload_token | def get_upload_token(mail, pwd):
'''
Get upload token
'''
try:
params = urllib.urlencode({"email": mail, "password": pwd})
response = urllib.urlopen(LOGIN_URL, params)
except:
return None
resp = json.loads(response.read())
if not resp or 'token' not in resp:
return None
return resp['token'] | python | def get_upload_token(mail, pwd):
'''
Get upload token
'''
try:
params = urllib.urlencode({"email": mail, "password": pwd})
response = urllib.urlopen(LOGIN_URL, params)
except:
return None
resp = json.loads(response.read())
if not resp or 'token' not in resp:
return None
return resp['token'] | [
"def",
"get_upload_token",
"(",
"mail",
",",
"pwd",
")",
":",
"try",
":",
"params",
"=",
"urllib",
".",
"urlencode",
"(",
"{",
"\"email\"",
":",
"mail",
",",
"\"password\"",
":",
"pwd",
"}",
")",
"response",
"=",
"urllib",
".",
"urlopen",
"(",
"LOGIN_U... | Get upload token | [
"Get",
"upload",
"token"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/uploader.py#L338-L350 | train | 201,642 |
mapillary/mapillary_tools | mapillary_tools/uploader.py | authenticate_with_email_and_pwd | def authenticate_with_email_and_pwd(user_email, user_password):
'''
Authenticate the user by passing the email and password.
This function avoids prompting the command line for user credentials and is useful for calling tools programmatically
'''
if user_email is None or user_password is None:
raise ValueError(
'Could not authenticate user. Missing username or password')
upload_token = uploader.get_upload_token(user_email, user_password)
if not upload_token:
print("Authentication failed for user name " +
user_name + ", please try again.")
sys.exit(1)
user_key = get_user_key(user_name)
if not user_key:
print("User name {} does not exist, please try again or contact Mapillary user support.".format(
user_name))
sys.exit(1)
user_permission_hash, user_signature_hash = get_user_hashes(
user_key, upload_token)
user_items["MAPSettingsUsername"] = section
user_items["MAPSettingsUserKey"] = user_key
user_items["user_upload_token"] = upload_token
user_items["user_permission_hash"] = user_permission_hash
user_items["user_signature_hash"] = user_signature_hash
return user_items | python | def authenticate_with_email_and_pwd(user_email, user_password):
'''
Authenticate the user by passing the email and password.
This function avoids prompting the command line for user credentials and is useful for calling tools programmatically
'''
if user_email is None or user_password is None:
raise ValueError(
'Could not authenticate user. Missing username or password')
upload_token = uploader.get_upload_token(user_email, user_password)
if not upload_token:
print("Authentication failed for user name " +
user_name + ", please try again.")
sys.exit(1)
user_key = get_user_key(user_name)
if not user_key:
print("User name {} does not exist, please try again or contact Mapillary user support.".format(
user_name))
sys.exit(1)
user_permission_hash, user_signature_hash = get_user_hashes(
user_key, upload_token)
user_items["MAPSettingsUsername"] = section
user_items["MAPSettingsUserKey"] = user_key
user_items["user_upload_token"] = upload_token
user_items["user_permission_hash"] = user_permission_hash
user_items["user_signature_hash"] = user_signature_hash
return user_items | [
"def",
"authenticate_with_email_and_pwd",
"(",
"user_email",
",",
"user_password",
")",
":",
"if",
"user_email",
"is",
"None",
"or",
"user_password",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Could not authenticate user. Missing username or password'",
")",
"upload... | Authenticate the user by passing the email and password.
This function avoids prompting the command line for user credentials and is useful for calling tools programmatically | [
"Authenticate",
"the",
"user",
"by",
"passing",
"the",
"email",
"and",
"password",
".",
"This",
"function",
"avoids",
"prompting",
"the",
"command",
"line",
"for",
"user",
"credentials",
"and",
"is",
"useful",
"for",
"calling",
"tools",
"programmatically"
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/uploader.py#L467-L494 | train | 201,643 |
mapillary/mapillary_tools | mapillary_tools/uploader.py | upload_file | def upload_file(filepath, max_attempts, url, permission, signature, key=None, aws_key=None):
'''
Upload file at filepath.
'''
if max_attempts == None:
max_attempts = MAX_ATTEMPTS
filename = os.path.basename(filepath)
s3_filename = filename
try:
s3_filename = ExifRead(filepath).exif_name()
except:
pass
filepath_keep_original = processing.processed_images_rootpath(filepath)
filepath_in = filepath
if os.path.isfile(filepath_keep_original):
filepath = filepath_keep_original
# add S3 'path' if given
if key is None:
s3_key = s3_filename
else:
s3_key = key + s3_filename
parameters = {"key": s3_key, "AWSAccessKeyId": aws_key, "acl": "private",
"policy": permission, "signature": signature, "Content-Type": "image/jpeg"}
with open(filepath, "rb") as f:
encoded_string = f.read()
data, headers = encode_multipart(
parameters, {'file': {'filename': filename, 'content': encoded_string}})
if (DRY_RUN == False):
displayed_upload_error = False
for attempt in range(max_attempts):
# Initialize response before each attempt
response = None
try:
request = urllib2.Request(url, data=data, headers=headers)
response = urllib2.urlopen(request)
if response.getcode() == 204:
create_upload_log(filepath_in, "upload_success")
if displayed_upload_error == True:
print("Successful upload of {} on attempt {}".format(
filename, attempt))
else:
create_upload_log(filepath_in, "upload_failed")
break # attempts
except urllib2.HTTPError as e:
print("HTTP error: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
displayed_upload_error = True
time.sleep(5)
except urllib2.URLError as e:
print("URL error: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
time.sleep(5)
except httplib.HTTPException as e:
print("HTTP exception: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
time.sleep(5)
except OSError as e:
print("OS error: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
time.sleep(5)
except socket.timeout as e:
# Specific timeout handling for Python 2.7
print("Timeout error: {} (retrying), will attempt upload again for {} more times".format(
filename, max_attempts - attempt - 1))
finally:
if response is not None:
response.close()
else:
print('DRY_RUN, Skipping actual image upload. Use this for debug only.') | python | def upload_file(filepath, max_attempts, url, permission, signature, key=None, aws_key=None):
'''
Upload file at filepath.
'''
if max_attempts == None:
max_attempts = MAX_ATTEMPTS
filename = os.path.basename(filepath)
s3_filename = filename
try:
s3_filename = ExifRead(filepath).exif_name()
except:
pass
filepath_keep_original = processing.processed_images_rootpath(filepath)
filepath_in = filepath
if os.path.isfile(filepath_keep_original):
filepath = filepath_keep_original
# add S3 'path' if given
if key is None:
s3_key = s3_filename
else:
s3_key = key + s3_filename
parameters = {"key": s3_key, "AWSAccessKeyId": aws_key, "acl": "private",
"policy": permission, "signature": signature, "Content-Type": "image/jpeg"}
with open(filepath, "rb") as f:
encoded_string = f.read()
data, headers = encode_multipart(
parameters, {'file': {'filename': filename, 'content': encoded_string}})
if (DRY_RUN == False):
displayed_upload_error = False
for attempt in range(max_attempts):
# Initialize response before each attempt
response = None
try:
request = urllib2.Request(url, data=data, headers=headers)
response = urllib2.urlopen(request)
if response.getcode() == 204:
create_upload_log(filepath_in, "upload_success")
if displayed_upload_error == True:
print("Successful upload of {} on attempt {}".format(
filename, attempt))
else:
create_upload_log(filepath_in, "upload_failed")
break # attempts
except urllib2.HTTPError as e:
print("HTTP error: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
displayed_upload_error = True
time.sleep(5)
except urllib2.URLError as e:
print("URL error: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
time.sleep(5)
except httplib.HTTPException as e:
print("HTTP exception: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
time.sleep(5)
except OSError as e:
print("OS error: {} on {}, will attempt upload again for {} more times".format(
e, filename, max_attempts - attempt - 1))
time.sleep(5)
except socket.timeout as e:
# Specific timeout handling for Python 2.7
print("Timeout error: {} (retrying), will attempt upload again for {} more times".format(
filename, max_attempts - attempt - 1))
finally:
if response is not None:
response.close()
else:
print('DRY_RUN, Skipping actual image upload. Use this for debug only.') | [
"def",
"upload_file",
"(",
"filepath",
",",
"max_attempts",
",",
"url",
",",
"permission",
",",
"signature",
",",
"key",
"=",
"None",
",",
"aws_key",
"=",
"None",
")",
":",
"if",
"max_attempts",
"==",
"None",
":",
"max_attempts",
"=",
"MAX_ATTEMPTS",
"file... | Upload file at filepath. | [
"Upload",
"file",
"at",
"filepath",
"."
] | 816785e90c589cae6e8e34a5530ce8417d29591c | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/uploader.py#L629-L706 | train | 201,644 |
pinterest/pymemcache | pymemcache/client/murmur3.py | murmur3_32 | def murmur3_32(data, seed=0):
"""MurmurHash3 was written by Austin Appleby, and is placed in the
public domain. The author hereby disclaims copyright to this source
code."""
c1 = 0xcc9e2d51
c2 = 0x1b873593
length = len(data)
h1 = seed
roundedEnd = (length & 0xfffffffc) # round down to 4 byte block
for i in range(0, roundedEnd, 4):
# little endian load order
k1 = (ord(data[i]) & 0xff) | ((ord(data[i + 1]) & 0xff) << 8) | \
((ord(data[i + 2]) & 0xff) << 16) | (ord(data[i + 3]) << 24)
k1 *= c1
k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15)
k1 *= c2
h1 ^= k1
h1 = (h1 << 13) | ((h1 & 0xffffffff) >> 19) # ROTL32(h1,13)
h1 = h1 * 5 + 0xe6546b64
# tail
k1 = 0
val = length & 0x03
if val == 3:
k1 = (ord(data[roundedEnd + 2]) & 0xff) << 16
# fallthrough
if val in [2, 3]:
k1 |= (ord(data[roundedEnd + 1]) & 0xff) << 8
# fallthrough
if val in [1, 2, 3]:
k1 |= ord(data[roundedEnd]) & 0xff
k1 *= c1
k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= length
# fmix(h1)
h1 ^= ((h1 & 0xffffffff) >> 16)
h1 *= 0x85ebca6b
h1 ^= ((h1 & 0xffffffff) >> 13)
h1 *= 0xc2b2ae35
h1 ^= ((h1 & 0xffffffff) >> 16)
return h1 & 0xffffffff | python | def murmur3_32(data, seed=0):
"""MurmurHash3 was written by Austin Appleby, and is placed in the
public domain. The author hereby disclaims copyright to this source
code."""
c1 = 0xcc9e2d51
c2 = 0x1b873593
length = len(data)
h1 = seed
roundedEnd = (length & 0xfffffffc) # round down to 4 byte block
for i in range(0, roundedEnd, 4):
# little endian load order
k1 = (ord(data[i]) & 0xff) | ((ord(data[i + 1]) & 0xff) << 8) | \
((ord(data[i + 2]) & 0xff) << 16) | (ord(data[i + 3]) << 24)
k1 *= c1
k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15)
k1 *= c2
h1 ^= k1
h1 = (h1 << 13) | ((h1 & 0xffffffff) >> 19) # ROTL32(h1,13)
h1 = h1 * 5 + 0xe6546b64
# tail
k1 = 0
val = length & 0x03
if val == 3:
k1 = (ord(data[roundedEnd + 2]) & 0xff) << 16
# fallthrough
if val in [2, 3]:
k1 |= (ord(data[roundedEnd + 1]) & 0xff) << 8
# fallthrough
if val in [1, 2, 3]:
k1 |= ord(data[roundedEnd]) & 0xff
k1 *= c1
k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= length
# fmix(h1)
h1 ^= ((h1 & 0xffffffff) >> 16)
h1 *= 0x85ebca6b
h1 ^= ((h1 & 0xffffffff) >> 13)
h1 *= 0xc2b2ae35
h1 ^= ((h1 & 0xffffffff) >> 16)
return h1 & 0xffffffff | [
"def",
"murmur3_32",
"(",
"data",
",",
"seed",
"=",
"0",
")",
":",
"c1",
"=",
"0xcc9e2d51",
"c2",
"=",
"0x1b873593",
"length",
"=",
"len",
"(",
"data",
")",
"h1",
"=",
"seed",
"roundedEnd",
"=",
"(",
"length",
"&",
"0xfffffffc",
")",
"# round down to 4... | MurmurHash3 was written by Austin Appleby, and is placed in the
public domain. The author hereby disclaims copyright to this source
code. | [
"MurmurHash3",
"was",
"written",
"by",
"Austin",
"Appleby",
"and",
"is",
"placed",
"in",
"the",
"public",
"domain",
".",
"The",
"author",
"hereby",
"disclaims",
"copyright",
"to",
"this",
"source",
"code",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/murmur3.py#L1-L51 | train | 201,645 |
pinterest/pymemcache | pymemcache/client/base.py | _readline | def _readline(sock, buf):
"""Read line of text from the socket.
Read a line of text (delimited by "\r\n") from the socket, and
return that line along with any trailing characters read from the
socket.
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
Returns:
A tuple of (buf, line) where line is the full line read from the
socket (minus the "\r\n" characters) and buf is any trailing
characters read after the "\r\n" was found (which may be an empty
string).
"""
chunks = []
last_char = b''
while True:
# We're reading in chunks, so "\r\n" could appear in one chunk,
# or across the boundary of two chunks, so we check for both
# cases.
# This case must appear first, since the buffer could have
# later \r\n characters in it and we want to get the first \r\n.
if last_char == b'\r' and buf[0:1] == b'\n':
# Strip the last character from the last chunk.
chunks[-1] = chunks[-1][:-1]
return buf[1:], b''.join(chunks)
elif buf.find(b'\r\n') != -1:
before, sep, after = buf.partition(b"\r\n")
chunks.append(before)
return after, b''.join(chunks)
if buf:
chunks.append(buf)
last_char = buf[-1:]
buf = _recv(sock, RECV_SIZE)
if not buf:
raise MemcacheUnexpectedCloseError() | python | def _readline(sock, buf):
"""Read line of text from the socket.
Read a line of text (delimited by "\r\n") from the socket, and
return that line along with any trailing characters read from the
socket.
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
Returns:
A tuple of (buf, line) where line is the full line read from the
socket (minus the "\r\n" characters) and buf is any trailing
characters read after the "\r\n" was found (which may be an empty
string).
"""
chunks = []
last_char = b''
while True:
# We're reading in chunks, so "\r\n" could appear in one chunk,
# or across the boundary of two chunks, so we check for both
# cases.
# This case must appear first, since the buffer could have
# later \r\n characters in it and we want to get the first \r\n.
if last_char == b'\r' and buf[0:1] == b'\n':
# Strip the last character from the last chunk.
chunks[-1] = chunks[-1][:-1]
return buf[1:], b''.join(chunks)
elif buf.find(b'\r\n') != -1:
before, sep, after = buf.partition(b"\r\n")
chunks.append(before)
return after, b''.join(chunks)
if buf:
chunks.append(buf)
last_char = buf[-1:]
buf = _recv(sock, RECV_SIZE)
if not buf:
raise MemcacheUnexpectedCloseError() | [
"def",
"_readline",
"(",
"sock",
",",
"buf",
")",
":",
"chunks",
"=",
"[",
"]",
"last_char",
"=",
"b''",
"while",
"True",
":",
"# We're reading in chunks, so \"\\r\\n\" could appear in one chunk,",
"# or across the boundary of two chunks, so we check for both",
"# cases.",
... | Read line of text from the socket.
Read a line of text (delimited by "\r\n") from the socket, and
return that line along with any trailing characters read from the
socket.
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
Returns:
A tuple of (buf, line) where line is the full line read from the
socket (minus the "\r\n" characters) and buf is any trailing
characters read after the "\r\n" was found (which may be an empty
string). | [
"Read",
"line",
"of",
"text",
"from",
"the",
"socket",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L1103-L1148 | train | 201,646 |
pinterest/pymemcache | pymemcache/client/base.py | _readvalue | def _readvalue(sock, buf, size):
"""Read specified amount of bytes from the socket.
Read size bytes, followed by the "\r\n" characters, from the socket,
and return those bytes and any trailing bytes read after the "\r\n".
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
size: Integer, number of bytes to read from the socket.
Returns:
A tuple of (buf, value) where value is the bytes read from the
socket (there will be exactly size bytes) and buf is trailing
characters read after the "\r\n" following the bytes (but not
including the \r\n).
"""
chunks = []
rlen = size + 2
while rlen - len(buf) > 0:
if buf:
rlen -= len(buf)
chunks.append(buf)
buf = _recv(sock, RECV_SIZE)
if not buf:
raise MemcacheUnexpectedCloseError()
# Now we need to remove the \r\n from the end. There are two cases we care
# about: the \r\n is all in the last buffer, or only the \n is in the last
# buffer, and we need to remove the \r from the penultimate buffer.
if rlen == 1:
# replace the last chunk with the same string minus the last character,
# which is always '\r' in this case.
chunks[-1] = chunks[-1][:-1]
else:
# Just remove the "\r\n" from the latest chunk
chunks.append(buf[:rlen - 2])
return buf[rlen:], b''.join(chunks) | python | def _readvalue(sock, buf, size):
"""Read specified amount of bytes from the socket.
Read size bytes, followed by the "\r\n" characters, from the socket,
and return those bytes and any trailing bytes read after the "\r\n".
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
size: Integer, number of bytes to read from the socket.
Returns:
A tuple of (buf, value) where value is the bytes read from the
socket (there will be exactly size bytes) and buf is trailing
characters read after the "\r\n" following the bytes (but not
including the \r\n).
"""
chunks = []
rlen = size + 2
while rlen - len(buf) > 0:
if buf:
rlen -= len(buf)
chunks.append(buf)
buf = _recv(sock, RECV_SIZE)
if not buf:
raise MemcacheUnexpectedCloseError()
# Now we need to remove the \r\n from the end. There are two cases we care
# about: the \r\n is all in the last buffer, or only the \n is in the last
# buffer, and we need to remove the \r from the penultimate buffer.
if rlen == 1:
# replace the last chunk with the same string minus the last character,
# which is always '\r' in this case.
chunks[-1] = chunks[-1][:-1]
else:
# Just remove the "\r\n" from the latest chunk
chunks.append(buf[:rlen - 2])
return buf[rlen:], b''.join(chunks) | [
"def",
"_readvalue",
"(",
"sock",
",",
"buf",
",",
"size",
")",
":",
"chunks",
"=",
"[",
"]",
"rlen",
"=",
"size",
"+",
"2",
"while",
"rlen",
"-",
"len",
"(",
"buf",
")",
">",
"0",
":",
"if",
"buf",
":",
"rlen",
"-=",
"len",
"(",
"buf",
")",
... | Read specified amount of bytes from the socket.
Read size bytes, followed by the "\r\n" characters, from the socket,
and return those bytes and any trailing bytes read after the "\r\n".
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
size: Integer, number of bytes to read from the socket.
Returns:
A tuple of (buf, value) where value is the bytes read from the
socket (there will be exactly size bytes) and buf is trailing
characters read after the "\r\n" following the bytes (but not
including the \r\n). | [
"Read",
"specified",
"amount",
"of",
"bytes",
"from",
"the",
"socket",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L1151-L1193 | train | 201,647 |
pinterest/pymemcache | pymemcache/client/base.py | Client.close | def close(self):
"""Close the connection to memcached, if it is open. The next call to a
method that requires a connection will re-open it."""
if self.sock is not None:
try:
self.sock.close()
except Exception:
pass
finally:
self.sock = None | python | def close(self):
"""Close the connection to memcached, if it is open. The next call to a
method that requires a connection will re-open it."""
if self.sock is not None:
try:
self.sock.close()
except Exception:
pass
finally:
self.sock = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"sock",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"sock",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"pass",
"finally",
":",
"self",
".",
"sock",
"=",
"None"
] | Close the connection to memcached, if it is open. The next call to a
method that requires a connection will re-open it. | [
"Close",
"the",
"connection",
"to",
"memcached",
"if",
"it",
"is",
"open",
".",
"The",
"next",
"call",
"to",
"a",
"method",
"that",
"requires",
"a",
"connection",
"will",
"re",
"-",
"open",
"it",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L285-L294 | train | 201,648 |
pinterest/pymemcache | pymemcache/client/base.py | Client.set | def set(self, key, value, expire=0, noreply=None):
"""
The memcached "set" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. If an exception is
raised, the set may or may not have occurred. If noreply is True,
then a successful return does not guarantee a successful set.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'set', {key: value}, expire, noreply)[key] | python | def set(self, key, value, expire=0, noreply=None):
"""
The memcached "set" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. If an exception is
raised, the set may or may not have occurred. If noreply is True,
then a successful return does not guarantee a successful set.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'set', {key: value}, expire, noreply)[key] | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"return",
"self",
".",
"_store_cmd",
"(",
"b'set'... | The memcached "set" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. If an exception is
raised, the set may or may not have occurred. If noreply is True,
then a successful return does not guarantee a successful set. | [
"The",
"memcached",
"set",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L296-L315 | train | 201,649 |
pinterest/pymemcache | pymemcache/client/base.py | Client.set_many | def set_many(self, values, expire=0, noreply=None):
"""
A convenience function for setting multiple values.
Args:
values: dict(str, str), a dict of keys and values, see class docs
for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
Returns a list of keys that failed to be inserted.
If noreply is True, always returns empty list.
"""
if noreply is None:
noreply = self.default_noreply
result = self._store_cmd(b'set', values, expire, noreply)
return [k for k, v in six.iteritems(result) if not v] | python | def set_many(self, values, expire=0, noreply=None):
"""
A convenience function for setting multiple values.
Args:
values: dict(str, str), a dict of keys and values, see class docs
for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
Returns a list of keys that failed to be inserted.
If noreply is True, always returns empty list.
"""
if noreply is None:
noreply = self.default_noreply
result = self._store_cmd(b'set', values, expire, noreply)
return [k for k, v in six.iteritems(result) if not v] | [
"def",
"set_many",
"(",
"self",
",",
"values",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"result",
"=",
"self",
".",
"_store_cmd",
"(",
"b'set'",
... | A convenience function for setting multiple values.
Args:
values: dict(str, str), a dict of keys and values, see class docs
for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
Returns a list of keys that failed to be inserted.
If noreply is True, always returns empty list. | [
"A",
"convenience",
"function",
"for",
"setting",
"multiple",
"values",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L317-L336 | train | 201,650 |
pinterest/pymemcache | pymemcache/client/base.py | Client.add | def add(self, key, value, expire=0, noreply=None):
"""
The memcached "add" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, the return value is always True. Otherwise the
return value is True if the value was stored, and False if it was
not (because the key already existed).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'add', {key: value}, expire, noreply)[key] | python | def add(self, key, value, expire=0, noreply=None):
"""
The memcached "add" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, the return value is always True. Otherwise the
return value is True if the value was stored, and False if it was
not (because the key already existed).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'add', {key: value}, expire, noreply)[key] | [
"def",
"add",
"(",
"self",
",",
"key",
",",
"value",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"return",
"self",
".",
"_store_cmd",
"(",
"b'add'... | The memcached "add" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, the return value is always True. Otherwise the
return value is True if the value was stored, and False if it was
not (because the key already existed). | [
"The",
"memcached",
"add",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L340-L359 | train | 201,651 |
pinterest/pymemcache | pymemcache/client/base.py | Client.replace | def replace(self, key, value, expire=0, noreply=None):
"""
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'replace', {key: value}, expire, noreply)[key] | python | def replace(self, key, value, expire=0, noreply=None):
"""
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'replace', {key: value}, expire, noreply)[key] | [
"def",
"replace",
"(",
"self",
",",
"key",
",",
"value",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"return",
"self",
".",
"_store_cmd",
"(",
"b'... | The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist). | [
"The",
"memcached",
"replace",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L361-L380 | train | 201,652 |
pinterest/pymemcache | pymemcache/client/base.py | Client.append | def append(self, key, value, expire=0, noreply=None):
"""
The memcached "append" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'append', {key: value}, expire, noreply)[key] | python | def append(self, key, value, expire=0, noreply=None):
"""
The memcached "append" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'append', {key: value}, expire, noreply)[key] | [
"def",
"append",
"(",
"self",
",",
"key",
",",
"value",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"return",
"self",
".",
"_store_cmd",
"(",
"b'a... | The memcached "append" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. | [
"The",
"memcached",
"append",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L382-L399 | train | 201,653 |
pinterest/pymemcache | pymemcache/client/base.py | Client.prepend | def prepend(self, key, value, expire=0, noreply=None):
"""
The memcached "prepend" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key] | python | def prepend(self, key, value, expire=0, noreply=None):
"""
The memcached "prepend" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key] | [
"def",
"prepend",
"(",
"self",
",",
"key",
",",
"value",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"return",
"self",
".",
"_store_cmd",
"(",
"b'... | The memcached "prepend" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. | [
"The",
"memcached",
"prepend",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L401-L418 | train | 201,654 |
pinterest/pymemcache | pymemcache/client/base.py | Client.cas | def cas(self, key, value, cas, expire=0, noreply=False):
"""
The memcached "cas" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
cas: int or str that only contains the characters '0'-'9'.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns True. Otherwise returns None if
the key didn't exist, False if it existed but had a different cas
value and True if it existed and was changed.
"""
return self._store_cmd(b'cas', {key: value}, expire, noreply, cas)[key] | python | def cas(self, key, value, cas, expire=0, noreply=False):
"""
The memcached "cas" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
cas: int or str that only contains the characters '0'-'9'.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns True. Otherwise returns None if
the key didn't exist, False if it existed but had a different cas
value and True if it existed and was changed.
"""
return self._store_cmd(b'cas', {key: value}, expire, noreply, cas)[key] | [
"def",
"cas",
"(",
"self",
",",
"key",
",",
"value",
",",
"cas",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"False",
")",
":",
"return",
"self",
".",
"_store_cmd",
"(",
"b'cas'",
",",
"{",
"key",
":",
"value",
"}",
",",
"expire",
",",
"noreply... | The memcached "cas" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
cas: int or str that only contains the characters '0'-'9'.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns True. Otherwise returns None if
the key didn't exist, False if it existed but had a different cas
value and True if it existed and was changed. | [
"The",
"memcached",
"cas",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L420-L437 | train | 201,655 |
pinterest/pymemcache | pymemcache/client/base.py | Client.get | def get(self, key, default=None):
"""
The memcached "get" command, but only for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
Returns:
The value for the key, or default if the key wasn't found.
"""
return self._fetch_cmd(b'get', [key], False).get(key, default) | python | def get(self, key, default=None):
"""
The memcached "get" command, but only for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
Returns:
The value for the key, or default if the key wasn't found.
"""
return self._fetch_cmd(b'get', [key], False).get(key, default) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"return",
"self",
".",
"_fetch_cmd",
"(",
"b'get'",
",",
"[",
"key",
"]",
",",
"False",
")",
".",
"get",
"(",
"key",
",",
"default",
")"
] | The memcached "get" command, but only for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
Returns:
The value for the key, or default if the key wasn't found. | [
"The",
"memcached",
"get",
"command",
"but",
"only",
"for",
"one",
"key",
"as",
"a",
"convenience",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L439-L450 | train | 201,656 |
pinterest/pymemcache | pymemcache/client/base.py | Client.gets | def gets(self, key, default=None, cas_default=None):
"""
The memcached "gets" command for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
cas_default: same behaviour as default argument.
Returns:
A tuple of (value, cas)
or (default, cas_defaults) if the key was not found.
"""
defaults = (default, cas_default)
return self._fetch_cmd(b'gets', [key], True).get(key, defaults) | python | def gets(self, key, default=None, cas_default=None):
"""
The memcached "gets" command for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
cas_default: same behaviour as default argument.
Returns:
A tuple of (value, cas)
or (default, cas_defaults) if the key was not found.
"""
defaults = (default, cas_default)
return self._fetch_cmd(b'gets', [key], True).get(key, defaults) | [
"def",
"gets",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
",",
"cas_default",
"=",
"None",
")",
":",
"defaults",
"=",
"(",
"default",
",",
"cas_default",
")",
"return",
"self",
".",
"_fetch_cmd",
"(",
"b'gets'",
",",
"[",
"key",
"]",
",",
... | The memcached "gets" command for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
cas_default: same behaviour as default argument.
Returns:
A tuple of (value, cas)
or (default, cas_defaults) if the key was not found. | [
"The",
"memcached",
"gets",
"command",
"for",
"one",
"key",
"as",
"a",
"convenience",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L471-L485 | train | 201,657 |
pinterest/pymemcache | pymemcache/client/base.py | Client.delete | def delete(self, key, noreply=None):
"""
The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'delete ' + self.check_key(key)
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'delete', noreply)
if noreply:
return True
return results[0] == b'DELETED' | python | def delete(self, key, noreply=None):
"""
The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'delete ' + self.check_key(key)
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'delete', noreply)
if noreply:
return True
return results[0] == b'DELETED' | [
"def",
"delete",
"(",
"self",
",",
"key",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"cmd",
"=",
"b'delete '",
"+",
"self",
".",
"check_key",
"(",
"key",
")",
"if",
"nore... | The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found. | [
"The",
"memcached",
"delete",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L504-L526 | train | 201,658 |
pinterest/pymemcache | pymemcache/client/base.py | Client.delete_many | def delete_many(self, keys, noreply=None):
"""
A convenience function to delete multiple keys.
Args:
keys: list(str), the list of keys to delete.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. If an exception is raised then all, some or none of the keys
may have been deleted. Otherwise all the keys have been sent to
memcache for deletion and if noreply is False, they have been
acknowledged by memcache.
"""
if not keys:
return True
if noreply is None:
noreply = self.default_noreply
cmds = []
for key in keys:
cmds.append(
b'delete ' + self.check_key(key) +
(b' noreply' if noreply else b'') +
b'\r\n')
self._misc_cmd(cmds, b'delete', noreply)
return True | python | def delete_many(self, keys, noreply=None):
"""
A convenience function to delete multiple keys.
Args:
keys: list(str), the list of keys to delete.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. If an exception is raised then all, some or none of the keys
may have been deleted. Otherwise all the keys have been sent to
memcache for deletion and if noreply is False, they have been
acknowledged by memcache.
"""
if not keys:
return True
if noreply is None:
noreply = self.default_noreply
cmds = []
for key in keys:
cmds.append(
b'delete ' + self.check_key(key) +
(b' noreply' if noreply else b'') +
b'\r\n')
self._misc_cmd(cmds, b'delete', noreply)
return True | [
"def",
"delete_many",
"(",
"self",
",",
"keys",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"not",
"keys",
":",
"return",
"True",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"cmds",
"=",
"[",
"]",
"for",
"key"... | A convenience function to delete multiple keys.
Args:
keys: list(str), the list of keys to delete.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. If an exception is raised then all, some or none of the keys
may have been deleted. Otherwise all the keys have been sent to
memcache for deletion and if noreply is False, they have been
acknowledged by memcache. | [
"A",
"convenience",
"function",
"to",
"delete",
"multiple",
"keys",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L528-L556 | train | 201,659 |
pinterest/pymemcache | pymemcache/client/base.py | Client.incr | def incr(self, key, value, noreply=False):
"""
The memcached "incr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'incr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'incr', noreply)
if noreply:
return None
if results[0] == b'NOT_FOUND':
return None
return int(results[0]) | python | def incr(self, key, value, noreply=False):
"""
The memcached "incr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'incr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'incr', noreply)
if noreply:
return None
if results[0] == b'NOT_FOUND':
return None
return int(results[0]) | [
"def",
"incr",
"(",
"self",
",",
"key",
",",
"value",
",",
"noreply",
"=",
"False",
")",
":",
"key",
"=",
"self",
".",
"check_key",
"(",
"key",
")",
"cmd",
"=",
"b'incr '",
"+",
"key",
"+",
"b' '",
"+",
"six",
".",
"text_type",
"(",
"value",
")",... | The memcached "incr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found. | [
"The",
"memcached",
"incr",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L560-L583 | train | 201,660 |
pinterest/pymemcache | pymemcache/client/base.py | Client.decr | def decr(self, key, value, noreply=False):
"""
The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'decr', noreply)
if noreply:
return None
if results[0] == b'NOT_FOUND':
return None
return int(results[0]) | python | def decr(self, key, value, noreply=False):
"""
The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'decr', noreply)
if noreply:
return None
if results[0] == b'NOT_FOUND':
return None
return int(results[0]) | [
"def",
"decr",
"(",
"self",
",",
"key",
",",
"value",
",",
"noreply",
"=",
"False",
")",
":",
"key",
"=",
"self",
".",
"check_key",
"(",
"key",
")",
"cmd",
"=",
"b'decr '",
"+",
"key",
"+",
"b' '",
"+",
"six",
".",
"text_type",
"(",
"value",
")",... | The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found. | [
"The",
"memcached",
"decr",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L585-L608 | train | 201,661 |
pinterest/pymemcache | pymemcache/client/base.py | Client.touch | def touch(self, key, expire=0, noreply=None):
"""
The memcached "touch" command.
Args:
key: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True if the expiration time was updated, False if the key wasn't
found.
"""
if noreply is None:
noreply = self.default_noreply
key = self.check_key(key)
cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'touch', noreply)
if noreply:
return True
return results[0] == b'TOUCHED' | python | def touch(self, key, expire=0, noreply=None):
"""
The memcached "touch" command.
Args:
key: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True if the expiration time was updated, False if the key wasn't
found.
"""
if noreply is None:
noreply = self.default_noreply
key = self.check_key(key)
cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'touch', noreply)
if noreply:
return True
return results[0] == b'TOUCHED' | [
"def",
"touch",
"(",
"self",
",",
"key",
",",
"expire",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"key",
"=",
"self",
".",
"check_key",
"(",
"key",
")",
"cmd... | The memcached "touch" command.
Args:
key: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True if the expiration time was updated, False if the key wasn't
found. | [
"The",
"memcached",
"touch",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L610-L635 | train | 201,662 |
pinterest/pymemcache | pymemcache/client/base.py | Client.stats | def stats(self, *args):
"""
The memcached "stats" command.
The returned keys depend on what the "stats" command returns.
A best effort is made to convert values to appropriate Python
types, defaulting to strings when a conversion cannot be made.
Args:
*arg: extra string arguments to the "stats" command. See the
memcached protocol documentation for more information.
Returns:
A dict of the returned stats.
"""
result = self._fetch_cmd(b'stats', args, False)
for key, value in six.iteritems(result):
converter = STAT_TYPES.get(key, int)
try:
result[key] = converter(value)
except Exception:
pass
return result | python | def stats(self, *args):
"""
The memcached "stats" command.
The returned keys depend on what the "stats" command returns.
A best effort is made to convert values to appropriate Python
types, defaulting to strings when a conversion cannot be made.
Args:
*arg: extra string arguments to the "stats" command. See the
memcached protocol documentation for more information.
Returns:
A dict of the returned stats.
"""
result = self._fetch_cmd(b'stats', args, False)
for key, value in six.iteritems(result):
converter = STAT_TYPES.get(key, int)
try:
result[key] = converter(value)
except Exception:
pass
return result | [
"def",
"stats",
"(",
"self",
",",
"*",
"args",
")",
":",
"result",
"=",
"self",
".",
"_fetch_cmd",
"(",
"b'stats'",
",",
"args",
",",
"False",
")",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"result",
")",
":",
"converter",
"="... | The memcached "stats" command.
The returned keys depend on what the "stats" command returns.
A best effort is made to convert values to appropriate Python
types, defaulting to strings when a conversion cannot be made.
Args:
*arg: extra string arguments to the "stats" command. See the
memcached protocol documentation for more information.
Returns:
A dict of the returned stats. | [
"The",
"memcached",
"stats",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L637-L661 | train | 201,663 |
pinterest/pymemcache | pymemcache/client/base.py | Client.cache_memlimit | def cache_memlimit(self, memlimit):
"""
The memcached "cache_memlimit" command.
Args:
memlimit: int, the number of megabytes to set as the new cache memory
limit.
Returns:
If no exception is raised, always returns True.
"""
self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False)
return True | python | def cache_memlimit(self, memlimit):
"""
The memcached "cache_memlimit" command.
Args:
memlimit: int, the number of megabytes to set as the new cache memory
limit.
Returns:
If no exception is raised, always returns True.
"""
self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False)
return True | [
"def",
"cache_memlimit",
"(",
"self",
",",
"memlimit",
")",
":",
"self",
".",
"_fetch_cmd",
"(",
"b'cache_memlimit'",
",",
"[",
"str",
"(",
"int",
"(",
"memlimit",
")",
")",
"]",
",",
"False",
")",
"return",
"True"
] | The memcached "cache_memlimit" command.
Args:
memlimit: int, the number of megabytes to set as the new cache memory
limit.
Returns:
If no exception is raised, always returns True. | [
"The",
"memcached",
"cache_memlimit",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L663-L676 | train | 201,664 |
pinterest/pymemcache | pymemcache/client/base.py | Client.version | def version(self):
"""
The memcached "version" command.
Returns:
A string of the memcached version.
"""
cmd = b"version\r\n"
results = self._misc_cmd([cmd], b'version', False)
before, _, after = results[0].partition(b' ')
if before != b'VERSION':
raise MemcacheUnknownError(
"Received unexpected response: %s" % results[0])
return after | python | def version(self):
"""
The memcached "version" command.
Returns:
A string of the memcached version.
"""
cmd = b"version\r\n"
results = self._misc_cmd([cmd], b'version', False)
before, _, after = results[0].partition(b' ')
if before != b'VERSION':
raise MemcacheUnknownError(
"Received unexpected response: %s" % results[0])
return after | [
"def",
"version",
"(",
"self",
")",
":",
"cmd",
"=",
"b\"version\\r\\n\"",
"results",
"=",
"self",
".",
"_misc_cmd",
"(",
"[",
"cmd",
"]",
",",
"b'version'",
",",
"False",
")",
"before",
",",
"_",
",",
"after",
"=",
"results",
"[",
"0",
"]",
".",
"... | The memcached "version" command.
Returns:
A string of the memcached version. | [
"The",
"memcached",
"version",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L678-L692 | train | 201,665 |
pinterest/pymemcache | pymemcache/client/base.py | Client.flush_all | def flush_all(self, delay=0, noreply=None):
"""
The memcached "flush_all" command.
Args:
delay: optional int, the number of seconds to wait before flushing,
or zero to flush immediately (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'flush_all', noreply)
if noreply:
return True
return results[0] == b'OK' | python | def flush_all(self, delay=0, noreply=None):
"""
The memcached "flush_all" command.
Args:
delay: optional int, the number of seconds to wait before flushing,
or zero to flush immediately (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'flush_all', noreply)
if noreply:
return True
return results[0] == b'OK' | [
"def",
"flush_all",
"(",
"self",
",",
"delay",
"=",
"0",
",",
"noreply",
"=",
"None",
")",
":",
"if",
"noreply",
"is",
"None",
":",
"noreply",
"=",
"self",
".",
"default_noreply",
"cmd",
"=",
"b'flush_all '",
"+",
"six",
".",
"text_type",
"(",
"delay",... | The memcached "flush_all" command.
Args:
delay: optional int, the number of seconds to wait before flushing,
or zero to flush immediately (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. | [
"The",
"memcached",
"flush_all",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L694-L716 | train | 201,666 |
pinterest/pymemcache | pymemcache/client/base.py | Client.quit | def quit(self):
"""
The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit.
"""
cmd = b"quit\r\n"
self._misc_cmd([cmd], b'quit', True)
self.close() | python | def quit(self):
"""
The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit.
"""
cmd = b"quit\r\n"
self._misc_cmd([cmd], b'quit', True)
self.close() | [
"def",
"quit",
"(",
"self",
")",
":",
"cmd",
"=",
"b\"quit\\r\\n\"",
"self",
".",
"_misc_cmd",
"(",
"[",
"cmd",
"]",
",",
"b'quit'",
",",
"True",
")",
"self",
".",
"close",
"(",
")"
] | The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit. | [
"The",
"memcached",
"quit",
"command",
"."
] | f3a348f4ce2248cce8b398e93e08d984fb9100e5 | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L718-L728 | train | 201,667 |
kblin/ncbi-genome-download | ncbi_genome_download/config.py | _create_list | def _create_list(value, allow_filename=False):
"""Create a list from the input value.
If the input is a list already, return it.
If the input is a comma-separated string, split it.
"""
if isinstance(value, list):
return value
elif isinstance(value, string_type):
if allow_filename and os.path.isfile(value):
with codecs.open(value, 'r', encoding="utf-8") as handle:
return handle.read().splitlines()
return value.split(',')
else:
raise ValueError("Can't create list for input {}".format(value)) | python | def _create_list(value, allow_filename=False):
"""Create a list from the input value.
If the input is a list already, return it.
If the input is a comma-separated string, split it.
"""
if isinstance(value, list):
return value
elif isinstance(value, string_type):
if allow_filename and os.path.isfile(value):
with codecs.open(value, 'r', encoding="utf-8") as handle:
return handle.read().splitlines()
return value.split(',')
else:
raise ValueError("Can't create list for input {}".format(value)) | [
"def",
"_create_list",
"(",
"value",
",",
"allow_filename",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"string_type",
")",
":",
"if",
"allow_filename",
"and",... | Create a list from the input value.
If the input is a list already, return it.
If the input is a comma-separated string, split it. | [
"Create",
"a",
"list",
"from",
"the",
"input",
"value",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/config.py#L316-L331 | train | 201,668 |
kblin/ncbi-genome-download | ncbi_genome_download/config.py | NgdConfig.is_compatible_assembly_level | def is_compatible_assembly_level(self, ncbi_assembly_level):
"""Check if a given ncbi assembly level string matches the configured assembly levels."""
configured_ncbi_strings = [self._LEVELS[level] for level in self.assembly_level]
return ncbi_assembly_level in configured_ncbi_strings | python | def is_compatible_assembly_level(self, ncbi_assembly_level):
"""Check if a given ncbi assembly level string matches the configured assembly levels."""
configured_ncbi_strings = [self._LEVELS[level] for level in self.assembly_level]
return ncbi_assembly_level in configured_ncbi_strings | [
"def",
"is_compatible_assembly_level",
"(",
"self",
",",
"ncbi_assembly_level",
")",
":",
"configured_ncbi_strings",
"=",
"[",
"self",
".",
"_LEVELS",
"[",
"level",
"]",
"for",
"level",
"in",
"self",
".",
"assembly_level",
"]",
"return",
"ncbi_assembly_level",
"in... | Check if a given ncbi assembly level string matches the configured assembly levels. | [
"Check",
"if",
"a",
"given",
"ncbi",
"assembly",
"level",
"string",
"matches",
"the",
"configured",
"assembly",
"levels",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/config.py#L257-L260 | train | 201,669 |
kblin/ncbi-genome-download | ncbi_genome_download/config.py | NgdConfig.from_kwargs | def from_kwargs(cls, **kwargs):
"""Initialise configuration from kwargs."""
config = cls()
for slot in cls.__slots__:
if slot.startswith('_'):
slot = slot[1:]
setattr(config, slot, kwargs.pop(slot, cls.get_default(slot)))
if kwargs:
raise ValueError("Unrecognized option(s): {}".format(kwargs.keys()))
return config | python | def from_kwargs(cls, **kwargs):
"""Initialise configuration from kwargs."""
config = cls()
for slot in cls.__slots__:
if slot.startswith('_'):
slot = slot[1:]
setattr(config, slot, kwargs.pop(slot, cls.get_default(slot)))
if kwargs:
raise ValueError("Unrecognized option(s): {}".format(kwargs.keys()))
return config | [
"def",
"from_kwargs",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"cls",
"(",
")",
"for",
"slot",
"in",
"cls",
".",
"__slots__",
":",
"if",
"slot",
".",
"startswith",
"(",
"'_'",
")",
":",
"slot",
"=",
"slot",
"[",
"1",
":",
"... | Initialise configuration from kwargs. | [
"Initialise",
"configuration",
"from",
"kwargs",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/config.py#L263-L274 | train | 201,670 |
kblin/ncbi-genome-download | ncbi_genome_download/config.py | NgdConfig.from_namespace | def from_namespace(cls, namespace):
"""Initialise from argparser Namespace object."""
config = cls()
for slot in cls.__slots__:
if slot.startswith('_'):
slot = slot[1:]
if not hasattr(namespace, slot):
continue
setattr(config, slot, getattr(namespace, slot))
return config | python | def from_namespace(cls, namespace):
"""Initialise from argparser Namespace object."""
config = cls()
for slot in cls.__slots__:
if slot.startswith('_'):
slot = slot[1:]
if not hasattr(namespace, slot):
continue
setattr(config, slot, getattr(namespace, slot))
return config | [
"def",
"from_namespace",
"(",
"cls",
",",
"namespace",
")",
":",
"config",
"=",
"cls",
"(",
")",
"for",
"slot",
"in",
"cls",
".",
"__slots__",
":",
"if",
"slot",
".",
"startswith",
"(",
"'_'",
")",
":",
"slot",
"=",
"slot",
"[",
"1",
":",
"]",
"i... | Initialise from argparser Namespace object. | [
"Initialise",
"from",
"argparser",
"Namespace",
"object",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/config.py#L277-L287 | train | 201,671 |
kblin/ncbi-genome-download | ncbi_genome_download/config.py | NgdConfig.get_default | def get_default(cls, category):
"""Get the default value of a given category."""
value = cls._DEFAULTS[category]
if not value or not isinstance(value, list):
return value
return value[0] | python | def get_default(cls, category):
"""Get the default value of a given category."""
value = cls._DEFAULTS[category]
if not value or not isinstance(value, list):
return value
return value[0] | [
"def",
"get_default",
"(",
"cls",
",",
"category",
")",
":",
"value",
"=",
"cls",
".",
"_DEFAULTS",
"[",
"category",
"]",
"if",
"not",
"value",
"or",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"value",
"return",
"value",
"[",
... | Get the default value of a given category. | [
"Get",
"the",
"default",
"value",
"of",
"a",
"given",
"category",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/config.py#L290-L295 | train | 201,672 |
kblin/ncbi-genome-download | ncbi_genome_download/config.py | NgdConfig.get_choices | def get_choices(cls, category):
"""Get all available options for a category."""
value = cls._DEFAULTS[category]
if not isinstance(value, list):
raise ValueError("{} does not offer choices".format(category))
return value | python | def get_choices(cls, category):
"""Get all available options for a category."""
value = cls._DEFAULTS[category]
if not isinstance(value, list):
raise ValueError("{} does not offer choices".format(category))
return value | [
"def",
"get_choices",
"(",
"cls",
",",
"category",
")",
":",
"value",
"=",
"cls",
".",
"_DEFAULTS",
"[",
"category",
"]",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"{} does not offer choices\"",
".",
"for... | Get all available options for a category. | [
"Get",
"all",
"available",
"options",
"for",
"a",
"category",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/config.py#L298-L303 | train | 201,673 |
kblin/ncbi-genome-download | ncbi_genome_download/__main__.py | main | def main():
"""Build and parse command line."""
parser = argument_parser(version=__version__)
args = parser.parse_args()
if args.debug:
log_level = logging.DEBUG
elif args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
max_retries = args.retries
attempts = 0
ret = args_download(args)
while ret == 75 and attempts < max_retries:
attempts += 1
logging.error(
'Downloading from NCBI failed due to a connection error, retrying. Retries so far: %s',
attempts)
ret = args_download(args)
return ret | python | def main():
"""Build and parse command line."""
parser = argument_parser(version=__version__)
args = parser.parse_args()
if args.debug:
log_level = logging.DEBUG
elif args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
max_retries = args.retries
attempts = 0
ret = args_download(args)
while ret == 75 and attempts < max_retries:
attempts += 1
logging.error(
'Downloading from NCBI failed due to a connection error, retrying. Retries so far: %s',
attempts)
ret = args_download(args)
return ret | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argument_parser",
"(",
"version",
"=",
"__version__",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"debug",
":",
"log_level",
"=",
"logging",
".",
"DEBUG",
"elif",
"args",
"."... | Build and parse command line. | [
"Build",
"and",
"parse",
"command",
"line",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/__main__.py#L8-L32 | train | 201,674 |
kblin/ncbi-genome-download | ncbi_genome_download/metadata.py | get | def get(columns=None):
"""Get or create MetaData singleton."""
if columns is None:
columns = _DEFAULT_COLUMNS
global _METADATA
if not _METADATA:
_METADATA = MetaData(columns)
return _METADATA | python | def get(columns=None):
"""Get or create MetaData singleton."""
if columns is None:
columns = _DEFAULT_COLUMNS
global _METADATA
if not _METADATA:
_METADATA = MetaData(columns)
return _METADATA | [
"def",
"get",
"(",
"columns",
"=",
"None",
")",
":",
"if",
"columns",
"is",
"None",
":",
"columns",
"=",
"_DEFAULT_COLUMNS",
"global",
"_METADATA",
"if",
"not",
"_METADATA",
":",
"_METADATA",
"=",
"MetaData",
"(",
"columns",
")",
"return",
"_METADATA"
] | Get or create MetaData singleton. | [
"Get",
"or",
"create",
"MetaData",
"singleton",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/metadata.py#L32-L41 | train | 201,675 |
kblin/ncbi-genome-download | ncbi_genome_download/metadata.py | MetaData.add | def add(self, entry, local_file):
"""Add a metadata row."""
row = self.rowClass()
for key, val in entry.items():
if key in self.columns:
setattr(row, key, val)
row.local_filename = os.path.join('.', os.path.relpath(local_file))
self.rows.append(row) | python | def add(self, entry, local_file):
"""Add a metadata row."""
row = self.rowClass()
for key, val in entry.items():
if key in self.columns:
setattr(row, key, val)
row.local_filename = os.path.join('.', os.path.relpath(local_file))
self.rows.append(row) | [
"def",
"add",
"(",
"self",
",",
"entry",
",",
"local_file",
")",
":",
"row",
"=",
"self",
".",
"rowClass",
"(",
")",
"for",
"key",
",",
"val",
"in",
"entry",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"self",
".",
"columns",
":",
"setattr",
... | Add a metadata row. | [
"Add",
"a",
"metadata",
"row",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/metadata.py#L76-L86 | train | 201,676 |
kblin/ncbi-genome-download | ncbi_genome_download/metadata.py | MetaData.write | def write(self, handle):
"""Write metadata to handle."""
handle.write(u"\t".join(self.columns))
handle.write(u"\n")
for row in self.rows:
row.write(handle) | python | def write(self, handle):
"""Write metadata to handle."""
handle.write(u"\t".join(self.columns))
handle.write(u"\n")
for row in self.rows:
row.write(handle) | [
"def",
"write",
"(",
"self",
",",
"handle",
")",
":",
"handle",
".",
"write",
"(",
"u\"\\t\"",
".",
"join",
"(",
"self",
".",
"columns",
")",
")",
"handle",
".",
"write",
"(",
"u\"\\n\"",
")",
"for",
"row",
"in",
"self",
".",
"rows",
":",
"row",
... | Write metadata to handle. | [
"Write",
"metadata",
"to",
"handle",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/metadata.py#L88-L93 | train | 201,677 |
kblin/ncbi-genome-download | ncbi_genome_download/core.py | config_download | def config_download(config):
"""Run the actual download from NCBI with parameters in a config object.
Parameters
----------
config: NgdConfig
A configuration object with the download settings
Returns
-------
int
success code
"""
try:
download_candidates = select_candidates(config)
if len(download_candidates) < 1:
logging.error("No downloads matched your filter. Please check your options.")
return 1
if config.dry_run:
print("Considering the following {} assemblies for download:".format(len(download_candidates)))
for entry, _ in download_candidates:
print(entry['assembly_accession'], entry['organism_name'], sep="\t")
return 0
download_jobs = []
for entry, group in download_candidates:
download_jobs.extend(create_downloadjob(entry, group, config))
if config.parallel == 1:
for dl_job in download_jobs:
worker(dl_job)
else: # pragma: no cover
# Testing multiprocessing code is annoying
pool = Pool(processes=config.parallel)
jobs = pool.map_async(worker, download_jobs)
try:
# 0xFFFF is just "a really long time"
jobs.get(0xFFFF)
except KeyboardInterrupt:
# TODO: Actually test this once I figure out how to do this in py.test
logging.error("Interrupted by user")
return 1
if config.metadata_table:
with codecs.open(config.metadata_table, mode='w', encoding='utf-8') as handle:
table = metadata.get()
table.write(handle)
except requests.exceptions.ConnectionError as err:
logging.error('Download from NCBI failed: %r', err)
# Exit code 75 meas TEMPFAIL in C/C++, so let's stick with that for now.
return 75
return 0 | python | def config_download(config):
"""Run the actual download from NCBI with parameters in a config object.
Parameters
----------
config: NgdConfig
A configuration object with the download settings
Returns
-------
int
success code
"""
try:
download_candidates = select_candidates(config)
if len(download_candidates) < 1:
logging.error("No downloads matched your filter. Please check your options.")
return 1
if config.dry_run:
print("Considering the following {} assemblies for download:".format(len(download_candidates)))
for entry, _ in download_candidates:
print(entry['assembly_accession'], entry['organism_name'], sep="\t")
return 0
download_jobs = []
for entry, group in download_candidates:
download_jobs.extend(create_downloadjob(entry, group, config))
if config.parallel == 1:
for dl_job in download_jobs:
worker(dl_job)
else: # pragma: no cover
# Testing multiprocessing code is annoying
pool = Pool(processes=config.parallel)
jobs = pool.map_async(worker, download_jobs)
try:
# 0xFFFF is just "a really long time"
jobs.get(0xFFFF)
except KeyboardInterrupt:
# TODO: Actually test this once I figure out how to do this in py.test
logging.error("Interrupted by user")
return 1
if config.metadata_table:
with codecs.open(config.metadata_table, mode='w', encoding='utf-8') as handle:
table = metadata.get()
table.write(handle)
except requests.exceptions.ConnectionError as err:
logging.error('Download from NCBI failed: %r', err)
# Exit code 75 meas TEMPFAIL in C/C++, so let's stick with that for now.
return 75
return 0 | [
"def",
"config_download",
"(",
"config",
")",
":",
"try",
":",
"download_candidates",
"=",
"select_candidates",
"(",
"config",
")",
"if",
"len",
"(",
"download_candidates",
")",
"<",
"1",
":",
"logging",
".",
"error",
"(",
"\"No downloads matched your filter. Plea... | Run the actual download from NCBI with parameters in a config object.
Parameters
----------
config: NgdConfig
A configuration object with the download settings
Returns
-------
int
success code | [
"Run",
"the",
"actual",
"download",
"from",
"NCBI",
"with",
"parameters",
"in",
"a",
"config",
"object",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L154-L210 | train | 201,678 |
kblin/ncbi-genome-download | ncbi_genome_download/core.py | select_candidates | def select_candidates(config):
"""Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>)
"""
download_candidates = []
for group in config.group:
summary_file = get_summary(config.section, group, config.uri, config.use_cache)
entries = parse_summary(summary_file)
for entry in filter_entries(entries, config):
download_candidates.append((entry, group))
return download_candidates | python | def select_candidates(config):
"""Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>)
"""
download_candidates = []
for group in config.group:
summary_file = get_summary(config.section, group, config.uri, config.use_cache)
entries = parse_summary(summary_file)
for entry in filter_entries(entries, config):
download_candidates.append((entry, group))
return download_candidates | [
"def",
"select_candidates",
"(",
"config",
")",
":",
"download_candidates",
"=",
"[",
"]",
"for",
"group",
"in",
"config",
".",
"group",
":",
"summary_file",
"=",
"get_summary",
"(",
"config",
".",
"section",
",",
"group",
",",
"config",
".",
"uri",
",",
... | Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>) | [
"Select",
"candidates",
"to",
"download",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L213-L235 | train | 201,679 |
kblin/ncbi-genome-download | ncbi_genome_download/core.py | filter_entries | def filter_entries(entries, config):
"""Narrrow down which entries to download."""
def in_genus_list(species, genus_list):
for genus in genus_list:
if species.startswith(genus.capitalize()):
return True
return False
new_entries = []
for entry in entries:
if config.type_material and config.type_material != ['any']:
requested_types = map(lambda x: config._RELATION_TO_TYPE_MATERIAL[x], config.type_material)
if not entry['relation_to_type_material'] or entry['relation_to_type_material'] not in requested_types:
logging.debug("Skipping assembly with no reference to type material or reference to type material does not match requested")
continue
else:
print(entry['relation_to_type_material'])
if config.genus and not in_genus_list(entry['organism_name'], config.genus):
logging.debug('Organism name %r does not start with any in %r, skipping',
entry['organism_name'], config.genus)
continue
if config.species_taxid and entry['species_taxid'] not in config.species_taxid:
logging.debug('Species TaxID %r does not match with any in %r, skipping',
entry['species_taxid'], config.species_taxid)
continue
if config.taxid and entry['taxid'] not in config.taxid:
logging.debug('Organism TaxID %r does not match with any in %r, skipping',
entry['taxid'], config.taxid)
continue
if not config.is_compatible_assembly_accession(entry['assembly_accession']):
logging.debug('Skipping entry with incompatible assembly accession %r', entry['assembly_accession'])
continue
if not config.is_compatible_assembly_level(entry['assembly_level']):
logging.debug('Skipping entry with assembly level %r', entry['assembly_level'])
continue
if config.refseq_category != 'all' \
and entry['refseq_category'] != config.get_refseq_category_string(config.refseq_category):
logging.debug('Skipping entry with refseq_category %r, not %r', entry['refseq_category'],
config.refseq_category)
continue
new_entries.append(entry)
return new_entries | python | def filter_entries(entries, config):
"""Narrrow down which entries to download."""
def in_genus_list(species, genus_list):
for genus in genus_list:
if species.startswith(genus.capitalize()):
return True
return False
new_entries = []
for entry in entries:
if config.type_material and config.type_material != ['any']:
requested_types = map(lambda x: config._RELATION_TO_TYPE_MATERIAL[x], config.type_material)
if not entry['relation_to_type_material'] or entry['relation_to_type_material'] not in requested_types:
logging.debug("Skipping assembly with no reference to type material or reference to type material does not match requested")
continue
else:
print(entry['relation_to_type_material'])
if config.genus and not in_genus_list(entry['organism_name'], config.genus):
logging.debug('Organism name %r does not start with any in %r, skipping',
entry['organism_name'], config.genus)
continue
if config.species_taxid and entry['species_taxid'] not in config.species_taxid:
logging.debug('Species TaxID %r does not match with any in %r, skipping',
entry['species_taxid'], config.species_taxid)
continue
if config.taxid and entry['taxid'] not in config.taxid:
logging.debug('Organism TaxID %r does not match with any in %r, skipping',
entry['taxid'], config.taxid)
continue
if not config.is_compatible_assembly_accession(entry['assembly_accession']):
logging.debug('Skipping entry with incompatible assembly accession %r', entry['assembly_accession'])
continue
if not config.is_compatible_assembly_level(entry['assembly_level']):
logging.debug('Skipping entry with assembly level %r', entry['assembly_level'])
continue
if config.refseq_category != 'all' \
and entry['refseq_category'] != config.get_refseq_category_string(config.refseq_category):
logging.debug('Skipping entry with refseq_category %r, not %r', entry['refseq_category'],
config.refseq_category)
continue
new_entries.append(entry)
return new_entries | [
"def",
"filter_entries",
"(",
"entries",
",",
"config",
")",
":",
"def",
"in_genus_list",
"(",
"species",
",",
"genus_list",
")",
":",
"for",
"genus",
"in",
"genus_list",
":",
"if",
"species",
".",
"startswith",
"(",
"genus",
".",
"capitalize",
"(",
")",
... | Narrrow down which entries to download. | [
"Narrrow",
"down",
"which",
"entries",
"to",
"download",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L238-L280 | train | 201,680 |
kblin/ncbi-genome-download | ncbi_genome_download/core.py | worker | def worker(job):
"""Run a single download job."""
ret = False
try:
if job.full_url is not None:
req = requests.get(job.full_url, stream=True)
ret = save_and_check(req, job.local_file, job.expected_checksum)
if not ret:
return ret
ret = create_symlink(job.local_file, job.symlink_path)
except KeyboardInterrupt: # pragma: no cover
# TODO: Actually test this once I figure out how to do this in py.test
logging.debug("Ignoring keyboard interrupt.")
return ret | python | def worker(job):
"""Run a single download job."""
ret = False
try:
if job.full_url is not None:
req = requests.get(job.full_url, stream=True)
ret = save_and_check(req, job.local_file, job.expected_checksum)
if not ret:
return ret
ret = create_symlink(job.local_file, job.symlink_path)
except KeyboardInterrupt: # pragma: no cover
# TODO: Actually test this once I figure out how to do this in py.test
logging.debug("Ignoring keyboard interrupt.")
return ret | [
"def",
"worker",
"(",
"job",
")",
":",
"ret",
"=",
"False",
"try",
":",
"if",
"job",
".",
"full_url",
"is",
"not",
"None",
":",
"req",
"=",
"requests",
".",
"get",
"(",
"job",
".",
"full_url",
",",
"stream",
"=",
"True",
")",
"ret",
"=",
"save_an... | Run a single download job. | [
"Run",
"a",
"single",
"download",
"job",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L283-L297 | train | 201,681 |
kblin/ncbi-genome-download | ncbi_genome_download/core.py | get_summary | def get_summary(section, domain, uri, use_cache):
"""Get the assembly_summary.txt file from NCBI and return a StringIO object for it."""
logging.debug('Checking for a cached summary file')
cachefile = "{section}_{domain}_assembly_summary.txt".format(section=section, domain=domain)
full_cachefile = os.path.join(CACHE_DIR, cachefile)
if use_cache and os.path.exists(full_cachefile) and \
datetime.utcnow() - datetime.fromtimestamp(os.path.getmtime(full_cachefile)) < timedelta(days=1):
logging.info('Using cached summary.')
with codecs.open(full_cachefile, 'r', encoding='utf-8') as fh:
return StringIO(fh.read())
logging.debug('Downloading summary for %r/%r uri: %r', section, domain, uri)
url = '{uri}/{section}/{domain}/assembly_summary.txt'.format(
section=section, domain=domain, uri=uri)
req = requests.get(url)
if use_cache:
try:
os.makedirs(CACHE_DIR)
except OSError as err:
# Errno 17 is "file exists", ignore that, otherwise re-raise
if err.errno != 17:
raise
with codecs.open(full_cachefile, 'w', encoding='utf-8') as fh:
fh.write(req.text)
return StringIO(req.text) | python | def get_summary(section, domain, uri, use_cache):
"""Get the assembly_summary.txt file from NCBI and return a StringIO object for it."""
logging.debug('Checking for a cached summary file')
cachefile = "{section}_{domain}_assembly_summary.txt".format(section=section, domain=domain)
full_cachefile = os.path.join(CACHE_DIR, cachefile)
if use_cache and os.path.exists(full_cachefile) and \
datetime.utcnow() - datetime.fromtimestamp(os.path.getmtime(full_cachefile)) < timedelta(days=1):
logging.info('Using cached summary.')
with codecs.open(full_cachefile, 'r', encoding='utf-8') as fh:
return StringIO(fh.read())
logging.debug('Downloading summary for %r/%r uri: %r', section, domain, uri)
url = '{uri}/{section}/{domain}/assembly_summary.txt'.format(
section=section, domain=domain, uri=uri)
req = requests.get(url)
if use_cache:
try:
os.makedirs(CACHE_DIR)
except OSError as err:
# Errno 17 is "file exists", ignore that, otherwise re-raise
if err.errno != 17:
raise
with codecs.open(full_cachefile, 'w', encoding='utf-8') as fh:
fh.write(req.text)
return StringIO(req.text) | [
"def",
"get_summary",
"(",
"section",
",",
"domain",
",",
"uri",
",",
"use_cache",
")",
":",
"logging",
".",
"debug",
"(",
"'Checking for a cached summary file'",
")",
"cachefile",
"=",
"\"{section}_{domain}_assembly_summary.txt\"",
".",
"format",
"(",
"section",
"=... | Get the assembly_summary.txt file from NCBI and return a StringIO object for it. | [
"Get",
"the",
"assembly_summary",
".",
"txt",
"file",
"from",
"NCBI",
"and",
"return",
"a",
"StringIO",
"object",
"for",
"it",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L300-L328 | train | 201,682 |
kblin/ncbi-genome-download | ncbi_genome_download/core.py | create_downloadjob | def create_downloadjob(entry, domain, config):
"""Create download jobs for all file formats from a summary file entry."""
logging.info('Checking record %r', entry['assembly_accession'])
full_output_dir = create_dir(entry, config.section, domain, config.output)
symlink_path = None
if config.human_readable:
symlink_path = create_readable_dir(entry, config.section, domain, config.output)
checksums = grab_checksums_file(entry)
# TODO: Only write this when the checksums file changed
with open(os.path.join(full_output_dir, 'MD5SUMS'), 'w') as handle:
handle.write(checksums)
parsed_checksums = parse_checksums(checksums)
download_jobs = []
for fmt in config.file_format:
try:
if has_file_changed(full_output_dir, parsed_checksums, fmt):
download_jobs.append(
download_file_job(entry, full_output_dir, parsed_checksums, fmt, symlink_path))
elif need_to_create_symlink(full_output_dir, parsed_checksums, fmt, symlink_path):
download_jobs.append(
create_symlink_job(full_output_dir, parsed_checksums, fmt, symlink_path))
except ValueError as err:
logging.error(err)
return download_jobs | python | def create_downloadjob(entry, domain, config):
"""Create download jobs for all file formats from a summary file entry."""
logging.info('Checking record %r', entry['assembly_accession'])
full_output_dir = create_dir(entry, config.section, domain, config.output)
symlink_path = None
if config.human_readable:
symlink_path = create_readable_dir(entry, config.section, domain, config.output)
checksums = grab_checksums_file(entry)
# TODO: Only write this when the checksums file changed
with open(os.path.join(full_output_dir, 'MD5SUMS'), 'w') as handle:
handle.write(checksums)
parsed_checksums = parse_checksums(checksums)
download_jobs = []
for fmt in config.file_format:
try:
if has_file_changed(full_output_dir, parsed_checksums, fmt):
download_jobs.append(
download_file_job(entry, full_output_dir, parsed_checksums, fmt, symlink_path))
elif need_to_create_symlink(full_output_dir, parsed_checksums, fmt, symlink_path):
download_jobs.append(
create_symlink_job(full_output_dir, parsed_checksums, fmt, symlink_path))
except ValueError as err:
logging.error(err)
return download_jobs | [
"def",
"create_downloadjob",
"(",
"entry",
",",
"domain",
",",
"config",
")",
":",
"logging",
".",
"info",
"(",
"'Checking record %r'",
",",
"entry",
"[",
"'assembly_accession'",
"]",
")",
"full_output_dir",
"=",
"create_dir",
"(",
"entry",
",",
"config",
".",... | Create download jobs for all file formats from a summary file entry. | [
"Create",
"download",
"jobs",
"for",
"all",
"file",
"formats",
"from",
"a",
"summary",
"file",
"entry",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L336-L365 | train | 201,683 |
def create_dir(entry, section, domain, output):
    """Create the output directory for the entry if needed."""
    dirname = os.path.join(output, section, domain, entry['assembly_accession'])
    try:
        os.makedirs(dirname)
    except OSError as err:
        # An already-existing directory is fine; re-raise everything else.
        if err.errno != errno.EEXIST or not os.path.isdir(dirname):
            raise
    return dirname
"""Create the output directory for the entry if needed."""
full_output_dir = os.path.join(output, section, domain, entry['assembly_accession'])
try:
os.makedirs(full_output_dir)
except OSError as err:
if err.errno == errno.EEXIST and os.path.isdir(full_output_dir):
pass
else:
raise
return full_output_dir | [
"def",
"create_dir",
"(",
"entry",
",",
"section",
",",
"domain",
",",
"output",
")",
":",
"full_output_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output",
",",
"section",
",",
"domain",
",",
"entry",
"[",
"'assembly_accession'",
"]",
")",
"try",
... | Create the output directory for the entry if needed. | [
"Create",
"the",
"output",
"directory",
"for",
"the",
"entry",
"if",
"needed",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L368-L379 | train | 201,684 |
def create_readable_dir(entry, section, domain, output):
    """Create a human-readable directory to link the entry to if needed."""
    base = os.path.join(output, 'human_readable', section, domain)
    if domain == 'viral':
        # Viral entries have no meaningful genus/species split.
        dirname = os.path.join(base,
                               entry['organism_name'].replace(' ', '_'),
                               get_strain_label(entry, viral=True))
    else:
        dirname = os.path.join(base,
                               get_genus_label(entry),
                               get_species_label(entry),
                               get_strain_label(entry))
    try:
        os.makedirs(dirname)
    except OSError as err:
        # An already-existing directory is fine; re-raise everything else.
        if err.errno != errno.EEXIST or not os.path.isdir(dirname):
            raise
    return dirname
"""Create the a human-readable directory to link the entry to if needed."""
if domain != 'viral':
full_output_dir = os.path.join(output, 'human_readable', section, domain,
get_genus_label(entry),
get_species_label(entry),
get_strain_label(entry))
else:
full_output_dir = os.path.join(output, 'human_readable', section, domain,
entry['organism_name'].replace(' ', '_'),
get_strain_label(entry, viral=True))
try:
os.makedirs(full_output_dir)
except OSError as err:
if err.errno == errno.EEXIST and os.path.isdir(full_output_dir):
pass
else:
raise
return full_output_dir | [
"def",
"create_readable_dir",
"(",
"entry",
",",
"section",
",",
"domain",
",",
"output",
")",
":",
"if",
"domain",
"!=",
"'viral'",
":",
"full_output_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output",
",",
"'human_readable'",
",",
"section",
",",
... | Create the a human-readable directory to link the entry to if needed. | [
"Create",
"the",
"a",
"human",
"-",
"readable",
"directory",
"to",
"link",
"the",
"entry",
"to",
"if",
"needed",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L382-L402 | train | 201,685 |
def grab_checksums_file(entry):
    """Grab the checksum file for a given entry."""
    base_url = convert_ftp_url(entry['ftp_path'])
    checksum_url = '{}/md5checksums.txt'.format(base_url)
    return requests.get(checksum_url).text
"""Grab the checksum file for a given entry."""
http_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/md5checksums.txt'.format(http_url)
req = requests.get(full_url)
return req.text | [
"def",
"grab_checksums_file",
"(",
"entry",
")",
":",
"http_url",
"=",
"convert_ftp_url",
"(",
"entry",
"[",
"'ftp_path'",
"]",
")",
"full_url",
"=",
"'{}/md5checksums.txt'",
".",
"format",
"(",
"http_url",
")",
"req",
"=",
"requests",
".",
"get",
"(",
"full... | Grab the checksum file for a given entry. | [
"Grab",
"the",
"checksum",
"file",
"for",
"a",
"given",
"entry",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L405-L410 | train | 201,686 |
def parse_checksums(checksums_string):
    """Parse a file containing checksums and filenames."""
    parsed = []
    for line in checksums_string.split('\n'):
        # skip empty lines
        if line == '':
            continue
        try:
            checksum, filename = line.split()
        except ValueError:
            logging.debug('Skipping over unexpected checksum line %r', line)
            continue
        # strip leading ./
        if filename.startswith('./'):
            filename = filename[2:]
        parsed.append({'checksum': checksum, 'file': filename})
    return parsed
"""Parse a file containing checksums and filenames."""
checksums_list = []
for line in checksums_string.split('\n'):
try:
# skip empty lines
if line == '':
continue
checksum, filename = line.split()
# strip leading ./
if filename.startswith('./'):
filename = filename[2:]
checksums_list.append({'checksum': checksum, 'file': filename})
except ValueError:
logging.debug('Skipping over unexpected checksum line %r', line)
continue
return checksums_list | [
"def",
"parse_checksums",
"(",
"checksums_string",
")",
":",
"checksums_list",
"=",
"[",
"]",
"for",
"line",
"in",
"checksums_string",
".",
"split",
"(",
"'\\n'",
")",
":",
"try",
":",
"# skip empty lines",
"if",
"line",
"==",
"''",
":",
"continue",
"checksu... | Parse a file containing checksums and filenames. | [
"Parse",
"a",
"file",
"containing",
"checksums",
"and",
"filenames",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L418-L436 | train | 201,687 |
def has_file_changed(directory, checksums, filetype='genbank'):
    """Check if the checksum of a given file has changed."""
    ending = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, ending)
    path = os.path.join(directory, filename)
    # if file doesn't exist, it has changed
    if not os.path.isfile(path):
        return True
    return md5sum(path) != expected_checksum
"""Check if the checksum of a given file has changed."""
pattern = NgdConfig.get_fileending(filetype)
filename, expected_checksum = get_name_and_checksum(checksums, pattern)
full_filename = os.path.join(directory, filename)
# if file doesn't exist, it has changed
if not os.path.isfile(full_filename):
return True
actual_checksum = md5sum(full_filename)
return expected_checksum != actual_checksum | [
"def",
"has_file_changed",
"(",
"directory",
",",
"checksums",
",",
"filetype",
"=",
"'genbank'",
")",
":",
"pattern",
"=",
"NgdConfig",
".",
"get_fileending",
"(",
"filetype",
")",
"filename",
",",
"expected_checksum",
"=",
"get_name_and_checksum",
"(",
"checksum... | Check if the checksum of a given file has changed. | [
"Check",
"if",
"the",
"checksum",
"of",
"a",
"given",
"file",
"has",
"changed",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L439-L449 | train | 201,688 |
def need_to_create_symlink(directory, checksums, filetype, symlink_path):
    """Check if we need to create a symlink for an existing file."""
    # If we don't have a symlink path, we don't need to create a symlink
    if symlink_path is None:
        return False
    ending = NgdConfig.get_fileending(filetype)
    filename, _ = get_name_and_checksum(checksums, ending)
    target = os.path.join(directory, filename)
    link = os.path.join(symlink_path, filename)
    # Only skip creation when an identical link is already in place.
    return not (os.path.islink(link) and os.readlink(link) == target)
"""Check if we need to create a symlink for an existing file."""
# If we don't have a symlink path, we don't need to create a symlink
if symlink_path is None:
return False
pattern = NgdConfig.get_fileending(filetype)
filename, _ = get_name_and_checksum(checksums, pattern)
full_filename = os.path.join(directory, filename)
symlink_name = os.path.join(symlink_path, filename)
if os.path.islink(symlink_name):
existing_link = os.readlink(symlink_name)
if full_filename == existing_link:
return False
return True | [
"def",
"need_to_create_symlink",
"(",
"directory",
",",
"checksums",
",",
"filetype",
",",
"symlink_path",
")",
":",
"# If we don't have a symlink path, we don't need to create a symlink",
"if",
"symlink_path",
"is",
"None",
":",
"return",
"False",
"pattern",
"=",
"NgdCon... | Check if we need to create a symlink for an existing file. | [
"Check",
"if",
"we",
"need",
"to",
"create",
"a",
"symlink",
"for",
"an",
"existing",
"file",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L452-L468 | train | 201,689 |
def get_name_and_checksum(checksums, end):
    """Extract a full filename and checksum from the checksums list for a file ending in given end."""
    for item in checksums:
        name = item['file']
        if not name.endswith(end):
            # wrong file
            continue
        # workaround for ..cds_from_genomic.fna.gz and ..rna_from_genomic.fna.gz also
        # ending in _genomic.fna.gz, causing bogus matches for the plain fasta
        if '_from_' in name and '_from_' not in end:
            # still the wrong file
            continue
        return name, item['checksum']
    raise ValueError('No entry for file ending in {!r}'.format(end))
"""Extract a full filename and checksum from the checksums list for a file ending in given end."""
for entry in checksums:
if not entry['file'].endswith(end):
# wrong file
continue
# workaround for ..cds_from_genomic.fna.gz and ..rna_from_genomic.fna.gz also
# ending in _genomic.fna.gz, causing bogus matches for the plain fasta
if '_from_' not in end and '_from_' in entry['file']:
# still the wrong file
continue
filename = entry['file']
expected_checksum = entry['checksum']
return filename, expected_checksum
raise ValueError('No entry for file ending in {!r}'.format(end)) | [
"def",
"get_name_and_checksum",
"(",
"checksums",
",",
"end",
")",
":",
"for",
"entry",
"in",
"checksums",
":",
"if",
"not",
"entry",
"[",
"'file'",
"]",
".",
"endswith",
"(",
"end",
")",
":",
"# wrong file",
"continue",
"# workaround for ..cds_from_genomic.fna.... | Extract a full filename and checksum from the checksums list for a file ending in given end. | [
"Extract",
"a",
"full",
"filename",
"and",
"checksum",
"from",
"the",
"checksums",
"list",
"for",
"a",
"file",
"ending",
"in",
"given",
"end",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L471-L485 | train | 201,690 |
def md5sum(filename):
    """Calculate the md5sum of a file and return the hexdigest."""
    digest = hashlib.md5()
    # Read in fixed-size chunks so arbitrarily large files stay cheap.
    with open(filename, 'rb') as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest.hexdigest()
"""Calculate the md5sum of a file and return the hexdigest."""
hash_md5 = hashlib.md5()
with open(filename, 'rb') as handle:
for chunk in iter(lambda: handle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest() | [
"def",
"md5sum",
"(",
"filename",
")",
":",
"hash_md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"handle",
":",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"handle",
".",
"read",
"(",
"409... | Calculate the md5sum of a file and return the hexdigest. | [
"Calculate",
"the",
"md5sum",
"of",
"a",
"file",
"and",
"return",
"the",
"hexdigest",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L488-L494 | train | 201,691 |
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
    """Generate a DownloadJob that actually triggers a file download."""
    ending = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, ending)
    full_url = '{}/{}'.format(convert_ftp_url(entry['ftp_path']), filename)
    local_file = os.path.join(directory, filename)
    full_symlink = os.path.join(symlink_path, filename) if symlink_path is not None else None

    # Keep metadata around
    mtable = metadata.get()
    mtable.add(entry, local_file)

    return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
"""Generate a DownloadJob that actually triggers a file download."""
pattern = NgdConfig.get_fileending(filetype)
filename, expected_checksum = get_name_and_checksum(checksums, pattern)
base_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/{}'.format(base_url, filename)
local_file = os.path.join(directory, filename)
full_symlink = None
if symlink_path is not None:
full_symlink = os.path.join(symlink_path, filename)
# Keep metadata around
mtable = metadata.get()
mtable.add(entry, local_file)
return DownloadJob(full_url, local_file, expected_checksum, full_symlink) | [
"def",
"download_file_job",
"(",
"entry",
",",
"directory",
",",
"checksums",
",",
"filetype",
"=",
"'genbank'",
",",
"symlink_path",
"=",
"None",
")",
":",
"pattern",
"=",
"NgdConfig",
".",
"get_fileending",
"(",
"filetype",
")",
"filename",
",",
"expected_ch... | Generate a DownloadJob that actually triggers a file download. | [
"Generate",
"a",
"DownloadJob",
"that",
"actually",
"triggers",
"a",
"file",
"download",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L499-L514 | train | 201,692 |
def create_symlink_job(directory, checksums, filetype, symlink_path):
    """Create a symlink-creating DownloadJob for an already downloaded file."""
    ending = NgdConfig.get_fileending(filetype)
    filename = get_name_and_checksum(checksums, ending)[0]
    # No URL and no checksum: this job only creates the link.
    return DownloadJob(None,
                       os.path.join(directory, filename),
                       None,
                       os.path.join(symlink_path, filename))
"""Create a symlink-creating DownloadJob for an already downloaded file."""
pattern = NgdConfig.get_fileending(filetype)
filename, _ = get_name_and_checksum(checksums, pattern)
local_file = os.path.join(directory, filename)
full_symlink = os.path.join(symlink_path, filename)
return DownloadJob(None, local_file, None, full_symlink) | [
"def",
"create_symlink_job",
"(",
"directory",
",",
"checksums",
",",
"filetype",
",",
"symlink_path",
")",
":",
"pattern",
"=",
"NgdConfig",
".",
"get_fileending",
"(",
"filetype",
")",
"filename",
",",
"_",
"=",
"get_name_and_checksum",
"(",
"checksums",
",",
... | Create a symlink-creating DownloadJob for an already downloaded file. | [
"Create",
"a",
"symlink",
"-",
"creating",
"DownloadJob",
"for",
"an",
"already",
"downloaded",
"file",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L518-L524 | train | 201,693 |
def save_and_check(response, local_file, expected_checksum):
    """Save the content of an http response and verify the checksum matches."""
    with open(local_file, 'wb') as handle:
        for chunk in response.iter_content(4096):
            handle.write(chunk)

    actual_checksum = md5sum(local_file)
    if actual_checksum == expected_checksum:
        return True
    logging.error('Checksum mismatch for %r. Expected %r, got %r',
                  local_file, expected_checksum, actual_checksum)
    return False
"""Save the content of an http response and verify the checksum matches."""
with open(local_file, 'wb') as handle:
for chunk in response.iter_content(4096):
handle.write(chunk)
actual_checksum = md5sum(local_file)
if actual_checksum != expected_checksum:
logging.error('Checksum mismatch for %r. Expected %r, got %r',
local_file, expected_checksum, actual_checksum)
return False
return True | [
"def",
"save_and_check",
"(",
"response",
",",
"local_file",
",",
"expected_checksum",
")",
":",
"with",
"open",
"(",
"local_file",
",",
"'wb'",
")",
"as",
"handle",
":",
"for",
"chunk",
"in",
"response",
".",
"iter_content",
"(",
"4096",
")",
":",
"handle... | Save the content of an http response and verify the checksum matches. | [
"Save",
"the",
"content",
"of",
"an",
"http",
"response",
"and",
"verify",
"the",
"checksum",
"matches",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L527-L539 | train | 201,694 |
def create_symlink(local_file, symlink_path):
    """Create a relative symbolic link if symlink path is given.

    Parameters
    ----------
    local_file
        relative path to output folder (includes ./ prefix) of file saved
    symlink_path
        relative path to output folder (includes ./ prefix) of symbolic link
        to be created

    Returns
    -------
    bool
        success code
    """
    if symlink_path is None:
        return True
    # Replace any pre-existing file or (possibly dangling) link.
    if os.path.exists(symlink_path) or os.path.lexists(symlink_path):
        os.unlink(symlink_path)
    target = os.path.normpath(local_file)
    link = os.path.normpath(symlink_path)
    # Climb out of the link's directory, then descend into the target path.
    hops = len(os.path.dirname(link).split(os.sep))
    prefix = (os.pardir + os.sep) * hops
    os.symlink(os.path.join(prefix, target), link)
    return True
"""Create a relative symbolic link if symlink path is given.
Parameters
----------
local_file
relative path to output folder (includes ./ prefix) of file saved
symlink_path
relative path to output folder (includes ./ prefix) of symbolic link
to be created
Returns
-------
bool
success code
"""
if symlink_path is not None:
if os.path.exists(symlink_path) or os.path.lexists(symlink_path):
os.unlink(symlink_path)
local_file = os.path.normpath(local_file)
symlink_path = os.path.normpath(symlink_path)
num_dirs_upward = len(os.path.dirname(symlink_path).split(os.sep))
local_relative_to_symlink = num_dirs_upward * (os.pardir + os.sep)
os.symlink(os.path.join(local_relative_to_symlink, local_file),
symlink_path)
return True | [
"def",
"create_symlink",
"(",
"local_file",
",",
"symlink_path",
")",
":",
"if",
"symlink_path",
"is",
"not",
"None",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"symlink_path",
")",
"or",
"os",
".",
"path",
".",
"lexists",
"(",
"symlink_path",
")"... | Create a relative symbolic link if symlink path is given.
Parameters
----------
local_file
relative path to output folder (includes ./ prefix) of file saved
symlink_path
relative path to output folder (includes ./ prefix) of symbolic link
to be created
Returns
-------
bool
success code | [
"Create",
"a",
"relative",
"symbolic",
"link",
"if",
"symlink",
"path",
"is",
"given",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L542-L569 | train | 201,695 |
def get_strain_label(entry, viral=False):
    """Try to extract a strain from an assembly summary entry.

    First this checks 'infraspecific_name', then 'isolate', then
    it tries to get it from 'organism_name'. If all fails, it
    falls back to just returning the assembly accession number.
    """
    def _raw_strain():
        infraspecific = entry['infraspecific_name']
        if infraspecific != '':
            # value looks like "strain=XYZ"; keep the part after '='
            return infraspecific.split('=')[-1]
        if entry['isolate'] != '':
            return entry['isolate']
        words = entry['organism_name'].split(' ')
        if len(words) > 2 and not viral:
            return ' '.join(words[2:])
        return entry['assembly_accession']

    # Normalise the label into something filesystem-safe.
    strain = _raw_strain().strip()
    for char in (' ', ';', '/', '\\'):
        strain = strain.replace(char, '_')
    return strain
"""Try to extract a strain from an assemly summary entry.
First this checks 'infraspecific_name', then 'isolate', then
it tries to get it from 'organism_name'. If all fails, it
falls back to just returning the assembly accesion number.
"""
def get_strain(entry):
strain = entry['infraspecific_name']
if strain != '':
strain = strain.split('=')[-1]
return strain
strain = entry['isolate']
if strain != '':
return strain
if len(entry['organism_name'].split(' ')) > 2 and not viral:
strain = ' '.join(entry['organism_name'].split(' ')[2:])
return strain
return entry['assembly_accession']
def cleanup(strain):
strain = strain.strip()
strain = strain.replace(' ', '_')
strain = strain.replace(';', '_')
strain = strain.replace('/', '_')
strain = strain.replace('\\', '_')
return strain
return cleanup(get_strain(entry)) | [
"def",
"get_strain_label",
"(",
"entry",
",",
"viral",
"=",
"False",
")",
":",
"def",
"get_strain",
"(",
"entry",
")",
":",
"strain",
"=",
"entry",
"[",
"'infraspecific_name'",
"]",
"if",
"strain",
"!=",
"''",
":",
"strain",
"=",
"strain",
".",
"split",
... | Try to extract a strain from an assemly summary entry.
First this checks 'infraspecific_name', then 'isolate', then
it tries to get it from 'organism_name'. If all fails, it
falls back to just returning the assembly accesion number. | [
"Try",
"to",
"extract",
"a",
"strain",
"from",
"an",
"assemly",
"summary",
"entry",
"."
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L582-L613 | train | 201,696 |
def pretty(d, indent=0):
    """A prettier way to print nested dicts
    """
    pad = ' ' * indent
    for key, value in d.items():
        # Keys go to stdout; leaf values go to stderr one level deeper.
        print(pad + str(key))
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            sys.stderr.write(pad + ' ' + str(value) + '\n')
"""A prettier way to print nested dicts
"""
for key, value in d.items():
print(' ' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent+1)
else:
sys.stderr.write(' ' * (indent+1) + str(value) + '\n') | [
"def",
"pretty",
"(",
"d",
",",
"indent",
"=",
"0",
")",
":",
"for",
"key",
",",
"value",
"in",
"d",
".",
"items",
"(",
")",
":",
"print",
"(",
"' '",
"*",
"indent",
"+",
"str",
"(",
"key",
")",
")",
"if",
"isinstance",
"(",
"value",
",",
"d... | A prettier way to print nested dicts | [
"A",
"prettier",
"way",
"to",
"print",
"nested",
"dicts"
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/contrib/gimme_taxa.py#L85-L93 | train | 201,697 |
def desc_taxa(taxid, ncbi, outFH, just_taxids=False):
    """Write descendent taxa for taxid
    """
    # Main feature of the script is to get all taxa within a given group.
    descendants = ncbi.get_descendant_taxa(taxid)
    names = ncbi.translate_to_names(descendants)

    if just_taxids:
        for dt in descendants:
            outFH.write(str(dt) + '\n')
    else:
        for name, dt in zip(names, descendants):
            outFH.write('\t'.join([str(taxid), str(dt), str(name)]) + '\n')
"""Write descendent taxa for taxid
"""
# Main feature of the script is to get all taxa within a given group.
descendent_taxa = ncbi.get_descendant_taxa(taxid)
descendent_taxa_names = ncbi.translate_to_names(descendent_taxa)
if just_taxids:
for taxid in descendent_taxa:
outFH.write(str(taxid) + '\n')
else:
for dtn, dt in zip(descendent_taxa_names, descendent_taxa):
x = [str(x) for x in [taxid, dt, dtn]]
outFH.write('\t'.join(x) + '\n') | [
"def",
"desc_taxa",
"(",
"taxid",
",",
"ncbi",
",",
"outFH",
",",
"just_taxids",
"=",
"False",
")",
":",
"# Main feature of the script is to get all taxa within a given group.\t",
"descendent_taxa",
"=",
"ncbi",
".",
"get_descendant_taxa",
"(",
"taxid",
")",
"descendent... | Write descendent taxa for taxid | [
"Write",
"descendent",
"taxa",
"for",
"taxid"
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/contrib/gimme_taxa.py#L95-L108 | train | 201,698 |
def taxon_info(taxid, ncbi, outFH):
    """Write info on taxid
    """
    taxid = int(taxid)
    name = ncbi.get_taxid_translator([taxid])[taxid]
    rank = next(iter(ncbi.get_rank([taxid]).values()))
    # Render the lineage as "taxid:name" pairs joined by ';'.
    translated = ncbi.get_taxid_translator(ncbi.get_lineage(taxid))
    lineage = ';'.join('{}:{}'.format(k, v) for k, v in translated.items())
    fields = [str(f) for f in (name, taxid, rank, lineage)]
    outFH.write('\t'.join(fields) + '\n')
"""Write info on taxid
"""
taxid = int(taxid)
tax_name = ncbi.get_taxid_translator([taxid])[taxid]
rank = list(ncbi.get_rank([taxid]).values())[0]
lineage = ncbi.get_taxid_translator(ncbi.get_lineage(taxid))
lineage = ['{}:{}'.format(k,v) for k,v in lineage.items()]
lineage = ';'.join(lineage)
x = [str(x) for x in [tax_name, taxid, rank, lineage]]
outFH.write('\t'.join(x) + '\n') | [
"def",
"taxon_info",
"(",
"taxid",
",",
"ncbi",
",",
"outFH",
")",
":",
"taxid",
"=",
"int",
"(",
"taxid",
")",
"tax_name",
"=",
"ncbi",
".",
"get_taxid_translator",
"(",
"[",
"taxid",
"]",
")",
"[",
"taxid",
"]",
"rank",
"=",
"list",
"(",
"ncbi",
... | Write info on taxid | [
"Write",
"info",
"on",
"taxid"
] | dc55382d351c29e1027be8fa3876701762c1d752 | https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/contrib/gimme_taxa.py#L110-L120 | train | 201,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.