repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/productmilestones_api.py
ProductmilestonesApi.get_distributed_builds
def get_distributed_builds(self, id, **kwargs):
    """Get the set of builds which produced artifacts distributed/shipped in a Product Milestone.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a ``callback`` function to be invoked
    when the response is received; the request thread is then returned.

    >>> thread = api.get_distributed_builds(id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: Product Milestone id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildRecordPage
             If the method is called asynchronously, returns the request thread.
    """
    # Only the response payload is wanted; the sync and async paths both
    # delegate to the *_with_http_info variant with identical arguments.
    kwargs['_return_http_data_only'] = True
    return self.get_distributed_builds_with_http_info(id, **kwargs)
python
def get_distributed_builds(self, id, **kwargs): """ Gets the set of builds which produced artifacts distributed/shipped in a Product Milestone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_distributed_builds(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product Milestone id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildRecordPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_distributed_builds_with_http_info(id, **kwargs) else: (data) = self.get_distributed_builds_with_http_info(id, **kwargs) return data
[ "def", "get_distributed_builds", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "get_distributed_builds_w...
Gets the set of builds which produced artifacts distributed/shipped in a Product Milestone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_distributed_builds(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product Milestone id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildRecordPage If the method is called asynchronously, returns the request thread.
[ "Gets", "the", "set", "of", "builds", "which", "produced", "artifacts", "distributed", "/", "shipped", "in", "a", "Product", "Milestone", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous"...
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/productmilestones_api.py#L830-L858
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/productmilestones_api.py
ProductmilestonesApi.get_performed_builds
def get_performed_builds(self, id, **kwargs):
    """Get the set of builds performed during a Product Milestone cycle.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a ``callback`` function to be invoked
    when the response is received; the request thread is then returned.

    >>> thread = api.get_performed_builds(id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: Product Milestone id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildRecordPage
             If the method is called asynchronously, returns the request thread.
    """
    # Only the response payload is wanted; the sync and async paths both
    # delegate to the *_with_http_info variant with identical arguments.
    kwargs['_return_http_data_only'] = True
    return self.get_performed_builds_with_http_info(id, **kwargs)
python
def get_performed_builds(self, id, **kwargs): """ Gets the set of builds performed during in a Product Milestone cycle This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_performed_builds(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product Milestone id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildRecordPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_performed_builds_with_http_info(id, **kwargs) else: (data) = self.get_performed_builds_with_http_info(id, **kwargs) return data
[ "def", "get_performed_builds", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "get_performed_builds_with_...
Gets the set of builds performed during in a Product Milestone cycle This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_performed_builds(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product Milestone id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildRecordPage If the method is called asynchronously, returns the request thread.
[ "Gets", "the", "set", "of", "builds", "performed", "during", "in", "a", "Product", "Milestone", "cycle", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", ...
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/productmilestones_api.py#L1058-L1086
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/productmilestones_api.py
ProductmilestonesApi.remove_distributed_artifact
def remove_distributed_artifact(self, id, artifact_id, **kwargs):
    """Remove an artifact from the specified product milestone.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a ``callback`` function to be invoked
    when the response is received; the request thread is then returned.

    >>> thread = api.remove_distributed_artifact(id, artifact_id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: Product milestone id (required)
    :param int artifact_id: Artifact id (required)
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    # Only the response payload is wanted; the sync and async paths both
    # delegate to the *_with_http_info variant with identical arguments.
    kwargs['_return_http_data_only'] = True
    return self.remove_distributed_artifact_with_http_info(id, artifact_id, **kwargs)
python
def remove_distributed_artifact(self, id, artifact_id, **kwargs): """ Removes an artifact from the specified product milestone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_distributed_artifact(id, artifact_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product milestone id (required) :param int artifact_id: Artifact id (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.remove_distributed_artifact_with_http_info(id, artifact_id, **kwargs) else: (data) = self.remove_distributed_artifact_with_http_info(id, artifact_id, **kwargs) return data
[ "def", "remove_distributed_artifact", "(", "self", ",", "id", ",", "artifact_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", "."...
Removes an artifact from the specified product milestone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_distributed_artifact(id, artifact_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product milestone id (required) :param int artifact_id: Artifact id (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Removes", "an", "artifact", "from", "the", "specified", "product", "milestone", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", ...
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/productmilestones_api.py#L1286-L1311
cosven/feeluown-core
fuocore/xiami/models.py
XUserModel.playlists
def playlists(self):
    """Playlists created by the user, fetched lazily and cached.

    Note: unless the requester is the user themself, the user's default
    curated collection is not included.
    """
    if self._playlists is None:
        raw_items = self._api.user_playlists(self.identifier)
        self._playlists = [_deserialize(item, PlaylistSchema)
                           for item in raw_items]
    return self._playlists
python
def playlists(self): """获取用户创建的歌单 如果不是用户本人,则不能获取用户默认精选集 """ if self._playlists is None: playlists_data = self._api.user_playlists(self.identifier) playlists = [] for playlist_data in playlists_data: playlist = _deserialize(playlist_data, PlaylistSchema) playlists.append(playlist) self._playlists = playlists return self._playlists
[ "def", "playlists", "(", "self", ")", ":", "if", "self", ".", "_playlists", "is", "None", ":", "playlists_data", "=", "self", ".", "_api", ".", "user_playlists", "(", "self", ".", "identifier", ")", "playlists", "=", "[", "]", "for", "playlist_data", "in...
获取用户创建的歌单 如果不是用户本人,则不能获取用户默认精选集
[ "获取用户创建的歌单" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/xiami/models.py#L157-L169
cosven/feeluown-core
fuocore/xiami/models.py
XUserModel.fav_songs
def fav_songs(self):
    """Songs the user has marked as favorite, fetched lazily and cached.

    FIXME: support fetching *all* favorite songs (currently only the
    first batch returned by the API is loaded).

    :return: the cached list of favorite song models (possibly empty).
    """
    if self._fav_songs is None:
        songs_data = self._api.user_favorite_songs(self.identifier)
        # Cache an empty list even when the API returns nothing so the
        # request is not re-issued on the next access.
        self._fav_songs = []
        if songs_data:
            for song_data in songs_data:
                song = _deserialize(song_data, NestedSongSchema)
                self._fav_songs.append(song)
    # BUGFIX: the original did a bare ``return`` (i.e. returned None) on
    # the first call when songs_data was empty, while later calls returned
    # the cached [] — always return the cached list instead.
    return self._fav_songs
python
def fav_songs(self): """ FIXME: 支持获取所有的收藏歌曲 """ if self._fav_songs is None: songs_data = self._api.user_favorite_songs(self.identifier) self._fav_songs = [] if not songs_data: return for song_data in songs_data: song = _deserialize(song_data, NestedSongSchema) self._fav_songs.append(song) return self._fav_songs
[ "def", "fav_songs", "(", "self", ")", ":", "if", "self", ".", "_fav_songs", "is", "None", ":", "songs_data", "=", "self", ".", "_api", ".", "user_favorite_songs", "(", "self", ".", "identifier", ")", "self", ".", "_fav_songs", "=", "[", "]", "if", "not...
FIXME: 支持获取所有的收藏歌曲
[ "FIXME", ":", "支持获取所有的收藏歌曲" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/xiami/models.py#L191-L203
kurtraschke/pyRFC3339
pyrfc3339/parser.py
parse
def parse(timestamp, utc=False, produce_naive=False):
    '''
    Parse an :RFC:`3339`-formatted timestamp and return a `datetime.datetime`.

    If the timestamp is presented in UTC, then the `tzinfo` parameter of the
    returned `datetime` will be set to `pytz.utc`.

    >>> parse('2009-01-01T10:01:02Z')
    datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>)

    Otherwise, a `tzinfo` instance is created with the appropriate offset, and
    the `tzinfo` parameter of the returned `datetime` is set to that value.

    >>> parse('2009-01-01T14:01:02-04:00')
    datetime.datetime(2009, 1, 1, 14, 1, 2, tzinfo=<UTC-04:00>)

    However, if `parse()` is called with `utc=True`, then the returned
    `datetime` will be normalized to UTC (and its tzinfo parameter set to
    `pytz.utc`), regardless of the input timezone.

    >>> parse('2009-01-01T06:01:02-04:00', utc=True)
    datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>)

    The input is strictly required to conform to :RFC:`3339`; appropriate
    exceptions are thrown for invalid input.

    >>> parse('2009-01-01T06:01:02')
    Traceback (most recent call last):
    ...
    ValueError: timestamp does not conform to RFC 3339

    >>> parse('2009-01-01T25:01:02Z')
    Traceback (most recent call last):
    ...
    ValueError: hour must be in 0..23
    '''
    rfc3339_re = re.compile(r'''^(?:(?:(?P<date_fullyear>[0-9]{4})\-(?P<date_month>[0-9]{2})\-(?P<date_mday>[0-9]{2}))T(?:(?:(?P<time_hour>[0-9]{2})\:(?P<time_minute>[0-9]{2})\:(?P<time_second>[0-9]{2})(?P<time_secfrac>(?:\.[0-9]{1,}))?)(?P<time_offset>(?:Z|(?P<time_numoffset>(?P<time_houroffset>(?:\+|\-)[0-9]{2})\:(?P<time_minuteoffset>[0-9]{2}))))))$''', re.I | re.X)

    m = rfc3339_re.match(timestamp)
    if m is None:
        raise ValueError("timestamp does not conform to RFC 3339")

    # "Z" (either case) and a zero numeric offset all denote UTC.
    offset = m.group('time_offset')
    if offset in ("Z", "z", "+00:00", "-00:00"):
        tzinfo = None if produce_naive is True else pytz.utc
    else:
        if produce_naive is True:
            raise ValueError("cannot produce a naive datetime from "
                             "a local timestamp")
        tzinfo = FixedOffset(int(m.group('time_houroffset')),
                             int(m.group('time_minuteoffset')))

    frac = m.group('time_secfrac')
    microsecond = 0 if frac is None else int(round(float(frac) * 1000000))

    result = datetime(year=int(m.group('date_fullyear')),
                      month=int(m.group('date_month')),
                      day=int(m.group('date_mday')),
                      hour=int(m.group('time_hour')),
                      minute=int(m.group('time_minute')),
                      second=int(m.group('time_second')),
                      microsecond=microsecond,
                      tzinfo=tzinfo)
    if utc:
        result = result.astimezone(pytz.utc)
    return result
python
def parse(timestamp, utc=False, produce_naive=False): ''' Parse an :RFC:`3339`-formatted timestamp and return a `datetime.datetime`. If the timestamp is presented in UTC, then the `tzinfo` parameter of the returned `datetime` will be set to `pytz.utc`. >>> parse('2009-01-01T10:01:02Z') datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>) Otherwise, a `tzinfo` instance is created with the appropriate offset, and the `tzinfo` parameter of the returned `datetime` is set to that value. >>> parse('2009-01-01T14:01:02-04:00') datetime.datetime(2009, 1, 1, 14, 1, 2, tzinfo=<UTC-04:00>) However, if `parse()` is called with `utc=True`, then the returned `datetime` will be normalized to UTC (and its tzinfo parameter set to `pytz.utc`), regardless of the input timezone. >>> parse('2009-01-01T06:01:02-04:00', utc=True) datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>) The input is strictly required to conform to :RFC:`3339`, and appropriate exceptions are thrown for invalid input. >>> parse('2009-01-01T06:01:02') Traceback (most recent call last): ... ValueError: timestamp does not conform to RFC 3339 >>> parse('2009-01-01T25:01:02Z') Traceback (most recent call last): ... 
ValueError: hour must be in 0..23 ''' parse_re = re.compile(r'''^(?:(?:(?P<date_fullyear>[0-9]{4})\-(?P<date_month>[0-9]{2})\-(?P<date_mday>[0-9]{2}))T(?:(?:(?P<time_hour>[0-9]{2})\:(?P<time_minute>[0-9]{2})\:(?P<time_second>[0-9]{2})(?P<time_secfrac>(?:\.[0-9]{1,}))?)(?P<time_offset>(?:Z|(?P<time_numoffset>(?P<time_houroffset>(?:\+|\-)[0-9]{2})\:(?P<time_minuteoffset>[0-9]{2}))))))$''', re.I | re.X) match = parse_re.match(timestamp) if match is not None: if match.group('time_offset') in ["Z", "z", "+00:00", "-00:00"]: if produce_naive is True: tzinfo = None else: tzinfo = pytz.utc else: if produce_naive is True: raise ValueError("cannot produce a naive datetime from " + "a local timestamp") else: tzinfo = FixedOffset(int(match.group('time_houroffset')), int(match.group('time_minuteoffset'))) secfrac = match.group('time_secfrac') if secfrac is None: microsecond = 0 else: microsecond = int(round(float(secfrac) * 1000000)) dt_out = datetime(year=int(match.group('date_fullyear')), month=int(match.group('date_month')), day=int(match.group('date_mday')), hour=int(match.group('time_hour')), minute=int(match.group('time_minute')), second=int(match.group('time_second')), microsecond=microsecond, tzinfo=tzinfo) if utc: dt_out = dt_out.astimezone(pytz.utc) return dt_out else: raise ValueError("timestamp does not conform to RFC 3339")
[ "def", "parse", "(", "timestamp", ",", "utc", "=", "False", ",", "produce_naive", "=", "False", ")", ":", "parse_re", "=", "re", ".", "compile", "(", "r'''^(?:(?:(?P<date_fullyear>[0-9]{4})\\-(?P<date_month>[0-9]{2})\\-(?P<date_mday>[0-9]{2}))T(?:(?:(?P<time_hour>[0-9]{2})\\:...
Parse an :RFC:`3339`-formatted timestamp and return a `datetime.datetime`. If the timestamp is presented in UTC, then the `tzinfo` parameter of the returned `datetime` will be set to `pytz.utc`. >>> parse('2009-01-01T10:01:02Z') datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>) Otherwise, a `tzinfo` instance is created with the appropriate offset, and the `tzinfo` parameter of the returned `datetime` is set to that value. >>> parse('2009-01-01T14:01:02-04:00') datetime.datetime(2009, 1, 1, 14, 1, 2, tzinfo=<UTC-04:00>) However, if `parse()` is called with `utc=True`, then the returned `datetime` will be normalized to UTC (and its tzinfo parameter set to `pytz.utc`), regardless of the input timezone. >>> parse('2009-01-01T06:01:02-04:00', utc=True) datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>) The input is strictly required to conform to :RFC:`3339`, and appropriate exceptions are thrown for invalid input. >>> parse('2009-01-01T06:01:02') Traceback (most recent call last): ... ValueError: timestamp does not conform to RFC 3339 >>> parse('2009-01-01T25:01:02Z') Traceback (most recent call last): ... ValueError: hour must be in 0..23
[ "Parse", "an", ":", "RFC", ":", "3339", "-", "formatted", "timestamp", "and", "return", "a", "datetime", ".", "datetime", "." ]
train
https://github.com/kurtraschke/pyRFC3339/blob/e30cc1555adce0ecc7bd65509a2249d47e5a41b4/pyrfc3339/parser.py#L9-L87
cosven/feeluown-core
fuocore/local/provider.py
add_song
def add_song(fpath, g_songs, g_artists, g_albums):
    """Parse a music file's metadata with mutagen and register its models.

    :param fpath: path of the media file
    :param g_songs: global ``{song_id: song}`` registry, updated in place
    :param g_artists: global ``{artist_id: artist}`` registry, updated in place
    :param g_albums: global ``{album_id: album}`` registry, updated in place
    :return: ``None`` — models are stored into the registries as a side effect
    """
    try:
        if fpath.endswith('mp3') or fpath.endswith('ogg') or fpath.endswith('wma'):
            metadata = EasyMP3(fpath)
        elif fpath.endswith('m4a'):
            metadata = EasyMP4(fpath)
        else:
            # BUGFIX: an unsupported extension previously fell through and
            # crashed with UnboundLocalError when `metadata` was used below.
            return None
    except MutagenError:
        logger.exception('Mutagen parse metadata failed, ignore.')
        return None

    metadata_dict = dict(metadata)
    for key in metadata.keys():
        metadata_dict[key] = metadata_dict[key][0]
    if 'title' not in metadata_dict:
        # Fall back to the file name (without extension) as the title.
        title = fpath.rsplit('/')[-1].split('.')[0]
        metadata_dict['title'] = title
    metadata_dict.update(dict(
        url=fpath,
        duration=metadata.info.length * 1000  # millisecond
    ))
    schema = EasyMP3MetadataSongSchema(strict=True)
    try:
        data, _ = schema.load(metadata_dict)
    except ValidationError:
        # BUGFIX: was `logger.exeception`, which raised AttributeError
        # instead of logging the validation failure.
        logger.exception('解析音乐文件({}) 元数据失败'.format(fpath))
        return

    # NOTE: use {title}-{artists_name}-{album_name} as the song identifier.
    title = data['title']
    album_name = data['album_name']
    artist_name_list = [
        name.strip()
        for name in re.split(r'[,&]', data['artists_name'])]
    artists_name = ','.join(artist_name_list)
    duration = data['duration']
    album_artist_name = data['album_artist_name']

    # Build the song model.
    # Keep the id string minimal to reduce the chance of id collisions.
    song_id_str = ''.join([title, artists_name, album_name,
                           str(int(duration))])
    song_id = gen_id(song_id_str)
    if song_id not in g_songs:
        # The album and lyric fields are left uninitialized here.
        song = LSongModel(identifier=song_id,
                          artists=[],
                          title=title,
                          url=fpath,
                          duration=duration,
                          comments=[],
                          # the fields below are not exposed externally
                          genre=data['genre'],
                          cover=data['cover'],
                          date=data['date'],
                          desc=data['desc'],
                          disc=data['disc'],
                          track=data['track'])
        g_songs[song_id] = song
    else:
        song = g_songs[song_id]
        logger.debug('Duplicate song: %s %s', song.url, fpath)
        return

    # Build (or reuse) the album-artist model.
    album_artist_id = gen_id(album_artist_name)
    if album_artist_id not in g_artists:
        album_artist = create_artist(album_artist_id, album_artist_name)
        g_artists[album_artist_id] = album_artist
    else:
        album_artist = g_artists[album_artist_id]

    # Build (or reuse) the album model.
    album_id_str = album_name + album_artist_name
    album_id = gen_id(album_id_str)
    if album_id not in g_albums:
        album = create_album(album_id, album_name)
        g_albums[album_id] = album
    else:
        album = g_albums[album_id]

    # Cross-link album <-> album artist and album <-> song.
    if album not in album_artist.albums:
        album_artist.albums.append(album)
    if album_artist not in album.artists:
        album.artists.append(album_artist)
    if song not in album.songs:
        album.songs.append(song)

    # Cross-link song <-> artists, and each artist's song list.
    song.album = album
    for artist_name in artist_name_list:
        artist_id = gen_id(artist_name)
        if artist_id in g_artists:
            artist = g_artists[artist_id]
        else:
            artist = create_artist(identifier=artist_id, name=artist_name)
            g_artists[artist_id] = artist
        if artist not in song.artists:
            song.artists.append(artist)
        if song not in artist.songs:
            artist.songs.append(song)
python
def add_song(fpath, g_songs, g_artists, g_albums): """ parse music file metadata with Easymp3 and return a song model. """ try: if fpath.endswith('mp3') or fpath.endswith('ogg') or fpath.endswith('wma'): metadata = EasyMP3(fpath) elif fpath.endswith('m4a'): metadata = EasyMP4(fpath) except MutagenError as e: logger.exception('Mutagen parse metadata failed, ignore.') return None metadata_dict = dict(metadata) for key in metadata.keys(): metadata_dict[key] = metadata_dict[key][0] if 'title' not in metadata_dict: title = fpath.rsplit('/')[-1].split('.')[0] metadata_dict['title'] = title metadata_dict.update(dict( url=fpath, duration=metadata.info.length * 1000 # milesecond )) schema = EasyMP3MetadataSongSchema(strict=True) try: data, _ = schema.load(metadata_dict) except ValidationError: logger.exeception('解析音乐文件({}) 元数据失败'.format(fpath)) return # NOTE: use {title}-{artists_name}-{album_name} as song identifier title = data['title'] album_name = data['album_name'] artist_name_list = [ name.strip() for name in re.split(r'[,&]', data['artists_name'])] artists_name = ','.join(artist_name_list) duration = data['duration'] album_artist_name = data['album_artist_name'] # 生成 song model # 用来生成 id 的字符串应该尽量减少无用信息,这样或许能减少 id 冲突概率 song_id_str = ''.join([title, artists_name, album_name, str(int(duration))]) song_id = gen_id(song_id_str) if song_id not in g_songs: # 剩下 album, lyric 三个字段没有初始化 song = LSongModel(identifier=song_id, artists=[], title=title, url=fpath, duration=duration, comments=[], # 下面这些字段不向外暴露 genre=data['genre'], cover=data['cover'], date=data['date'], desc=data['desc'], disc=data['disc'], track=data['track']) g_songs[song_id] = song else: song = g_songs[song_id] logger.debug('Duplicate song: %s %s', song.url, fpath) return # 生成 album artist model album_artist_id = gen_id(album_artist_name) if album_artist_id not in g_artists: album_artist = create_artist(album_artist_id, album_artist_name) g_artists[album_artist_id] = album_artist else: album_artist = 
g_artists[album_artist_id] # 生成 album model album_id_str = album_name + album_artist_name album_id = gen_id(album_id_str) if album_id not in g_albums: album = create_album(album_id, album_name) g_albums[album_id] = album else: album = g_albums[album_id] # 处理专辑的歌手信息和歌曲信息,专辑歌手的专辑列表信息 if album not in album_artist.albums: album_artist.albums.append(album) if album_artist not in album.artists: album.artists.append(album_artist) if song not in album.songs: album.songs.append(song) # 处理歌曲的歌手和专辑信息,以及歌手的歌曲列表 song.album = album for artist_name in artist_name_list: artist_id = gen_id(artist_name) if artist_id in g_artists: artist = g_artists[artist_id] else: artist = create_artist(identifier=artist_id, name=artist_name) g_artists[artist_id] = artist if artist not in song.artists: song.artists.append(artist) if song not in artist.songs: artist.songs.append(song)
[ "def", "add_song", "(", "fpath", ",", "g_songs", ",", "g_artists", ",", "g_albums", ")", ":", "try", ":", "if", "fpath", ".", "endswith", "(", "'mp3'", ")", "or", "fpath", ".", "endswith", "(", "'ogg'", ")", "or", "fpath", ".", "endswith", "(", "'wma...
parse music file metadata with Easymp3 and return a song model.
[ "parse", "music", "file", "metadata", "with", "Easymp3", "and", "return", "a", "song", "model", "." ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/local/provider.py#L67-L170
cosven/feeluown-core
fuocore/local/provider.py
Library.scan
def scan(self, paths=None, depth=2):
    """Scan the given directories for media files and add them to the library.

    :param paths: directories to scan; defaults to the default music folder
    :param depth: recursion depth, capped at 3
    """
    exts = ['mp3', 'ogg', 'wma', 'm4a']
    paths = paths or [Library.DEFAULT_MUSIC_FOLDER]
    depth = min(depth, 3)

    media_files = []
    for directory in paths:
        logger.debug('正在扫描目录(%s)...', directory)
        media_files.extend(scan_directory(directory, exts, depth))
    logger.info('共扫描到 %d 个音乐文件,准备将其录入本地音乐库',
                len(media_files))

    for fpath in media_files:
        add_song(fpath, self._songs, self._artists, self._albums)
    logger.info('录入本地音乐库完毕')
python
def scan(self, paths=None, depth=2): """scan media files in all paths """ song_exts = ['mp3', 'ogg', 'wma', 'm4a'] exts = song_exts paths = paths or [Library.DEFAULT_MUSIC_FOLDER] depth = depth if depth <= 3 else 3 media_files = [] for directory in paths: logger.debug('正在扫描目录(%s)...', directory) media_files.extend(scan_directory(directory, exts, depth)) logger.info('共扫描到 %d 个音乐文件,准备将其录入本地音乐库', len(media_files)) for fpath in media_files: add_song(fpath, self._songs, self._artists, self._albums) logger.info('录入本地音乐库完毕')
[ "def", "scan", "(", "self", ",", "paths", "=", "None", ",", "depth", "=", "2", ")", ":", "song_exts", "=", "[", "'mp3'", ",", "'ogg'", ",", "'wma'", ",", "'m4a'", "]", "exts", "=", "song_exts", "paths", "=", "paths", "or", "[", "Library", ".", "D...
scan media files in all paths
[ "scan", "media", "files", "in", "all", "paths" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/local/provider.py#L194-L209
SmartTeleMax/iktomi
iktomi/web/core.py
cases.cases
def cases(self, env, data):
    '''Call each nested handler in turn until one returns a non-``None`` result.

    A handler returning ``None`` means "this request does not match; the
    handler has nothing to do with it and `web.cases` should try the next
    handler". Env and data state are pushed before each call and popped
    afterwards, even when the handler raises.'''
    for nested in self.handlers:
        env._push()
        data._push()
        try:
            outcome = nested(env, data)
        finally:
            env._pop()
            data._pop()
        if outcome is not None:
            return outcome
    return None
python
def cases(self, env, data): '''Calls each nested handler until one of them returns nonzero result. If any handler returns `None`, it is interpreted as "request does not match, the handler has nothing to do with it and `web.cases` should try to call the next handler".''' for handler in self.handlers: env._push() data._push() try: result = handler(env, data) finally: env._pop() data._pop() if result is not None: return result
[ "def", "cases", "(", "self", ",", "env", ",", "data", ")", ":", "for", "handler", "in", "self", ".", "handlers", ":", "env", ".", "_push", "(", ")", "data", ".", "_push", "(", ")", "try", ":", "result", "=", "handler", "(", "env", ",", "data", ...
Calls each nested handler until one of them returns nonzero result. If any handler returns `None`, it is interpreted as "request does not match, the handler has nothing to do with it and `web.cases` should try to call the next handler".
[ "Calls", "each", "nested", "handler", "until", "one", "of", "them", "returns", "nonzero", "result", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/web/core.py#L123-L138
mithro/python-datetime-tz
datetime_tz/update_win32tz_map.py
create_win32tz_map
def create_win32tz_map(windows_zones_xml):
  """Creates a map between Windows and Olson timezone names.

  Args:
    windows_zones_xml: The CLDR XML mapping document as a string.

  Yields:
    (win32_name, territory, olson_name, comment) tuples, ordered by
    (win32_name, territory).
  """
  pending_comment = None
  current_win32 = None
  current_territory = None
  stream = genshi.input.XMLParser(StringIO(windows_zones_xml))
  zone_map = {}
  comment_map = {}

  for kind, data, _ in stream:
    if kind == genshi.core.START and str(data[0]) == "mapZone":
      attrs = data[1]
      current_win32 = attrs.get("other")
      current_territory = attrs.get("territory")
      olson_name = attrs.get("type").split(" ")[0]
      zone_map[(current_win32, current_territory)] = olson_name
    elif kind == genshi.core.END and str(data) == "mapZone" and current_win32:
      # An XML comment immediately preceding this mapZone describes it.
      if pending_comment:
        comment_map[(current_win32, current_territory)] = pending_comment
        pending_comment = None
      current_win32 = None
    elif kind == genshi.core.COMMENT:
      pending_comment = data.strip()
    elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT):
      # Any other intervening markup invalidates a pending comment.
      pending_comment = None

  for key in sorted(zone_map):
    win32_name, territory = key
    yield (win32_name, territory, zone_map[key],
           comment_map.get(key, None))
python
def create_win32tz_map(windows_zones_xml): """Creates a map between Windows and Olson timezone names. Args: windows_zones_xml: The CLDR XML mapping. Yields: (win32_name, olson_name, comment) """ coming_comment = None win32_name = None territory = None parser = genshi.input.XMLParser(StringIO(windows_zones_xml)) map_zones = {} zone_comments = {} for kind, data, _ in parser: if kind == genshi.core.START and str(data[0]) == "mapZone": attrs = data[1] win32_name, territory, olson_name = ( attrs.get("other"), attrs.get("territory"), attrs.get("type").split(" ")[0]) map_zones[(win32_name, territory)] = olson_name elif kind == genshi.core.END and str(data) == "mapZone" and win32_name: if coming_comment: zone_comments[(win32_name, territory)] = coming_comment coming_comment = None win32_name = None elif kind == genshi.core.COMMENT: coming_comment = data.strip() elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT): coming_comment = None for win32_name, territory in sorted(map_zones): yield (win32_name, territory, map_zones[(win32_name, territory)], zone_comments.get((win32_name, territory), None))
[ "def", "create_win32tz_map", "(", "windows_zones_xml", ")", ":", "coming_comment", "=", "None", "win32_name", "=", "None", "territory", "=", "None", "parser", "=", "genshi", ".", "input", ".", "XMLParser", "(", "StringIO", "(", "windows_zones_xml", ")", ")", "...
Creates a map between Windows and Olson timezone names. Args: windows_zones_xml: The CLDR XML mapping. Yields: (win32_name, olson_name, comment)
[ "Creates", "a", "map", "between", "Windows", "and", "Olson", "timezone", "names", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/update_win32tz_map.py#L57-L92
mithro/python-datetime-tz
datetime_tz/update_win32tz_map.py
update_stored_win32tz_map
def update_stored_win32tz_map(): """Downloads the cldr win32 timezone map and stores it in win32tz_map.py.""" windows_zones_xml = download_cldr_win32tz_map_xml() source_hash = hashlib.md5(windows_zones_xml).hexdigest() if hasattr(windows_zones_xml, "decode"): windows_zones_xml = windows_zones_xml.decode("utf-8") map_zones = create_win32tz_map(windows_zones_xml) map_dir = os.path.dirname(os.path.abspath(__file__)) map_filename = os.path.join(map_dir, "win32tz_map.py") if os.path.exists(map_filename): reload(win32tz_map) current_hash = getattr(win32tz_map, "source_hash", None) if current_hash == source_hash: return False map_file = open(map_filename, "w") comment = "Map between Windows and Olson timezones taken from %s" % ( _CLDR_WINZONES_URL,) comment2 = "Generated automatically from datetime_tz.py" map_file.write("'''%s\n" % comment) map_file.write("%s'''\n" % comment2) map_file.write("source_hash = '%s' # md5 sum of xml source data\n" % ( source_hash)) map_file.write("win32timezones = {\n") for win32_name, territory, olson_name, comment in map_zones: if territory == '001': map_file.write(" %r: %r, # %s\n" % ( str(win32_name), str(olson_name), comment or "")) else: map_file.write(" %r: %r, # %s\n" % ( (str(win32_name), str(territory)), str(olson_name), comment or "")) map_file.write("}\n") map_file.close() return True
python
def update_stored_win32tz_map(): """Downloads the cldr win32 timezone map and stores it in win32tz_map.py.""" windows_zones_xml = download_cldr_win32tz_map_xml() source_hash = hashlib.md5(windows_zones_xml).hexdigest() if hasattr(windows_zones_xml, "decode"): windows_zones_xml = windows_zones_xml.decode("utf-8") map_zones = create_win32tz_map(windows_zones_xml) map_dir = os.path.dirname(os.path.abspath(__file__)) map_filename = os.path.join(map_dir, "win32tz_map.py") if os.path.exists(map_filename): reload(win32tz_map) current_hash = getattr(win32tz_map, "source_hash", None) if current_hash == source_hash: return False map_file = open(map_filename, "w") comment = "Map between Windows and Olson timezones taken from %s" % ( _CLDR_WINZONES_URL,) comment2 = "Generated automatically from datetime_tz.py" map_file.write("'''%s\n" % comment) map_file.write("%s'''\n" % comment2) map_file.write("source_hash = '%s' # md5 sum of xml source data\n" % ( source_hash)) map_file.write("win32timezones = {\n") for win32_name, territory, olson_name, comment in map_zones: if territory == '001': map_file.write(" %r: %r, # %s\n" % ( str(win32_name), str(olson_name), comment or "")) else: map_file.write(" %r: %r, # %s\n" % ( (str(win32_name), str(territory)), str(olson_name), comment or "")) map_file.write("}\n") map_file.close() return True
[ "def", "update_stored_win32tz_map", "(", ")", ":", "windows_zones_xml", "=", "download_cldr_win32tz_map_xml", "(", ")", "source_hash", "=", "hashlib", ".", "md5", "(", "windows_zones_xml", ")", ".", "hexdigest", "(", ")", "if", "hasattr", "(", "windows_zones_xml", ...
Downloads the cldr win32 timezone map and stores it in win32tz_map.py.
[ "Downloads", "the", "cldr", "win32", "timezone", "map", "and", "stores", "it", "in", "win32tz_map", ".", "py", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/update_win32tz_map.py#L95-L134
andreafioraldi/angrdbg
angrdbg/page_7.py
DbgPage.load_mo
def load_mo(self, state, page_idx): """ Loads a memory object from memory. :param page_idx: the index into the page :returns: a tuple of the object """ mo = self._storage[page_idx - self._page_addr] #print filter(lambda x: x != None, self._storage) if mo is None and hasattr(self, "from_dbg"): byte_val = get_debugger().get_byte(page_idx) mo = SimMemoryObject(claripy.BVV(byte_val, 8), page_idx) self._storage[page_idx - self._page_addr] = mo return mo
python
def load_mo(self, state, page_idx): """ Loads a memory object from memory. :param page_idx: the index into the page :returns: a tuple of the object """ mo = self._storage[page_idx - self._page_addr] #print filter(lambda x: x != None, self._storage) if mo is None and hasattr(self, "from_dbg"): byte_val = get_debugger().get_byte(page_idx) mo = SimMemoryObject(claripy.BVV(byte_val, 8), page_idx) self._storage[page_idx - self._page_addr] = mo return mo
[ "def", "load_mo", "(", "self", ",", "state", ",", "page_idx", ")", ":", "mo", "=", "self", ".", "_storage", "[", "page_idx", "-", "self", ".", "_page_addr", "]", "#print filter(lambda x: x != None, self._storage)", "if", "mo", "is", "None", "and", "hasattr", ...
Loads a memory object from memory. :param page_idx: the index into the page :returns: a tuple of the object
[ "Loads", "a", "memory", "object", "from", "memory", ".", ":", "param", "page_idx", ":", "the", "index", "into", "the", "page", ":", "returns", ":", "a", "tuple", "of", "the", "object" ]
train
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L78-L90
andreafioraldi/angrdbg
angrdbg/page_7.py
DbgPage.load_slice
def load_slice(self, state, start, end): """ Return the memory objects overlapping with the provided slice. :param start: the start address :param end: the end address (non-inclusive) :returns: tuples of (starting_addr, memory_object) """ items = [] if start > self._page_addr + self._page_size or end < self._page_addr: l.warning("Calling load_slice on the wrong page.") return items for addr in range(max(start, self._page_addr), min( end, self._page_addr + self._page_size)): i = addr - self._page_addr mo = self._storage[i] if mo is None and hasattr(self, "from_dbg"): byte_val = get_debugger().get_byte(addr) mo = SimMemoryObject(claripy.BVV(byte_val, 8), addr) self._storage[i] = mo if mo is not None and (not items or items[-1][1] is not mo): items.append((addr, mo)) #print filter(lambda x: x != None, self._storage) return items
python
def load_slice(self, state, start, end): """ Return the memory objects overlapping with the provided slice. :param start: the start address :param end: the end address (non-inclusive) :returns: tuples of (starting_addr, memory_object) """ items = [] if start > self._page_addr + self._page_size or end < self._page_addr: l.warning("Calling load_slice on the wrong page.") return items for addr in range(max(start, self._page_addr), min( end, self._page_addr + self._page_size)): i = addr - self._page_addr mo = self._storage[i] if mo is None and hasattr(self, "from_dbg"): byte_val = get_debugger().get_byte(addr) mo = SimMemoryObject(claripy.BVV(byte_val, 8), addr) self._storage[i] = mo if mo is not None and (not items or items[-1][1] is not mo): items.append((addr, mo)) #print filter(lambda x: x != None, self._storage) return items
[ "def", "load_slice", "(", "self", ",", "state", ",", "start", ",", "end", ")", ":", "items", "=", "[", "]", "if", "start", ">", "self", ".", "_page_addr", "+", "self", ".", "_page_size", "or", "end", "<", "self", ".", "_page_addr", ":", "l", ".", ...
Return the memory objects overlapping with the provided slice. :param start: the start address :param end: the end address (non-inclusive) :returns: tuples of (starting_addr, memory_object)
[ "Return", "the", "memory", "objects", "overlapping", "with", "the", "provided", "slice", ".", ":", "param", "start", ":", "the", "start", "address", ":", "param", "end", ":", "the", "end", "address", "(", "non", "-", "inclusive", ")", ":", "returns", ":"...
train
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L92-L115
andreafioraldi/angrdbg
angrdbg/page_7.py
SimDbgMemory.load_objects
def load_objects(self, addr, num_bytes, ret_on_segv=False): """ Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple """ result = [] end = addr + num_bytes for page_addr in self._containing_pages(addr, end): try: #print "Getting page %x" % (page_addr // self._page_size) page = self._get_page(page_addr // self._page_size) #print "... got it" except KeyError: #print "... missing" #print "... SEGV" # missing page if self.allow_segv: if ret_on_segv: break raise SimSegfaultError(addr, 'read-miss') else: continue if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ: #print "... SEGV" if ret_on_segv: break raise SimSegfaultError(addr, 'non-readable') result.extend(page.load_slice(self.state, addr, end)) return result
python
def load_objects(self, addr, num_bytes, ret_on_segv=False): """ Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple """ result = [] end = addr + num_bytes for page_addr in self._containing_pages(addr, end): try: #print "Getting page %x" % (page_addr // self._page_size) page = self._get_page(page_addr // self._page_size) #print "... got it" except KeyError: #print "... missing" #print "... SEGV" # missing page if self.allow_segv: if ret_on_segv: break raise SimSegfaultError(addr, 'read-miss') else: continue if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ: #print "... SEGV" if ret_on_segv: break raise SimSegfaultError(addr, 'non-readable') result.extend(page.load_slice(self.state, addr, end)) return result
[ "def", "load_objects", "(", "self", ",", "addr", ",", "num_bytes", ",", "ret_on_segv", "=", "False", ")", ":", "result", "=", "[", "]", "end", "=", "addr", "+", "num_bytes", "for", "page_addr", "in", "self", ".", "_containing_pages", "(", "addr", ",", ...
Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple
[ "Load", "memory", "objects", "from", "paged", "memory", "." ]
train
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L247-L284
andreafioraldi/angrdbg
angrdbg/page_7.py
SimDbgMemory.memory_objects_for_name
def memory_objects_for_name(self, n): """ Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the name of `n`. This is useful for replacing those values in one fell swoop with :func:`replace_memory_object()`, even if they have been partially overwritten. """ return set([self[i] for i in self.addrs_for_name(n)])
python
def memory_objects_for_name(self, n): """ Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the name of `n`. This is useful for replacing those values in one fell swoop with :func:`replace_memory_object()`, even if they have been partially overwritten. """ return set([self[i] for i in self.addrs_for_name(n)])
[ "def", "memory_objects_for_name", "(", "self", ",", "n", ")", ":", "return", "set", "(", "[", "self", "[", "i", "]", "for", "i", "in", "self", ".", "addrs_for_name", "(", "n", ")", "]", ")" ]
Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the name of `n`. This is useful for replacing those values in one fell swoop with :func:`replace_memory_object()`, even if they have been partially overwritten.
[ "Returns", "a", "set", "of", ":", "class", ":", "SimMemoryObjects", "that", "contain", "expressions", "that", "contain", "a", "variable", "with", "the", "name", "of", "n", "." ]
train
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L858-L866
andreafioraldi/angrdbg
angrdbg/page_7.py
SimDbgMemory.memory_objects_for_hash
def memory_objects_for_hash(self, n): """ Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash `h`. """ return set([self[i] for i in self.addrs_for_hash(n)])
python
def memory_objects_for_hash(self, n): """ Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash `h`. """ return set([self[i] for i in self.addrs_for_hash(n)])
[ "def", "memory_objects_for_hash", "(", "self", ",", "n", ")", ":", "return", "set", "(", "[", "self", "[", "i", "]", "for", "i", "in", "self", ".", "addrs_for_hash", "(", "n", ")", "]", ")" ]
Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash `h`.
[ "Returns", "a", "set", "of", ":", "class", ":", "SimMemoryObjects", "that", "contain", "expressions", "that", "contain", "a", "variable", "with", "the", "hash", "h", "." ]
train
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L868-L873
andreafioraldi/angrdbg
angrdbg/page_7.py
SimDbgMemory.permissions
def permissions(self, addr, permissions=None): """ Returns the permissions for a page at address `addr`. If optional argument permissions is given, set page permissions to that prior to returning permissions. """ if self.state.solver.symbolic(addr): raise SimMemoryError( "page permissions cannot currently be looked up for symbolic addresses") if isinstance(addr, claripy.ast.bv.BV): addr = self.state.solver.eval(addr) page_num = addr // self._page_size try: page = self._get_page(page_num) except KeyError: raise SimMemoryError("page does not exist at given address") # Set permissions for the page if permissions is not None: if isinstance(permissions, (int, long)): permissions = claripy.BVV(permissions, 3) if not isinstance(permissions, claripy.ast.bv.BV): raise SimMemoryError( "Unknown permissions argument type of {0}.".format( type(permissions))) page.permissions = permissions return page.permissions
python
def permissions(self, addr, permissions=None): """ Returns the permissions for a page at address `addr`. If optional argument permissions is given, set page permissions to that prior to returning permissions. """ if self.state.solver.symbolic(addr): raise SimMemoryError( "page permissions cannot currently be looked up for symbolic addresses") if isinstance(addr, claripy.ast.bv.BV): addr = self.state.solver.eval(addr) page_num = addr // self._page_size try: page = self._get_page(page_num) except KeyError: raise SimMemoryError("page does not exist at given address") # Set permissions for the page if permissions is not None: if isinstance(permissions, (int, long)): permissions = claripy.BVV(permissions, 3) if not isinstance(permissions, claripy.ast.bv.BV): raise SimMemoryError( "Unknown permissions argument type of {0}.".format( type(permissions))) page.permissions = permissions return page.permissions
[ "def", "permissions", "(", "self", ",", "addr", ",", "permissions", "=", "None", ")", ":", "if", "self", ".", "state", ".", "solver", ".", "symbolic", "(", "addr", ")", ":", "raise", "SimMemoryError", "(", "\"page permissions cannot currently be looked up for sy...
Returns the permissions for a page at address `addr`. If optional argument permissions is given, set page permissions to that prior to returning permissions.
[ "Returns", "the", "permissions", "for", "a", "page", "at", "address", "addr", "." ]
train
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L875-L908
CivicSpleen/ambry
ambry/bundle/process.py
call_interval
def call_interval(freq, **kwargs): """Decorator for the CallInterval wrapper""" def wrapper(f): return CallInterval(f, freq, **kwargs) return wrapper
python
def call_interval(freq, **kwargs): """Decorator for the CallInterval wrapper""" def wrapper(f): return CallInterval(f, freq, **kwargs) return wrapper
[ "def", "call_interval", "(", "freq", ",", "*", "*", "kwargs", ")", ":", "def", "wrapper", "(", "f", ")", ":", "return", "CallInterval", "(", "f", ",", "freq", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorator for the CallInterval wrapper
[ "Decorator", "for", "the", "CallInterval", "wrapper" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L431-L436
CivicSpleen/ambry
ambry/bundle/process.py
ProgressSection.add
def add(self, *args, **kwargs): """Add a new record to the section""" if self.start and self.start.state == 'done' and kwargs.get('log_action') != 'done': raise ProgressLoggingError("Can't add -- process section is done") self.augment_args(args, kwargs) kwargs['log_action'] = kwargs.get('log_action', 'add') rec = Process(**kwargs) self._session.add(rec) self.rec = rec if self._logger: self._logger.info(self.rec.log_str) self._session.commit() self._ai_rec_id = None return self.rec.id
python
def add(self, *args, **kwargs): """Add a new record to the section""" if self.start and self.start.state == 'done' and kwargs.get('log_action') != 'done': raise ProgressLoggingError("Can't add -- process section is done") self.augment_args(args, kwargs) kwargs['log_action'] = kwargs.get('log_action', 'add') rec = Process(**kwargs) self._session.add(rec) self.rec = rec if self._logger: self._logger.info(self.rec.log_str) self._session.commit() self._ai_rec_id = None return self.rec.id
[ "def", "add", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "start", "and", "self", ".", "start", ".", "state", "==", "'done'", "and", "kwargs", ".", "get", "(", "'log_action'", ")", "!=", "'done'", ":", "ra...
Add a new record to the section
[ "Add", "a", "new", "record", "to", "the", "section" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L111-L133
CivicSpleen/ambry
ambry/bundle/process.py
ProgressSection.update
def update(self, *args, **kwargs): """Update the last section record""" self.augment_args(args, kwargs) kwargs['log_action'] = kwargs.get('log_action', 'update') if not self.rec: return self.add(**kwargs) else: for k, v in kwargs.items(): # Don't update object; use whatever was set in the original record if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'): setattr(self.rec, k, v) self._session.merge(self.rec) if self._logger: self._logger.info(self.rec.log_str) self._session.commit() self._ai_rec_id = None return self.rec.id
python
def update(self, *args, **kwargs): """Update the last section record""" self.augment_args(args, kwargs) kwargs['log_action'] = kwargs.get('log_action', 'update') if not self.rec: return self.add(**kwargs) else: for k, v in kwargs.items(): # Don't update object; use whatever was set in the original record if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'): setattr(self.rec, k, v) self._session.merge(self.rec) if self._logger: self._logger.info(self.rec.log_str) self._session.commit() self._ai_rec_id = None return self.rec.id
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "augment_args", "(", "args", ",", "kwargs", ")", "kwargs", "[", "'log_action'", "]", "=", "kwargs", ".", "get", "(", "'log_action'", ",", "'update'", ")",...
Update the last section record
[ "Update", "the", "last", "section", "record" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L135-L157
CivicSpleen/ambry
ambry/bundle/process.py
ProgressSection.add_update
def add_update(self, *args, **kwargs): """A records is added, then on subsequent calls, updated""" if not self._ai_rec_id: self._ai_rec_id = self.add(*args, **kwargs) else: au_save = self._ai_rec_id self.update(*args, **kwargs) self._ai_rec_id = au_save return self._ai_rec_id
python
def add_update(self, *args, **kwargs): """A records is added, then on subsequent calls, updated""" if not self._ai_rec_id: self._ai_rec_id = self.add(*args, **kwargs) else: au_save = self._ai_rec_id self.update(*args, **kwargs) self._ai_rec_id = au_save return self._ai_rec_id
[ "def", "add_update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "_ai_rec_id", ":", "self", ".", "_ai_rec_id", "=", "self", ".", "add", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", ...
A records is added, then on subsequent calls, updated
[ "A", "records", "is", "added", "then", "on", "subsequent", "calls", "updated" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L159-L169
CivicSpleen/ambry
ambry/bundle/process.py
ProgressSection.update_done
def update_done(self, *args, **kwargs): """Clear out the previous update""" kwargs['state'] = 'done' self.update(*args, **kwargs) self.rec = None
python
def update_done(self, *args, **kwargs): """Clear out the previous update""" kwargs['state'] = 'done' self.update(*args, **kwargs) self.rec = None
[ "def", "update_done", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'state'", "]", "=", "'done'", "self", ".", "update", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "rec", "=", "None" ]
Clear out the previous update
[ "Clear", "out", "the", "previous", "update" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L171-L175
CivicSpleen/ambry
ambry/bundle/process.py
ProgressSection.done
def done(self, *args, **kwargs): """Mark the whole ProgressSection as done""" kwargs['state'] = 'done' pr_id = self.add(*args, log_action='done', **kwargs) self._session.query(Process).filter(Process.group == self._group).update({Process.state: 'done'}) self.start.state = 'done' self._session.commit() return pr_id
python
def done(self, *args, **kwargs): """Mark the whole ProgressSection as done""" kwargs['state'] = 'done' pr_id = self.add(*args, log_action='done', **kwargs) self._session.query(Process).filter(Process.group == self._group).update({Process.state: 'done'}) self.start.state = 'done' self._session.commit() return pr_id
[ "def", "done", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'state'", "]", "=", "'done'", "pr_id", "=", "self", ".", "add", "(", "*", "args", ",", "log_action", "=", "'done'", ",", "*", "*", "kwargs", ")", "...
Mark the whole ProgressSection as done
[ "Mark", "the", "whole", "ProgressSection", "as", "done" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L177-L186
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.start
def start(self, phase, stage, **kwargs): """Start a new routine, stage or phase""" return ProgressSection(self, self._session, phase, stage, self._logger, **kwargs)
python
def start(self, phase, stage, **kwargs): """Start a new routine, stage or phase""" return ProgressSection(self, self._session, phase, stage, self._logger, **kwargs)
[ "def", "start", "(", "self", ",", "phase", ",", "stage", ",", "*", "*", "kwargs", ")", ":", "return", "ProgressSection", "(", "self", ",", "self", ".", "_session", ",", "phase", ",", "stage", ",", "self", ".", "_logger", ",", "*", "*", "kwargs", ")...
Start a new routine, stage or phase
[ "Start", "a", "new", "routine", "stage", "or", "phase" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L258-L260
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.records
def records(self): """Return all start records for this the dataset, grouped by the start record""" return (self._session.query(Process) .filter(Process.d_vid == self._d_vid)).all()
python
def records(self): """Return all start records for this the dataset, grouped by the start record""" return (self._session.query(Process) .filter(Process.d_vid == self._d_vid)).all()
[ "def", "records", "(", "self", ")", ":", "return", "(", "self", ".", "_session", ".", "query", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "self", ".", "_d_vid", ")", ")", ".", "all", "(", ")" ]
Return all start records for this the dataset, grouped by the start record
[ "Return", "all", "start", "records", "for", "this", "the", "dataset", "grouped", "by", "the", "start", "record" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L263-L267
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.starts
def starts(self): """Return all start records for this the dataset, grouped by the start record""" return (self._session.query(Process) .filter(Process.d_vid == self._d_vid) .filter(Process.log_action == 'start') ).all()
python
def starts(self): """Return all start records for this the dataset, grouped by the start record""" return (self._session.query(Process) .filter(Process.d_vid == self._d_vid) .filter(Process.log_action == 'start') ).all()
[ "def", "starts", "(", "self", ")", ":", "return", "(", "self", ".", "_session", ".", "query", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "self", ".", "_d_vid", ")", ".", "filter", "(", "Process", ".", "log_action", "==", ...
Return all start records for this the dataset, grouped by the start record
[ "Return", "all", "start", "records", "for", "this", "the", "dataset", "grouped", "by", "the", "start", "record" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L270-L276
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.query
def query(self): """Return all start records for this the dataset, grouped by the start record""" return self._session.query(Process).filter(Process.d_vid == self._d_vid)
python
def query(self): """Return all start records for this the dataset, grouped by the start record""" return self._session.query(Process).filter(Process.d_vid == self._d_vid)
[ "def", "query", "(", "self", ")", ":", "return", "self", ".", "_session", ".", "query", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "self", ".", "_d_vid", ")" ]
Return all start records for this the dataset, grouped by the start record
[ "Return", "all", "start", "records", "for", "this", "the", "dataset", "grouped", "by", "the", "start", "record" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L279-L282
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.exceptions
def exceptions(self): """Return all start records for this the dataset, grouped by the start record""" return (self._session.query(Process) .filter(Process.d_vid == self._d_vid) .filter(Process.exception_class != None) .order_by(Process.modified)).all()
python
def exceptions(self): """Return all start records for this the dataset, grouped by the start record""" return (self._session.query(Process) .filter(Process.d_vid == self._d_vid) .filter(Process.exception_class != None) .order_by(Process.modified)).all()
[ "def", "exceptions", "(", "self", ")", ":", "return", "(", "self", ".", "_session", ".", "query", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "self", ".", "_d_vid", ")", ".", "filter", "(", "Process", ".", "exception_class",...
Return all start records for this the dataset, grouped by the start record
[ "Return", "all", "start", "records", "for", "this", "the", "dataset", "grouped", "by", "the", "start", "record" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L285-L291
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.clean
def clean(self): """Delete all of the records""" # Deleting seems to be really weird and unrelable. self._session \ .query(Process) \ .filter(Process.d_vid == self._d_vid) \ .delete(synchronize_session='fetch') for r in self.records: self._session.delete(r) self._session.commit()
python
def clean(self): """Delete all of the records""" # Deleting seems to be really weird and unrelable. self._session \ .query(Process) \ .filter(Process.d_vid == self._d_vid) \ .delete(synchronize_session='fetch') for r in self.records: self._session.delete(r) self._session.commit()
[ "def", "clean", "(", "self", ")", ":", "# Deleting seems to be really weird and unrelable.", "self", ".", "_session", ".", "query", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "self", ".", "_d_vid", ")", ".", "delete", "(", "synch...
Delete all of the records
[ "Delete", "all", "of", "the", "records" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L293-L305
CivicSpleen/ambry
ambry/bundle/process.py
ProcessLogger.build
def build(self): """Access build configuration values as attributes. See self.process for a usage example""" from ambry.orm.config import BuildConfigGroupAccessor # It is a lightweight object, so no need to cache return BuildConfigGroupAccessor(self.dataset, 'buildstate', self._session)
python
def build(self): """Access build configuration values as attributes. See self.process for a usage example""" from ambry.orm.config import BuildConfigGroupAccessor # It is a lightweight object, so no need to cache return BuildConfigGroupAccessor(self.dataset, 'buildstate', self._session)
[ "def", "build", "(", "self", ")", ":", "from", "ambry", ".", "orm", ".", "config", "import", "BuildConfigGroupAccessor", "# It is a lightweight object, so no need to cache", "return", "BuildConfigGroupAccessor", "(", "self", ".", "dataset", ",", "'buildstate'", ",", "...
Access build configuration values as attributes. See self.process for a usage example
[ "Access", "build", "configuration", "values", "as", "attributes", ".", "See", "self", ".", "process", "for", "a", "usage", "example" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L319-L325
sailthru/relay
relay/relay_logging.py
configure_logging
def configure_logging(add_handler, log=log): """ Configure log records. If adding a handler, make the formatter print all passed in key:value data. ie log.extra('msg', extra=dict(a=1)) generates 'msg a=1' `add_handler` (True, False, None, or Handler instance) if True, add a logging.StreamHandler() instance if False, do not add any handlers. if given a handler instance, add that the the logger """ _ignore_log_keys = set(logging.makeLogRecord({}).__dict__) def _json_format(record): extras = ' '.join( "%s=%s" % (k, record.__dict__[k]) for k in set(record.__dict__).difference(_ignore_log_keys)) if extras: record.msg = "%s %s" % (record.msg, extras) return record class ColoredJsonFormatter(ColoredFormatter): def format(self, record): record = _json_format(record) return super(ColoredJsonFormatter, self).format(record) if isinstance(add_handler, logging.Handler): log.addHandler(add_handler) elif add_handler is True: if not any(isinstance(h, logging.StreamHandler) for h in log.handlers): _h = logging.StreamHandler() _h.setFormatter(ColoredJsonFormatter( "%(log_color)s%(levelname)-8s %(message)s %(reset)s %(cyan)s", reset=True )) log.addHandler(_h) elif not log.handlers: log.addHandler(logging.NullHandler()) log.setLevel(logging.DEBUG) log.propagate = False return log
python
def configure_logging(add_handler, log=log): """ Configure log records. If adding a handler, make the formatter print all passed in key:value data. ie log.extra('msg', extra=dict(a=1)) generates 'msg a=1' `add_handler` (True, False, None, or Handler instance) if True, add a logging.StreamHandler() instance if False, do not add any handlers. if given a handler instance, add that the the logger """ _ignore_log_keys = set(logging.makeLogRecord({}).__dict__) def _json_format(record): extras = ' '.join( "%s=%s" % (k, record.__dict__[k]) for k in set(record.__dict__).difference(_ignore_log_keys)) if extras: record.msg = "%s %s" % (record.msg, extras) return record class ColoredJsonFormatter(ColoredFormatter): def format(self, record): record = _json_format(record) return super(ColoredJsonFormatter, self).format(record) if isinstance(add_handler, logging.Handler): log.addHandler(add_handler) elif add_handler is True: if not any(isinstance(h, logging.StreamHandler) for h in log.handlers): _h = logging.StreamHandler() _h.setFormatter(ColoredJsonFormatter( "%(log_color)s%(levelname)-8s %(message)s %(reset)s %(cyan)s", reset=True )) log.addHandler(_h) elif not log.handlers: log.addHandler(logging.NullHandler()) log.setLevel(logging.DEBUG) log.propagate = False return log
[ "def", "configure_logging", "(", "add_handler", ",", "log", "=", "log", ")", ":", "_ignore_log_keys", "=", "set", "(", "logging", ".", "makeLogRecord", "(", "{", "}", ")", ".", "__dict__", ")", "def", "_json_format", "(", "record", ")", ":", "extras", "=...
Configure log records. If adding a handler, make the formatter print all passed in key:value data. ie log.extra('msg', extra=dict(a=1)) generates 'msg a=1' `add_handler` (True, False, None, or Handler instance) if True, add a logging.StreamHandler() instance if False, do not add any handlers. if given a handler instance, add that the the logger
[ "Configure", "log", "records", ".", "If", "adding", "a", "handler", "make", "the", "formatter", "print", "all", "passed", "in", "key", ":", "value", "data", ".", "ie", "log", ".", "extra", "(", "msg", "extra", "=", "dict", "(", "a", "=", "1", "))", ...
train
https://github.com/sailthru/relay/blob/995209346c6663675d96d0cbff3bb67b9758c8e2/relay/relay_logging.py#L7-L47
project-ncl/pnc-cli
pnc_cli/swagger_client/models/build_environment_rest.py
BuildEnvironmentRest.system_image_type
def system_image_type(self, system_image_type): """ Sets the system_image_type of this BuildEnvironmentRest. :param system_image_type: The system_image_type of this BuildEnvironmentRest. :type: str """ allowed_values = ["DOCKER_IMAGE", "VIRTUAL_MACHINE_RAW", "VIRTUAL_MACHINE_QCOW2", "LOCAL_WORKSPACE"] if system_image_type not in allowed_values: raise ValueError( "Invalid value for `system_image_type` ({0}), must be one of {1}" .format(system_image_type, allowed_values) ) self._system_image_type = system_image_type
python
def system_image_type(self, system_image_type): """ Sets the system_image_type of this BuildEnvironmentRest. :param system_image_type: The system_image_type of this BuildEnvironmentRest. :type: str """ allowed_values = ["DOCKER_IMAGE", "VIRTUAL_MACHINE_RAW", "VIRTUAL_MACHINE_QCOW2", "LOCAL_WORKSPACE"] if system_image_type not in allowed_values: raise ValueError( "Invalid value for `system_image_type` ({0}), must be one of {1}" .format(system_image_type, allowed_values) ) self._system_image_type = system_image_type
[ "def", "system_image_type", "(", "self", ",", "system_image_type", ")", ":", "allowed_values", "=", "[", "\"DOCKER_IMAGE\"", ",", "\"VIRTUAL_MACHINE_RAW\"", ",", "\"VIRTUAL_MACHINE_QCOW2\"", ",", "\"LOCAL_WORKSPACE\"", "]", "if", "system_image_type", "not", "in", "allow...
Sets the system_image_type of this BuildEnvironmentRest. :param system_image_type: The system_image_type of this BuildEnvironmentRest. :type: str
[ "Sets", "the", "system_image_type", "of", "this", "BuildEnvironmentRest", "." ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/models/build_environment_rest.py#L228-L242
jedie/django-cms-tools
django_cms_tools/fixture_helper/page_utils.py
get_public_cms_app_namespaces
def get_public_cms_app_namespaces(): """ :return: a tuple() with all cms app namespaces """ qs = Page.objects.public() qs = qs.exclude(application_namespace=None) qs = qs.order_by('application_namespace') try: application_namespaces = list( qs.distinct('application_namespace').values_list( 'application_namespace', flat=True)) except NotImplementedError: # If SQLite used: # DISTINCT ON fields is not supported by this database backend application_namespaces = list( set(qs.values_list('application_namespace', flat=True))) application_namespaces.sort() return tuple(application_namespaces)
python
def get_public_cms_app_namespaces(): """ :return: a tuple() with all cms app namespaces """ qs = Page.objects.public() qs = qs.exclude(application_namespace=None) qs = qs.order_by('application_namespace') try: application_namespaces = list( qs.distinct('application_namespace').values_list( 'application_namespace', flat=True)) except NotImplementedError: # If SQLite used: # DISTINCT ON fields is not supported by this database backend application_namespaces = list( set(qs.values_list('application_namespace', flat=True))) application_namespaces.sort() return tuple(application_namespaces)
[ "def", "get_public_cms_app_namespaces", "(", ")", ":", "qs", "=", "Page", ".", "objects", ".", "public", "(", ")", "qs", "=", "qs", ".", "exclude", "(", "application_namespace", "=", "None", ")", "qs", "=", "qs", ".", "order_by", "(", "'application_namespa...
:return: a tuple() with all cms app namespaces
[ ":", "return", ":", "a", "tuple", "()", "with", "all", "cms", "app", "namespaces" ]
train
https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/page_utils.py#L10-L30
jedie/django-cms-tools
django_cms_tools/fixture_helper/page_utils.py
get_public_cms_page_urls
def get_public_cms_page_urls(*, language_code): """ :param language_code: e.g.: "en" or "de" :return: Tuple with all public urls in the given language """ pages = Page.objects.public() urls = [page.get_absolute_url(language=language_code) for page in pages] urls.sort() return tuple(urls)
python
def get_public_cms_page_urls(*, language_code): """ :param language_code: e.g.: "en" or "de" :return: Tuple with all public urls in the given language """ pages = Page.objects.public() urls = [page.get_absolute_url(language=language_code) for page in pages] urls.sort() return tuple(urls)
[ "def", "get_public_cms_page_urls", "(", "*", ",", "language_code", ")", ":", "pages", "=", "Page", ".", "objects", ".", "public", "(", ")", "urls", "=", "[", "page", ".", "get_absolute_url", "(", "language", "=", "language_code", ")", "for", "page", "in", ...
:param language_code: e.g.: "en" or "de" :return: Tuple with all public urls in the given language
[ ":", "param", "language_code", ":", "e", ".", "g", ".", ":", "en", "or", "de", ":", "return", ":", "Tuple", "with", "all", "public", "urls", "in", "the", "given", "language" ]
train
https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/page_utils.py#L33-L41
CivicSpleen/ambry
ambry/metadata/schema.py
ExtDoc.group_by_source
def group_by_source(self): """Return a dict of all of the docs, with the source associated with the doc as a key""" from collections import defaultdict docs = defaultdict(list) for k, v in self.items(): if 'source' in v: docs[v.source].append(dict(v.items())) return docs
python
def group_by_source(self): """Return a dict of all of the docs, with the source associated with the doc as a key""" from collections import defaultdict docs = defaultdict(list) for k, v in self.items(): if 'source' in v: docs[v.source].append(dict(v.items())) return docs
[ "def", "group_by_source", "(", "self", ")", ":", "from", "collections", "import", "defaultdict", "docs", "=", "defaultdict", "(", "list", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ":", "if", "'source'", "in", "v", ":", "docs", ...
Return a dict of all of the docs, with the source associated with the doc as a key
[ "Return", "a", "dict", "of", "all", "of", "the", "docs", "with", "the", "source", "associated", "with", "the", "doc", "as", "a", "key" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/metadata/schema.py#L88-L98
twisted/epsilon
epsilon/hotfixes/filepath_copyTo.py
FilePath.preauthChild
def preauthChild(self, path): """ Use me if `path' might have slashes in it, but you know they're safe. (NOT slashes at the beginning. It still needs to be a _child_). """ newpath = abspath(joinpath(self.path, normpath(path))) if not newpath.startswith(self.path): raise InsecurePath("%s is not a child of %s" % (newpath, self.path)) return self.clonePath(newpath)
python
def preauthChild(self, path): """ Use me if `path' might have slashes in it, but you know they're safe. (NOT slashes at the beginning. It still needs to be a _child_). """ newpath = abspath(joinpath(self.path, normpath(path))) if not newpath.startswith(self.path): raise InsecurePath("%s is not a child of %s" % (newpath, self.path)) return self.clonePath(newpath)
[ "def", "preauthChild", "(", "self", ",", "path", ")", ":", "newpath", "=", "abspath", "(", "joinpath", "(", "self", ".", "path", ",", "normpath", "(", "path", ")", ")", ")", "if", "not", "newpath", ".", "startswith", "(", "self", ".", "path", ")", ...
Use me if `path' might have slashes in it, but you know they're safe. (NOT slashes at the beginning. It still needs to be a _child_).
[ "Use", "me", "if", "path", "might", "have", "slashes", "in", "it", "but", "you", "know", "they", "re", "safe", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L96-L105
twisted/epsilon
epsilon/hotfixes/filepath_copyTo.py
FilePath.childSearchPreauth
def childSearchPreauth(self, *paths): """Return my first existing child with a name in 'paths'. paths is expected to be a list of *pre-secured* path fragments; in most cases this will be specified by a system administrator and not an arbitrary user. If no appropriately-named children exist, this will return None. """ p = self.path for child in paths: jp = joinpath(p, child) if exists(jp): return self.clonePath(jp)
python
def childSearchPreauth(self, *paths): """Return my first existing child with a name in 'paths'. paths is expected to be a list of *pre-secured* path fragments; in most cases this will be specified by a system administrator and not an arbitrary user. If no appropriately-named children exist, this will return None. """ p = self.path for child in paths: jp = joinpath(p, child) if exists(jp): return self.clonePath(jp)
[ "def", "childSearchPreauth", "(", "self", ",", "*", "paths", ")", ":", "p", "=", "self", ".", "path", "for", "child", "in", "paths", ":", "jp", "=", "joinpath", "(", "p", ",", "child", ")", "if", "exists", "(", "jp", ")", ":", "return", "self", "...
Return my first existing child with a name in 'paths'. paths is expected to be a list of *pre-secured* path fragments; in most cases this will be specified by a system administrator and not an arbitrary user. If no appropriately-named children exist, this will return None.
[ "Return", "my", "first", "existing", "child", "with", "a", "name", "in", "paths", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L107-L120
twisted/epsilon
epsilon/hotfixes/filepath_copyTo.py
FilePath.siblingExtensionSearch
def siblingExtensionSearch(self, *exts): """Attempt to return a path with my name, given multiple possible extensions. Each extension in exts will be tested and the first path which exists will be returned. If no path exists, None will be returned. If '' is in exts, then if the file referred to by this path exists, 'self' will be returned. The extension '*' has a magic meaning, which means "any path that begins with self.path+'.' is acceptable". """ p = self.path for ext in exts: if not ext and self.exists(): return self if ext == '*': basedot = basename(p)+'.' for fn in listdir(dirname(p)): if fn.startswith(basedot): return self.clonePath(joinpath(dirname(p), fn)) p2 = p + ext if exists(p2): return self.clonePath(p2)
python
def siblingExtensionSearch(self, *exts): """Attempt to return a path with my name, given multiple possible extensions. Each extension in exts will be tested and the first path which exists will be returned. If no path exists, None will be returned. If '' is in exts, then if the file referred to by this path exists, 'self' will be returned. The extension '*' has a magic meaning, which means "any path that begins with self.path+'.' is acceptable". """ p = self.path for ext in exts: if not ext and self.exists(): return self if ext == '*': basedot = basename(p)+'.' for fn in listdir(dirname(p)): if fn.startswith(basedot): return self.clonePath(joinpath(dirname(p), fn)) p2 = p + ext if exists(p2): return self.clonePath(p2)
[ "def", "siblingExtensionSearch", "(", "self", ",", "*", "exts", ")", ":", "p", "=", "self", ".", "path", "for", "ext", "in", "exts", ":", "if", "not", "ext", "and", "self", ".", "exists", "(", ")", ":", "return", "self", "if", "ext", "==", "'*'", ...
Attempt to return a path with my name, given multiple possible extensions. Each extension in exts will be tested and the first path which exists will be returned. If no path exists, None will be returned. If '' is in exts, then if the file referred to by this path exists, 'self' will be returned. The extension '*' has a magic meaning, which means "any path that begins with self.path+'.' is acceptable".
[ "Attempt", "to", "return", "a", "path", "with", "my", "name", "given", "multiple", "possible", "extensions", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L122-L145
twisted/epsilon
epsilon/hotfixes/filepath_copyTo.py
FilePath.globChildren
def globChildren(self, pattern): """ Assuming I am representing a directory, return a list of FilePaths representing my children that match the given pattern. """ import glob path = self.path[-1] == '/' and self.path + pattern or slash.join([self.path, pattern]) return map(self.clonePath, glob.glob(path))
python
def globChildren(self, pattern): """ Assuming I am representing a directory, return a list of FilePaths representing my children that match the given pattern. """ import glob path = self.path[-1] == '/' and self.path + pattern or slash.join([self.path, pattern]) return map(self.clonePath, glob.glob(path))
[ "def", "globChildren", "(", "self", ",", "pattern", ")", ":", "import", "glob", "path", "=", "self", ".", "path", "[", "-", "1", "]", "==", "'/'", "and", "self", ".", "path", "+", "pattern", "or", "slash", ".", "join", "(", "[", "self", ".", "pat...
Assuming I am representing a directory, return a list of FilePaths representing my children that match the given pattern.
[ "Assuming", "I", "am", "representing", "a", "directory", "return", "a", "list", "of", "FilePaths", "representing", "my", "children", "that", "match", "the", "given", "pattern", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L261-L269
twisted/epsilon
epsilon/hotfixes/filepath_copyTo.py
FilePath.create
def create(self): """Exclusively create a file, only if this file previously did not exist. """ fdint = os.open(self.path, (os.O_EXCL | os.O_CREAT | os.O_RDWR)) # XXX TODO: 'name' attribute of returned files is not mutable or # settable via fdopen, so this file is slighly less functional than the # one returned from 'open' by default. send a patch to Python... return os.fdopen(fdint, 'w+b')
python
def create(self): """Exclusively create a file, only if this file previously did not exist. """ fdint = os.open(self.path, (os.O_EXCL | os.O_CREAT | os.O_RDWR)) # XXX TODO: 'name' attribute of returned files is not mutable or # settable via fdopen, so this file is slighly less functional than the # one returned from 'open' by default. send a patch to Python... return os.fdopen(fdint, 'w+b')
[ "def", "create", "(", "self", ")", ":", "fdint", "=", "os", ".", "open", "(", "self", ".", "path", ",", "(", "os", ".", "O_EXCL", "|", "os", ".", "O_CREAT", "|", "os", ".", "O_RDWR", ")", ")", "# XXX TODO: 'name' attribute of returned files is not mutable ...
Exclusively create a file, only if this file previously did not exist.
[ "Exclusively", "create", "a", "file", "only", "if", "this", "file", "previously", "did", "not", "exist", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L301-L312
twisted/epsilon
epsilon/hotfixes/filepath_copyTo.py
FilePath.temporarySibling
def temporarySibling(self): """ Create a path naming a temporary sibling of this path in a secure fashion. """ sib = self.parent().child(_secureEnoughString() + self.basename()) sib.requireCreate() return sib
python
def temporarySibling(self): """ Create a path naming a temporary sibling of this path in a secure fashion. """ sib = self.parent().child(_secureEnoughString() + self.basename()) sib.requireCreate() return sib
[ "def", "temporarySibling", "(", "self", ")", ":", "sib", "=", "self", ".", "parent", "(", ")", ".", "child", "(", "_secureEnoughString", "(", ")", "+", "self", ".", "basename", "(", ")", ")", "sib", ".", "requireCreate", "(", ")", "return", "sib" ]
Create a path naming a temporary sibling of this path in a secure fashion.
[ "Create", "a", "path", "naming", "a", "temporary", "sibling", "of", "this", "path", "in", "a", "secure", "fashion", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L314-L320
twisted/epsilon
epsilon/scripts/benchmark.py
parseDiskStatLine
def parseDiskStatLine(L): """ Parse a single line from C{/proc/diskstats} into a two-tuple of the name of the device to which it corresponds (ie 'hda') and an instance of the appropriate record type (either L{partitionstat} or L{diskstat}). """ parts = L.split() device = parts[2] if len(parts) == 7: factory = partitionstat else: factory = diskstat return device, factory(*map(int, parts[3:]))
python
def parseDiskStatLine(L): """ Parse a single line from C{/proc/diskstats} into a two-tuple of the name of the device to which it corresponds (ie 'hda') and an instance of the appropriate record type (either L{partitionstat} or L{diskstat}). """ parts = L.split() device = parts[2] if len(parts) == 7: factory = partitionstat else: factory = diskstat return device, factory(*map(int, parts[3:]))
[ "def", "parseDiskStatLine", "(", "L", ")", ":", "parts", "=", "L", ".", "split", "(", ")", "device", "=", "parts", "[", "2", "]", "if", "len", "(", "parts", ")", "==", "7", ":", "factory", "=", "partitionstat", "else", ":", "factory", "=", "disksta...
Parse a single line from C{/proc/diskstats} into a two-tuple of the name of the device to which it corresponds (ie 'hda') and an instance of the appropriate record type (either L{partitionstat} or L{diskstat}).
[ "Parse", "a", "single", "line", "from", "C", "{", "/", "proc", "/", "diskstats", "}", "into", "a", "two", "-", "tuple", "of", "the", "name", "of", "the", "device", "to", "which", "it", "corresponds", "(", "ie", "hda", ")", "and", "an", "instance", ...
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L42-L54
twisted/epsilon
epsilon/scripts/benchmark.py
discoverCurrentWorkingDevice
def discoverCurrentWorkingDevice(procMounts='/proc/self/mounts'): """ Return a short string naming the device which backs the current working directory, ie C{/dev/hda1}. """ possibilities = [] cwd = os.getcwd() with file(procMounts, 'rb') as f: for L in f: parts = L.split() if cwd.startswith(parts[1]): possibilities.append((len(parts[1]), parts[0])) possibilities.sort() try: return possibilities[-1][-1] except IndexError: return '<unknown>'
python
def discoverCurrentWorkingDevice(procMounts='/proc/self/mounts'): """ Return a short string naming the device which backs the current working directory, ie C{/dev/hda1}. """ possibilities = [] cwd = os.getcwd() with file(procMounts, 'rb') as f: for L in f: parts = L.split() if cwd.startswith(parts[1]): possibilities.append((len(parts[1]), parts[0])) possibilities.sort() try: return possibilities[-1][-1] except IndexError: return '<unknown>'
[ "def", "discoverCurrentWorkingDevice", "(", "procMounts", "=", "'/proc/self/mounts'", ")", ":", "possibilities", "=", "[", "]", "cwd", "=", "os", ".", "getcwd", "(", ")", "with", "file", "(", "procMounts", ",", "'rb'", ")", "as", "f", ":", "for", "L", "i...
Return a short string naming the device which backs the current working directory, ie C{/dev/hda1}.
[ "Return", "a", "short", "string", "naming", "the", "device", "which", "backs", "the", "current", "working", "directory", "ie", "C", "{", "/", "dev", "/", "hda1", "}", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L415-L431
twisted/epsilon
epsilon/scripts/benchmark.py
makeBenchmarkRunner
def makeBenchmarkRunner(path, args): """ Make a function that will run two Python processes serially: first one which calls the setup function from the given file, then one which calls the execute function from the given file. """ def runner(): return BenchmarkProcess.spawn( executable=sys.executable, args=['-Wignore'] + args, path=path.path, env=os.environ) return runner
python
def makeBenchmarkRunner(path, args): """ Make a function that will run two Python processes serially: first one which calls the setup function from the given file, then one which calls the execute function from the given file. """ def runner(): return BenchmarkProcess.spawn( executable=sys.executable, args=['-Wignore'] + args, path=path.path, env=os.environ) return runner
[ "def", "makeBenchmarkRunner", "(", "path", ",", "args", ")", ":", "def", "runner", "(", ")", ":", "return", "BenchmarkProcess", ".", "spawn", "(", "executable", "=", "sys", ".", "executable", ",", "args", "=", "[", "'-Wignore'", "]", "+", "args", ",", ...
Make a function that will run two Python processes serially: first one which calls the setup function from the given file, then one which calls the execute function from the given file.
[ "Make", "a", "function", "that", "will", "run", "two", "Python", "processes", "serially", ":", "first", "one", "which", "calls", "the", "setup", "function", "from", "the", "given", "file", "then", "one", "which", "calls", "the", "execute", "function", "from"...
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L514-L526
twisted/epsilon
epsilon/scripts/benchmark.py
start
def start(): """ Start recording stats. Call this from a benchmark script when your setup is done. Call this at most once. @raise RuntimeError: Raised if the parent process responds with anything other than an acknowledgement of this message. """ os.write(BenchmarkProcess.BACKCHANNEL_OUT, BenchmarkProcess.START) response = util.untilConcludes(os.read, BenchmarkProcess.BACKCHANNEL_IN, 1) if response != BenchmarkProcess.START: raise RuntimeError( "Parent process responded with %r instead of START " % (response,))
python
def start(): """ Start recording stats. Call this from a benchmark script when your setup is done. Call this at most once. @raise RuntimeError: Raised if the parent process responds with anything other than an acknowledgement of this message. """ os.write(BenchmarkProcess.BACKCHANNEL_OUT, BenchmarkProcess.START) response = util.untilConcludes(os.read, BenchmarkProcess.BACKCHANNEL_IN, 1) if response != BenchmarkProcess.START: raise RuntimeError( "Parent process responded with %r instead of START " % (response,))
[ "def", "start", "(", ")", ":", "os", ".", "write", "(", "BenchmarkProcess", ".", "BACKCHANNEL_OUT", ",", "BenchmarkProcess", ".", "START", ")", "response", "=", "util", ".", "untilConcludes", "(", "os", ".", "read", ",", "BenchmarkProcess", ".", "BACKCHANNEL...
Start recording stats. Call this from a benchmark script when your setup is done. Call this at most once. @raise RuntimeError: Raised if the parent process responds with anything other than an acknowledgement of this message.
[ "Start", "recording", "stats", ".", "Call", "this", "from", "a", "benchmark", "script", "when", "your", "setup", "is", "done", ".", "Call", "this", "at", "most", "once", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L530-L542
twisted/epsilon
epsilon/scripts/benchmark.py
stop
def stop(): """ Stop recording stats. Call this from a benchmark script when the code you want benchmarked has finished. Call this exactly the same number of times you call L{start} and only after calling it. @raise RuntimeError: Raised if the parent process responds with anything other than an acknowledgement of this message. """ os.write(BenchmarkProcess.BACKCHANNEL_OUT, BenchmarkProcess.STOP) response = util.untilConcludes(os.read, BenchmarkProcess.BACKCHANNEL_IN, 1) if response != BenchmarkProcess.STOP: raise RuntimeError( "Parent process responded with %r instead of STOP" % (response,))
python
def stop(): """ Stop recording stats. Call this from a benchmark script when the code you want benchmarked has finished. Call this exactly the same number of times you call L{start} and only after calling it. @raise RuntimeError: Raised if the parent process responds with anything other than an acknowledgement of this message. """ os.write(BenchmarkProcess.BACKCHANNEL_OUT, BenchmarkProcess.STOP) response = util.untilConcludes(os.read, BenchmarkProcess.BACKCHANNEL_IN, 1) if response != BenchmarkProcess.STOP: raise RuntimeError( "Parent process responded with %r instead of STOP" % (response,))
[ "def", "stop", "(", ")", ":", "os", ".", "write", "(", "BenchmarkProcess", ".", "BACKCHANNEL_OUT", ",", "BenchmarkProcess", ".", "STOP", ")", "response", "=", "util", ".", "untilConcludes", "(", "os", ".", "read", ",", "BenchmarkProcess", ".", "BACKCHANNEL_I...
Stop recording stats. Call this from a benchmark script when the code you want benchmarked has finished. Call this exactly the same number of times you call L{start} and only after calling it. @raise RuntimeError: Raised if the parent process responds with anything other than an acknowledgement of this message.
[ "Stop", "recording", "stats", ".", "Call", "this", "from", "a", "benchmark", "script", "when", "the", "code", "you", "want", "benchmarked", "has", "finished", ".", "Call", "this", "exactly", "the", "same", "number", "of", "times", "you", "call", "L", "{", ...
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L546-L559
twisted/epsilon
epsilon/scripts/benchmark.py
main
def main(): """ Run me with the filename of a benchmark script as an argument. I will time it and append the results to a file named output in the current working directory. """ name = sys.argv[1] path = filepath.FilePath('.stat').temporarySibling() path.makedirs() func = makeBenchmarkRunner(path, sys.argv[1:]) try: bench(name, path, func) finally: path.remove()
python
def main(): """ Run me with the filename of a benchmark script as an argument. I will time it and append the results to a file named output in the current working directory. """ name = sys.argv[1] path = filepath.FilePath('.stat').temporarySibling() path.makedirs() func = makeBenchmarkRunner(path, sys.argv[1:]) try: bench(name, path, func) finally: path.remove()
[ "def", "main", "(", ")", ":", "name", "=", "sys", ".", "argv", "[", "1", "]", "path", "=", "filepath", ".", "FilePath", "(", "'.stat'", ")", ".", "temporarySibling", "(", ")", "path", ".", "makedirs", "(", ")", "func", "=", "makeBenchmarkRunner", "("...
Run me with the filename of a benchmark script as an argument. I will time it and append the results to a file named output in the current working directory.
[ "Run", "me", "with", "the", "filename", "of", "a", "benchmark", "script", "as", "an", "argument", ".", "I", "will", "time", "it", "and", "append", "the", "results", "to", "a", "file", "named", "output", "in", "the", "current", "working", "directory", "."...
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L563-L576
twisted/epsilon
epsilon/scripts/benchmark.py
BasicProcess.spawn
def spawn(cls, executable, args, path, env, spawnProcess=None): """ Run an executable with some arguments in the given working directory with the given environment variables. Returns a Deferred which fires with a two-tuple of (exit status, output list) if the process terminates without timing out or being killed by a signal. Otherwise, the Deferred errbacks with either L{error.TimeoutError} if any 10 minute period passes with no events or L{ProcessDied} if it is killed by a signal. On success, the output list is of two-tuples of (file descriptor, bytes). """ d = defer.Deferred() proto = cls(d, filepath.FilePath(path)) if spawnProcess is None: spawnProcess = reactor.spawnProcess spawnProcess( proto, executable, [executable] + args, path=path, env=env, childFDs={0: 'w', 1: 'r', 2: 'r', cls.BACKCHANNEL_OUT: 'r', cls.BACKCHANNEL_IN: 'w'}) return d
python
def spawn(cls, executable, args, path, env, spawnProcess=None): """ Run an executable with some arguments in the given working directory with the given environment variables. Returns a Deferred which fires with a two-tuple of (exit status, output list) if the process terminates without timing out or being killed by a signal. Otherwise, the Deferred errbacks with either L{error.TimeoutError} if any 10 minute period passes with no events or L{ProcessDied} if it is killed by a signal. On success, the output list is of two-tuples of (file descriptor, bytes). """ d = defer.Deferred() proto = cls(d, filepath.FilePath(path)) if spawnProcess is None: spawnProcess = reactor.spawnProcess spawnProcess( proto, executable, [executable] + args, path=path, env=env, childFDs={0: 'w', 1: 'r', 2: 'r', cls.BACKCHANNEL_OUT: 'r', cls.BACKCHANNEL_IN: 'w'}) return d
[ "def", "spawn", "(", "cls", ",", "executable", ",", "args", ",", "path", ",", "env", ",", "spawnProcess", "=", "None", ")", ":", "d", "=", "defer", ".", "Deferred", "(", ")", "proto", "=", "cls", "(", "d", ",", "filepath", ".", "FilePath", "(", "...
Run an executable with some arguments in the given working directory with the given environment variables. Returns a Deferred which fires with a two-tuple of (exit status, output list) if the process terminates without timing out or being killed by a signal. Otherwise, the Deferred errbacks with either L{error.TimeoutError} if any 10 minute period passes with no events or L{ProcessDied} if it is killed by a signal. On success, the output list is of two-tuples of (file descriptor, bytes).
[ "Run", "an", "executable", "with", "some", "arguments", "in", "the", "given", "working", "directory", "with", "the", "given", "environment", "variables", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/scripts/benchmark.py#L166-L192
scholrly/orcid-python
orcid/rest.py
get
def get(orcid_id): """ Get an author based on an ORCID identifier. """ resp = requests.get(ORCID_PUBLIC_BASE_URL + unicode(orcid_id), headers=BASE_HEADERS) json_body = resp.json() return Author(json_body)
python
def get(orcid_id): """ Get an author based on an ORCID identifier. """ resp = requests.get(ORCID_PUBLIC_BASE_URL + unicode(orcid_id), headers=BASE_HEADERS) json_body = resp.json() return Author(json_body)
[ "def", "get", "(", "orcid_id", ")", ":", "resp", "=", "requests", ".", "get", "(", "ORCID_PUBLIC_BASE_URL", "+", "unicode", "(", "orcid_id", ")", ",", "headers", "=", "BASE_HEADERS", ")", "json_body", "=", "resp", ".", "json", "(", ")", "return", "Author...
Get an author based on an ORCID identifier.
[ "Get", "an", "author", "based", "on", "an", "ORCID", "identifier", "." ]
train
https://github.com/scholrly/orcid-python/blob/71311ca708689740e99d447716d6f22f1291f6f8/orcid/rest.py#L119-L126
twisted/epsilon
epsilon/structlike.py
_contextualize
def _contextualize(contextFactory, contextReceiver): """ Invoke a callable with an argument derived from the current execution context (L{twisted.python.context}), or automatically created if none is yet present in the current context. This function, with a better name and documentation, should probably be somewhere in L{twisted.python.context}. Calling context.get() and context.call() individually is perilous because you always have to handle the case where the value you're looking for isn't present; this idiom forces you to supply some behavior for that case. @param contextFactory: An object which is both a 0-arg callable and hashable; used to look up the value in the context, set the value in the context, and create the value (by being called). @param contextReceiver: A function that receives the value created or identified by contextFactory. It is a 1-arg callable object, called with the result of calling the contextFactory, or retrieving the contextFactory from the context. """ value = context.get(contextFactory, _NOT_SPECIFIED) if value is not _NOT_SPECIFIED: return contextReceiver(value) else: return context.call({contextFactory: contextFactory()}, _contextualize, contextFactory, contextReceiver)
python
def _contextualize(contextFactory, contextReceiver): """ Invoke a callable with an argument derived from the current execution context (L{twisted.python.context}), or automatically created if none is yet present in the current context. This function, with a better name and documentation, should probably be somewhere in L{twisted.python.context}. Calling context.get() and context.call() individually is perilous because you always have to handle the case where the value you're looking for isn't present; this idiom forces you to supply some behavior for that case. @param contextFactory: An object which is both a 0-arg callable and hashable; used to look up the value in the context, set the value in the context, and create the value (by being called). @param contextReceiver: A function that receives the value created or identified by contextFactory. It is a 1-arg callable object, called with the result of calling the contextFactory, or retrieving the contextFactory from the context. """ value = context.get(contextFactory, _NOT_SPECIFIED) if value is not _NOT_SPECIFIED: return contextReceiver(value) else: return context.call({contextFactory: contextFactory()}, _contextualize, contextFactory, contextReceiver)
[ "def", "_contextualize", "(", "contextFactory", ",", "contextReceiver", ")", ":", "value", "=", "context", ".", "get", "(", "contextFactory", ",", "_NOT_SPECIFIED", ")", "if", "value", "is", "not", "_NOT_SPECIFIED", ":", "return", "contextReceiver", "(", "value"...
Invoke a callable with an argument derived from the current execution context (L{twisted.python.context}), or automatically created if none is yet present in the current context. This function, with a better name and documentation, should probably be somewhere in L{twisted.python.context}. Calling context.get() and context.call() individually is perilous because you always have to handle the case where the value you're looking for isn't present; this idiom forces you to supply some behavior for that case. @param contextFactory: An object which is both a 0-arg callable and hashable; used to look up the value in the context, set the value in the context, and create the value (by being called). @param contextReceiver: A function that receives the value created or identified by contextFactory. It is a 1-arg callable object, called with the result of calling the contextFactory, or retrieving the contextFactory from the context.
[ "Invoke", "a", "callable", "with", "an", "argument", "derived", "from", "the", "current", "execution", "context", "(", "L", "{", "twisted", ".", "python", ".", "context", "}", ")", "or", "automatically", "created", "if", "none", "is", "yet", "present", "in...
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/structlike.py#L36-L62
twisted/epsilon
epsilon/structlike.py
record
def record(*a, **kw): """ Are you tired of typing class declarations that look like this:: class StuffInfo: def __init__(self, a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None): self.a = a self.b = b self.c = c self.d = d # ... Epsilon can help! That's right - for a limited time only, this function returns a class which provides a shortcut. The above can be simplified to:: StuffInfo = record(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None) if the arguments are required, rather than having defaults, it could be even shorter:: StuffInfo = record('a b c d e f g h i j') Put more formally: C{record} optionally takes one positional argument, a L{str} representing attribute names as whitespace-separated identifiers; it also takes an arbitrary number of keyword arguments, which map attribute names to their default values. If no positional argument is provided, the names of attributes will be inferred from the names of the defaults instead. """ if len(a) == 1: attributeNames = a[0].split() elif len(a) == 0: if not kw: raise TypeError("Attempted to define a record with no attributes.") attributeNames = kw.keys() attributeNames.sort() else: raise TypeError( "record must be called with zero or one positional arguments") # Work like Python: allow defaults specified backwards from the end defaults = [] for attributeName in attributeNames: default = kw.pop(attributeName, _NOT_SPECIFIED) if defaults: if default is _NOT_SPECIFIED: raise TypeError( "You must specify default values like in Python; " "backwards from the end of the argument list, " "with no gaps") else: defaults.append(default) elif default is not _NOT_SPECIFIED: defaults.append(default) else: # This space left intentionally blank. pass if kw: raise TypeError("The following defaults did not apply: %r" % (kw,)) return type('Record<%s>' % (' '.join(attributeNames),), (StructBehavior,), dict(__names__=attributeNames, __defaults__=defaults))
python
def record(*a, **kw): """ Are you tired of typing class declarations that look like this:: class StuffInfo: def __init__(self, a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None): self.a = a self.b = b self.c = c self.d = d # ... Epsilon can help! That's right - for a limited time only, this function returns a class which provides a shortcut. The above can be simplified to:: StuffInfo = record(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None) if the arguments are required, rather than having defaults, it could be even shorter:: StuffInfo = record('a b c d e f g h i j') Put more formally: C{record} optionally takes one positional argument, a L{str} representing attribute names as whitespace-separated identifiers; it also takes an arbitrary number of keyword arguments, which map attribute names to their default values. If no positional argument is provided, the names of attributes will be inferred from the names of the defaults instead. """ if len(a) == 1: attributeNames = a[0].split() elif len(a) == 0: if not kw: raise TypeError("Attempted to define a record with no attributes.") attributeNames = kw.keys() attributeNames.sort() else: raise TypeError( "record must be called with zero or one positional arguments") # Work like Python: allow defaults specified backwards from the end defaults = [] for attributeName in attributeNames: default = kw.pop(attributeName, _NOT_SPECIFIED) if defaults: if default is _NOT_SPECIFIED: raise TypeError( "You must specify default values like in Python; " "backwards from the end of the argument list, " "with no gaps") else: defaults.append(default) elif default is not _NOT_SPECIFIED: defaults.append(default) else: # This space left intentionally blank. pass if kw: raise TypeError("The following defaults did not apply: %r" % (kw,)) return type('Record<%s>' % (' '.join(attributeNames),), (StructBehavior,), dict(__names__=attributeNames, __defaults__=defaults))
[ "def", "record", "(", "*", "a", ",", "*", "*", "kw", ")", ":", "if", "len", "(", "a", ")", "==", "1", ":", "attributeNames", "=", "a", "[", "0", "]", ".", "split", "(", ")", "elif", "len", "(", "a", ")", "==", "0", ":", "if", "not", "kw",...
Are you tired of typing class declarations that look like this:: class StuffInfo: def __init__(self, a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None): self.a = a self.b = b self.c = c self.d = d # ... Epsilon can help! That's right - for a limited time only, this function returns a class which provides a shortcut. The above can be simplified to:: StuffInfo = record(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None) if the arguments are required, rather than having defaults, it could be even shorter:: StuffInfo = record('a b c d e f g h i j') Put more formally: C{record} optionally takes one positional argument, a L{str} representing attribute names as whitespace-separated identifiers; it also takes an arbitrary number of keyword arguments, which map attribute names to their default values. If no positional argument is provided, the names of attributes will be inferred from the names of the defaults instead.
[ "Are", "you", "tired", "of", "typing", "class", "declarations", "that", "look", "like", "this", "::" ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/structlike.py#L117-L183
twisted/epsilon
epsilon/structlike.py
_RecursiveReprer.recursiveRepr
def recursiveRepr(self, stuff, thunk=repr): """ Recursive repr(). """ ID = id(stuff) if ID in self.active: return '%s(...)' % (stuff.__class__.__name__,) else: try: self.active[ID] = stuff return thunk(stuff) finally: del self.active[ID]
python
def recursiveRepr(self, stuff, thunk=repr): """ Recursive repr(). """ ID = id(stuff) if ID in self.active: return '%s(...)' % (stuff.__class__.__name__,) else: try: self.active[ID] = stuff return thunk(stuff) finally: del self.active[ID]
[ "def", "recursiveRepr", "(", "self", ",", "stuff", ",", "thunk", "=", "repr", ")", ":", "ID", "=", "id", "(", "stuff", ")", "if", "ID", "in", "self", ".", "active", ":", "return", "'%s(...)'", "%", "(", "stuff", ".", "__class__", ".", "__name__", "...
Recursive repr().
[ "Recursive", "repr", "()", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/structlike.py#L21-L33
cosven/feeluown-core
fuocore/lyric.py
parse
def parse(content): """ Reference: https://github.com/osdlyrics/osdlyrics/blob/master/python/lrc.py >>> parse("[00:00.00] 作曲 : 周杰伦\\n[00:01.00] 作词 : 周杰伦\\n") {0.0: ' 作曲 : 周杰伦', 1000.0: ' 作词 : 周杰伦'} """ ms_sentence_map = dict() sentence_pattern = re.compile(r'\[(\d+(:\d+){0,2}(\.\d+)?)\]') lines = content.splitlines() for line in lines: m = sentence_pattern.search(line, 0) if m: time_str = m.group(1) mileseconds = 0 unit = 1000 t_seq = time_str.split(':') t_seq.reverse() for num in t_seq: mileseconds += float(num) * unit unit *= 60 sentence = line[m.end():] ms_sentence_map[mileseconds] = sentence return ms_sentence_map
python
def parse(content): """ Reference: https://github.com/osdlyrics/osdlyrics/blob/master/python/lrc.py >>> parse("[00:00.00] 作曲 : 周杰伦\\n[00:01.00] 作词 : 周杰伦\\n") {0.0: ' 作曲 : 周杰伦', 1000.0: ' 作词 : 周杰伦'} """ ms_sentence_map = dict() sentence_pattern = re.compile(r'\[(\d+(:\d+){0,2}(\.\d+)?)\]') lines = content.splitlines() for line in lines: m = sentence_pattern.search(line, 0) if m: time_str = m.group(1) mileseconds = 0 unit = 1000 t_seq = time_str.split(':') t_seq.reverse() for num in t_seq: mileseconds += float(num) * unit unit *= 60 sentence = line[m.end():] ms_sentence_map[mileseconds] = sentence return ms_sentence_map
[ "def", "parse", "(", "content", ")", ":", "ms_sentence_map", "=", "dict", "(", ")", "sentence_pattern", "=", "re", ".", "compile", "(", "r'\\[(\\d+(:\\d+){0,2}(\\.\\d+)?)\\]'", ")", "lines", "=", "content", ".", "splitlines", "(", ")", "for", "line", "in", "...
Reference: https://github.com/osdlyrics/osdlyrics/blob/master/python/lrc.py >>> parse("[00:00.00] 作曲 : 周杰伦\\n[00:01.00] 作词 : 周杰伦\\n") {0.0: ' 作曲 : 周杰伦', 1000.0: ' 作词 : 周杰伦'}
[ "Reference", ":", "https", ":", "//", "github", ".", "com", "/", "osdlyrics", "/", "osdlyrics", "/", "blob", "/", "master", "/", "python", "/", "lrc", ".", "py" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/lyric.py#L6-L29
project-ncl/pnc-cli
pnc_cli/products.py
create_product
def create_product(name, abbreviation, **kwargs): """ Create a new Product """ data = create_product_raw(name, abbreviation, **kwargs) if data: return utils.format_json(data)
python
def create_product(name, abbreviation, **kwargs): """ Create a new Product """ data = create_product_raw(name, abbreviation, **kwargs) if data: return utils.format_json(data)
[ "def", "create_product", "(", "name", ",", "abbreviation", ",", "*", "*", "kwargs", ")", ":", "data", "=", "create_product_raw", "(", "name", ",", "abbreviation", ",", "*", "*", "kwargs", ")", "if", "data", ":", "return", "utils", ".", "format_json", "("...
Create a new Product
[ "Create", "a", "new", "Product" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/products.py#L28-L34
project-ncl/pnc-cli
pnc_cli/products.py
update_product
def update_product(product_id, **kwargs): """ Update a Product with new information """ content = update_product_raw(product_id, **kwargs) if content: return utils.format_json(content)
python
def update_product(product_id, **kwargs): """ Update a Product with new information """ content = update_product_raw(product_id, **kwargs) if content: return utils.format_json(content)
[ "def", "update_product", "(", "product_id", ",", "*", "*", "kwargs", ")", ":", "content", "=", "update_product_raw", "(", "product_id", ",", "*", "*", "kwargs", ")", "if", "content", ":", "return", "utils", ".", "format_json", "(", "content", ")" ]
Update a Product with new information
[ "Update", "a", "Product", "with", "new", "information" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/products.py#L51-L57
project-ncl/pnc-cli
pnc_cli/products.py
get_product
def get_product(id=None, name=None): """ Get a specific Product by name or ID """ content = get_product_raw(id, name) if content: return utils.format_json(content)
python
def get_product(id=None, name=None): """ Get a specific Product by name or ID """ content = get_product_raw(id, name) if content: return utils.format_json(content)
[ "def", "get_product", "(", "id", "=", "None", ",", "name", "=", "None", ")", ":", "content", "=", "get_product_raw", "(", "id", ",", "name", ")", "if", "content", ":", "return", "utils", ".", "format_json", "(", "content", ")" ]
Get a specific Product by name or ID
[ "Get", "a", "specific", "Product", "by", "name", "or", "ID" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/products.py#L74-L80
project-ncl/pnc-cli
pnc_cli/products.py
list_versions_for_product
def list_versions_for_product(id=None, name=None, page_size=200, page_index=0, sort='', q=''): """ List all ProductVersions for a given Product """ content = list_versions_for_product_raw(id, name, page_size, page_index, sort, q) if content: return utils.format_json_list(content)
python
def list_versions_for_product(id=None, name=None, page_size=200, page_index=0, sort='', q=''): """ List all ProductVersions for a given Product """ content = list_versions_for_product_raw(id, name, page_size, page_index, sort, q) if content: return utils.format_json_list(content)
[ "def", "list_versions_for_product", "(", "id", "=", "None", ",", "name", "=", "None", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "''", ",", "q", "=", "''", ")", ":", "content", "=", "list_versions_for_product_raw", "(", ...
List all ProductVersions for a given Product
[ "List", "all", "ProductVersions", "for", "a", "given", "Product" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/products.py#L97-L103
project-ncl/pnc-cli
pnc_cli/products.py
list_products
def list_products(page_size=200, page_index=0, sort="", q=""): """ List all Products """ content = list_products_raw(page_size, page_index, sort, q) if content: return utils.format_json_list(content)
python
def list_products(page_size=200, page_index=0, sort="", q=""): """ List all Products """ content = list_products_raw(page_size, page_index, sort, q) if content: return utils.format_json_list(content)
[ "def", "list_products", "(", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "content", "=", "list_products_raw", "(", "page_size", ",", "page_index", ",", "sort", ",", "q", ")", "if", ...
List all Products
[ "List", "all", "Products" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/products.py#L116-L122
CivicSpleen/ambry
ambry/util/geocoders.py
DstkGeocoder.geocode
def geocode(self): """A Generator that reads from the address generators and returns geocode results. The generator yields ( address, geocode_results, object) """ submit_set = [] data_map = {} for address, o in self.gen: submit_set.append(address) data_map[address] = o if len(submit_set) >= self.submit_size: results = self._send(submit_set) submit_set = [] for k, result in results.items(): o = data_map[k] yield (k, result, o) if len(submit_set) > 0: results = self._send(submit_set) # submit_set = [] for k, result in results.items(): o = data_map[k] yield (k, result, o)
python
def geocode(self): """A Generator that reads from the address generators and returns geocode results. The generator yields ( address, geocode_results, object) """ submit_set = [] data_map = {} for address, o in self.gen: submit_set.append(address) data_map[address] = o if len(submit_set) >= self.submit_size: results = self._send(submit_set) submit_set = [] for k, result in results.items(): o = data_map[k] yield (k, result, o) if len(submit_set) > 0: results = self._send(submit_set) # submit_set = [] for k, result in results.items(): o = data_map[k] yield (k, result, o)
[ "def", "geocode", "(", "self", ")", ":", "submit_set", "=", "[", "]", "data_map", "=", "{", "}", "for", "address", ",", "o", "in", "self", ".", "gen", ":", "submit_set", ".", "append", "(", "address", ")", "data_map", "[", "address", "]", "=", "o",...
A Generator that reads from the address generators and returns geocode results. The generator yields ( address, geocode_results, object)
[ "A", "Generator", "that", "reads", "from", "the", "address", "generators", "and", "returns", "geocode", "results", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/util/geocoders.py#L35-L64
CivicSpleen/ambry
ambry/orm/database.py
migrate
def migrate(connection, dsn): """ Collects all migrations and applies missed. Args: connection (sqlalchemy connection): """ all_migrations = _get_all_migrations() logger.debug('Collected migrations: {}'.format(all_migrations)) for version, modname in all_migrations: if _is_missed(connection, version) and version <= SCHEMA_VERSION: logger.info('Missed migration: {} migration is missed. Migrating...'.format(version)) module = __import__(modname, fromlist='dummy') # run each migration under its own transaction. This allows us to apply valid migrations # and break on invalid. trans = connection.begin() try: module.Migration().migrate(connection) _update_version(connection, version) trans.commit() except: trans.rollback() logger.error("Failed to migrate '{}' on {} ".format(version, dsn)) raise
python
def migrate(connection, dsn): """ Collects all migrations and applies missed. Args: connection (sqlalchemy connection): """ all_migrations = _get_all_migrations() logger.debug('Collected migrations: {}'.format(all_migrations)) for version, modname in all_migrations: if _is_missed(connection, version) and version <= SCHEMA_VERSION: logger.info('Missed migration: {} migration is missed. Migrating...'.format(version)) module = __import__(modname, fromlist='dummy') # run each migration under its own transaction. This allows us to apply valid migrations # and break on invalid. trans = connection.begin() try: module.Migration().migrate(connection) _update_version(connection, version) trans.commit() except: trans.rollback() logger.error("Failed to migrate '{}' on {} ".format(version, dsn)) raise
[ "def", "migrate", "(", "connection", ",", "dsn", ")", ":", "all_migrations", "=", "_get_all_migrations", "(", ")", "logger", ".", "debug", "(", "'Collected migrations: {}'", ".", "format", "(", "all_migrations", ")", ")", "for", "version", ",", "modname", "in"...
Collects all migrations and applies missed. Args: connection (sqlalchemy connection):
[ "Collects", "all", "migrations", "and", "applies", "missed", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L835-L860
CivicSpleen/ambry
ambry/orm/database.py
create_migration_template
def create_migration_template(name): """ Creates migration file. Returns created file name. Args: name (str): name of the migration. Returns: str: name of the migration file. """ assert name, 'Name of the migration can not be empty.' from . import migrations # Find next number # package = migrations prefix = package.__name__ + '.' all_versions = [] for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix): version = int(modname.split('.')[-1].split('_')[0]) all_versions.append(version) next_number = max(all_versions) + 1 # Generate next migration name # next_migration_name = '{}_{}.py'.format(next_number, name) migration_fullname = os.path.join(package.__path__[0], next_migration_name) # Write next migration file content. # with open(migration_fullname, 'w') as f: f.write(MIGRATION_TEMPLATE) return migration_fullname
python
def create_migration_template(name): """ Creates migration file. Returns created file name. Args: name (str): name of the migration. Returns: str: name of the migration file. """ assert name, 'Name of the migration can not be empty.' from . import migrations # Find next number # package = migrations prefix = package.__name__ + '.' all_versions = [] for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix): version = int(modname.split('.')[-1].split('_')[0]) all_versions.append(version) next_number = max(all_versions) + 1 # Generate next migration name # next_migration_name = '{}_{}.py'.format(next_number, name) migration_fullname = os.path.join(package.__path__[0], next_migration_name) # Write next migration file content. # with open(migration_fullname, 'w') as f: f.write(MIGRATION_TEMPLATE) return migration_fullname
[ "def", "create_migration_template", "(", "name", ")", ":", "assert", "name", ",", "'Name of the migration can not be empty.'", "from", ".", "import", "migrations", "# Find next number", "#", "package", "=", "migrations", "prefix", "=", "package", ".", "__name__", "+",...
Creates migration file. Returns created file name. Args: name (str): name of the migration. Returns: str: name of the migration file.
[ "Creates", "migration", "file", ".", "Returns", "created", "file", "name", ".", "Args", ":", "name", "(", "str", ")", ":", "name", "of", "the", "migration", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L863-L894
CivicSpleen/ambry
ambry/orm/database.py
get_stored_version
def get_stored_version(connection): """ Returns database version. Args: connection (sqlalchemy connection): Raises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case) exist because they created with the database creation. Returns: int: version of the database. """ if connection.engine.name == 'sqlite': version = connection.execute('PRAGMA user_version').fetchone()[0] if version == 0: raise VersionIsNotStored return version elif connection.engine.name == 'postgresql': try: r = connection\ .execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME))\ .fetchone() if not r: raise VersionIsNotStored version = r[0] except ProgrammingError: # This happens when the user_version table doesn't exist raise VersionIsNotStored return version else: raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name))
python
def get_stored_version(connection): """ Returns database version. Args: connection (sqlalchemy connection): Raises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case) exist because they created with the database creation. Returns: int: version of the database. """ if connection.engine.name == 'sqlite': version = connection.execute('PRAGMA user_version').fetchone()[0] if version == 0: raise VersionIsNotStored return version elif connection.engine.name == 'postgresql': try: r = connection\ .execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME))\ .fetchone() if not r: raise VersionIsNotStored version = r[0] except ProgrammingError: # This happens when the user_version table doesn't exist raise VersionIsNotStored return version else: raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name))
[ "def", "get_stored_version", "(", "connection", ")", ":", "if", "connection", ".", "engine", ".", "name", "==", "'sqlite'", ":", "version", "=", "connection", ".", "execute", "(", "'PRAGMA user_version'", ")", ".", "fetchone", "(", ")", "[", "0", "]", "if"...
Returns database version. Args: connection (sqlalchemy connection): Raises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case) exist because they created with the database creation. Returns: int: version of the database.
[ "Returns", "database", "version", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L897-L931
CivicSpleen/ambry
ambry/orm/database.py
_validate_version
def _validate_version(connection, dsn): """ Performs on-the-fly schema updates based on the models version. Raises: DatabaseError: if user uses old sqlite database. """ try: version = get_stored_version(connection) except VersionIsNotStored: logger.debug('Version not stored in the db: assuming new database creation.') version = SCHEMA_VERSION _update_version(connection, version) assert isinstance(version, int) if version > 10 and version < 100: raise DatabaseError('You are trying to open an old SQLite database.') if _migration_required(connection): migrate(connection, dsn)
python
def _validate_version(connection, dsn): """ Performs on-the-fly schema updates based on the models version. Raises: DatabaseError: if user uses old sqlite database. """ try: version = get_stored_version(connection) except VersionIsNotStored: logger.debug('Version not stored in the db: assuming new database creation.') version = SCHEMA_VERSION _update_version(connection, version) assert isinstance(version, int) if version > 10 and version < 100: raise DatabaseError('You are trying to open an old SQLite database.') if _migration_required(connection): migrate(connection, dsn)
[ "def", "_validate_version", "(", "connection", ",", "dsn", ")", ":", "try", ":", "version", "=", "get_stored_version", "(", "connection", ")", "except", "VersionIsNotStored", ":", "logger", ".", "debug", "(", "'Version not stored in the db: assuming new database creatio...
Performs on-the-fly schema updates based on the models version. Raises: DatabaseError: if user uses old sqlite database.
[ "Performs", "on", "-", "the", "-", "fly", "schema", "updates", "based", "on", "the", "models", "version", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L934-L953
CivicSpleen/ambry
ambry/orm/database.py
_migration_required
def _migration_required(connection): """ Returns True if ambry models do not match to db tables. Otherwise returns False. """ stored_version = get_stored_version(connection) actual_version = SCHEMA_VERSION assert isinstance(stored_version, int) assert isinstance(actual_version, int) assert stored_version <= actual_version, \ 'Db version can not be greater than models version. Update your source code.' return stored_version < actual_version
python
def _migration_required(connection): """ Returns True if ambry models do not match to db tables. Otherwise returns False. """ stored_version = get_stored_version(connection) actual_version = SCHEMA_VERSION assert isinstance(stored_version, int) assert isinstance(actual_version, int) assert stored_version <= actual_version, \ 'Db version can not be greater than models version. Update your source code.' return stored_version < actual_version
[ "def", "_migration_required", "(", "connection", ")", ":", "stored_version", "=", "get_stored_version", "(", "connection", ")", "actual_version", "=", "SCHEMA_VERSION", "assert", "isinstance", "(", "stored_version", ",", "int", ")", "assert", "isinstance", "(", "act...
Returns True if ambry models do not match to db tables. Otherwise returns False.
[ "Returns", "True", "if", "ambry", "models", "do", "not", "match", "to", "db", "tables", ".", "Otherwise", "returns", "False", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L956-L964
CivicSpleen/ambry
ambry/orm/database.py
_update_version
def _update_version(connection, version): """ Updates version in the db to the given version. Args: connection (sqlalchemy connection): sqlalchemy session where to update version. version (int): version of the migration. """ if connection.engine.name == 'sqlite': connection.execute('PRAGMA user_version = {}'.format(version)) elif connection.engine.name == 'postgresql': connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME))) connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME))) connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);' .format(POSTGRES_SCHEMA_NAME)) # upsert. if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone(): # update connection.execute('UPDATE {}.user_version SET version = {};' .format(POSTGRES_SCHEMA_NAME, version)) else: # insert connection.execute('INSERT INTO {}.user_version (version) VALUES ({})' .format(POSTGRES_SCHEMA_NAME, version)) else: raise DatabaseMissingError('Do not know how to migrate {} engine.' .format(connection.engine.driver))
python
def _update_version(connection, version): """ Updates version in the db to the given version. Args: connection (sqlalchemy connection): sqlalchemy session where to update version. version (int): version of the migration. """ if connection.engine.name == 'sqlite': connection.execute('PRAGMA user_version = {}'.format(version)) elif connection.engine.name == 'postgresql': connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME))) connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME))) connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);' .format(POSTGRES_SCHEMA_NAME)) # upsert. if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone(): # update connection.execute('UPDATE {}.user_version SET version = {};' .format(POSTGRES_SCHEMA_NAME, version)) else: # insert connection.execute('INSERT INTO {}.user_version (version) VALUES ({})' .format(POSTGRES_SCHEMA_NAME, version)) else: raise DatabaseMissingError('Do not know how to migrate {} engine.' .format(connection.engine.driver))
[ "def", "_update_version", "(", "connection", ",", "version", ")", ":", "if", "connection", ".", "engine", ".", "name", "==", "'sqlite'", ":", "connection", ".", "execute", "(", "'PRAGMA user_version = {}'", ".", "format", "(", "version", ")", ")", "elif", "c...
Updates version in the db to the given version. Args: connection (sqlalchemy connection): sqlalchemy session where to update version. version (int): version of the migration.
[ "Updates", "version", "in", "the", "db", "to", "the", "given", "version", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L967-L997
CivicSpleen/ambry
ambry/orm/database.py
_get_all_migrations
def _get_all_migrations(): """ Returns sorted list of all migrations. Returns: list of (int, str) tuples: first elem of the tuple is migration number, second if module name. """ from . import migrations package = migrations prefix = package.__name__ + '.' all_migrations = [] for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix): version = int(modname.split('.')[-1].split('_')[0]) all_migrations.append((version, modname)) all_migrations = sorted(all_migrations, key=lambda x: x[0]) return all_migrations
python
def _get_all_migrations(): """ Returns sorted list of all migrations. Returns: list of (int, str) tuples: first elem of the tuple is migration number, second if module name. """ from . import migrations package = migrations prefix = package.__name__ + '.' all_migrations = [] for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix): version = int(modname.split('.')[-1].split('_')[0]) all_migrations.append((version, modname)) all_migrations = sorted(all_migrations, key=lambda x: x[0]) return all_migrations
[ "def", "_get_all_migrations", "(", ")", ":", "from", ".", "import", "migrations", "package", "=", "migrations", "prefix", "=", "package", ".", "__name__", "+", "'.'", "all_migrations", "=", "[", "]", "for", "importer", ",", "modname", ",", "ispkg", "in", "...
Returns sorted list of all migrations. Returns: list of (int, str) tuples: first elem of the tuple is migration number, second if module name.
[ "Returns", "sorted", "list", "of", "all", "migrations", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L1013-L1030
CivicSpleen/ambry
ambry/orm/database.py
Database.create
def create(self): """Create the database from the base SQL.""" if not self.exists(): self._create_path() self.create_tables() return True return False
python
def create(self): """Create the database from the base SQL.""" if not self.exists(): self._create_path() self.create_tables() return True return False
[ "def", "create", "(", "self", ")", ":", "if", "not", "self", ".", "exists", "(", ")", ":", "self", ".", "_create_path", "(", ")", "self", ".", "create_tables", "(", ")", "return", "True", "return", "False" ]
Create the database from the base SQL.
[ "Create", "the", "database", "from", "the", "base", "SQL", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L107-L115
CivicSpleen/ambry
ambry/orm/database.py
Database._create_path
def _create_path(self): """Create the path to hold the database, if one wwas specified.""" if self.driver == 'sqlite' and 'memory' not in self.dsn and self.dsn != 'sqlite://': dir_ = os.path.dirname(self.path) if dir_ and not os.path.exists(dir_): try: # Multiple process may try to make, so it could already # exist os.makedirs(dir_) except Exception: pass if not os.path.exists(dir_): raise Exception("Couldn't create directory " + dir_)
python
def _create_path(self): """Create the path to hold the database, if one wwas specified.""" if self.driver == 'sqlite' and 'memory' not in self.dsn and self.dsn != 'sqlite://': dir_ = os.path.dirname(self.path) if dir_ and not os.path.exists(dir_): try: # Multiple process may try to make, so it could already # exist os.makedirs(dir_) except Exception: pass if not os.path.exists(dir_): raise Exception("Couldn't create directory " + dir_)
[ "def", "_create_path", "(", "self", ")", ":", "if", "self", ".", "driver", "==", "'sqlite'", "and", "'memory'", "not", "in", "self", ".", "dsn", "and", "self", ".", "dsn", "!=", "'sqlite://'", ":", "dir_", "=", "os", ".", "path", ".", "dirname", "(",...
Create the path to hold the database, if one wwas specified.
[ "Create", "the", "path", "to", "hold", "the", "database", "if", "one", "wwas", "specified", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L117-L133
CivicSpleen/ambry
ambry/orm/database.py
Database.exists
def exists(self): """Return True if the database exists, or for Sqlite, which will create the file on the first reference, the file has been initialized with the root config """ if self.driver == 'sqlite' and not os.path.exists(self.path): return False # init engine self.engine try: # Since we are using the connection, rather than the session, need to # explicitly set the search path. from sqlalchemy.engine.reflection import Inspector inspector = Inspector.from_engine(self.engine) if 'config' in inspector.get_table_names(schema=self._schema): return True else: return False finally: self.close_connection()
python
def exists(self): """Return True if the database exists, or for Sqlite, which will create the file on the first reference, the file has been initialized with the root config """ if self.driver == 'sqlite' and not os.path.exists(self.path): return False # init engine self.engine try: # Since we are using the connection, rather than the session, need to # explicitly set the search path. from sqlalchemy.engine.reflection import Inspector inspector = Inspector.from_engine(self.engine) if 'config' in inspector.get_table_names(schema=self._schema): return True else: return False finally: self.close_connection()
[ "def", "exists", "(", "self", ")", ":", "if", "self", ".", "driver", "==", "'sqlite'", "and", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", ":", "return", "False", "# init engine", "self", ".", "engine", "try", ":", "# Since...
Return True if the database exists, or for Sqlite, which will create the file on the first reference, the file has been initialized with the root config
[ "Return", "True", "if", "the", "database", "exists", "or", "for", "Sqlite", "which", "will", "create", "the", "file", "on", "the", "first", "reference", "the", "file", "has", "been", "initialized", "with", "the", "root", "config" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L139-L162
CivicSpleen/ambry
ambry/orm/database.py
Database.engine
def engine(self): """return the SqlAlchemy engine for this database.""" if not self._engine: if 'postgres' in self.driver: if 'connect_args' not in self.engine_kwargs: self.engine_kwargs['connect_args'] = { 'application_name': '{}:{}'.format(self._application_prefix, os.getpid()) } # For most use, a small pool is good to prevent connection exhaustion, but these settings may # be too low for the main public web application. self._engine = create_engine(self.dsn, echo=self._echo, pool_size=5, max_overflow=5, **self.engine_kwargs) else: self._engine = create_engine( self.dsn, echo=self._echo, **self.engine_kwargs) # # Disconnect connections that have a different PID from the one they were created in. # This protects against re-use in multi-processing. # @event.listens_for(self._engine, 'connect') def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() @event.listens_for(self._engine, 'checkout') def checkout(dbapi_connection, connection_record, connection_proxy): from sqlalchemy.exc import DisconnectionError pid = os.getpid() if connection_record.info['pid'] != pid: connection_record.connection = connection_proxy.connection = None raise DisconnectionError( "Connection record belongs to pid %s, attempting to check out in pid %s" % (connection_record.info['pid'], pid)) if self.driver == 'sqlite': @event.listens_for(self._engine, 'connect') def pragma_on_connect(dbapi_con, con_record): """ISSUE some Sqlite pragmas when the connection is created.""" # dbapi_con.execute('PRAGMA foreign_keys = ON;') # Not clear that there is a performance improvement. # dbapi_con.execute('PRAGMA journal_mode = WAL') dbapi_con.execute('PRAGMA synchronous = OFF') dbapi_con.execute('PRAGMA temp_store = MEMORY') dbapi_con.execute('PRAGMA cache_size = 500000') if self._foreign_keys: dbapi_con.execute('PRAGMA foreign_keys=ON') with self._engine.connect() as conn: _validate_version(conn, self.dsn) return self._engine
python
def engine(self): """return the SqlAlchemy engine for this database.""" if not self._engine: if 'postgres' in self.driver: if 'connect_args' not in self.engine_kwargs: self.engine_kwargs['connect_args'] = { 'application_name': '{}:{}'.format(self._application_prefix, os.getpid()) } # For most use, a small pool is good to prevent connection exhaustion, but these settings may # be too low for the main public web application. self._engine = create_engine(self.dsn, echo=self._echo, pool_size=5, max_overflow=5, **self.engine_kwargs) else: self._engine = create_engine( self.dsn, echo=self._echo, **self.engine_kwargs) # # Disconnect connections that have a different PID from the one they were created in. # This protects against re-use in multi-processing. # @event.listens_for(self._engine, 'connect') def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() @event.listens_for(self._engine, 'checkout') def checkout(dbapi_connection, connection_record, connection_proxy): from sqlalchemy.exc import DisconnectionError pid = os.getpid() if connection_record.info['pid'] != pid: connection_record.connection = connection_proxy.connection = None raise DisconnectionError( "Connection record belongs to pid %s, attempting to check out in pid %s" % (connection_record.info['pid'], pid)) if self.driver == 'sqlite': @event.listens_for(self._engine, 'connect') def pragma_on_connect(dbapi_con, con_record): """ISSUE some Sqlite pragmas when the connection is created.""" # dbapi_con.execute('PRAGMA foreign_keys = ON;') # Not clear that there is a performance improvement. # dbapi_con.execute('PRAGMA journal_mode = WAL') dbapi_con.execute('PRAGMA synchronous = OFF') dbapi_con.execute('PRAGMA temp_store = MEMORY') dbapi_con.execute('PRAGMA cache_size = 500000') if self._foreign_keys: dbapi_con.execute('PRAGMA foreign_keys=ON') with self._engine.connect() as conn: _validate_version(conn, self.dsn) return self._engine
[ "def", "engine", "(", "self", ")", ":", "if", "not", "self", ".", "_engine", ":", "if", "'postgres'", "in", "self", ".", "driver", ":", "if", "'connect_args'", "not", "in", "self", ".", "engine_kwargs", ":", "self", ".", "engine_kwargs", "[", "'connect_a...
return the SqlAlchemy engine for this database.
[ "return", "the", "SqlAlchemy", "engine", "for", "this", "database", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L165-L225
CivicSpleen/ambry
ambry/orm/database.py
Database.connection
def connection(self): """Return an SqlAlchemy connection.""" if not self._connection: logger.debug('Opening connection to: {}'.format(self.dsn)) self._connection = self.engine.connect() logger.debug('Opened connection to: {}'.format(self.dsn)) # logger.debug("Opening connection to: {}".format(self.dsn)) return self._connection
python
def connection(self): """Return an SqlAlchemy connection.""" if not self._connection: logger.debug('Opening connection to: {}'.format(self.dsn)) self._connection = self.engine.connect() logger.debug('Opened connection to: {}'.format(self.dsn)) # logger.debug("Opening connection to: {}".format(self.dsn)) return self._connection
[ "def", "connection", "(", "self", ")", ":", "if", "not", "self", ".", "_connection", ":", "logger", ".", "debug", "(", "'Opening connection to: {}'", ".", "format", "(", "self", ".", "dsn", ")", ")", "self", ".", "_connection", "=", "self", ".", "engine"...
Return an SqlAlchemy connection.
[ "Return", "an", "SqlAlchemy", "connection", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L228-L236
CivicSpleen/ambry
ambry/orm/database.py
Database.session
def session(self): """Return a SqlAlchemy session.""" from sqlalchemy.orm import sessionmaker from sqlalchemy.event import listen if not self.Session: self.Session = sessionmaker(bind=self.engine) if not self._session: self._session = self.Session() # set the search path if self._schema: def after_begin(session, transaction, connection): # import traceback # print traceback.print_stack() session.execute('SET search_path TO {}'.format(self._schema)) listen(self._session, 'after_begin', after_begin) return self._session
python
def session(self): """Return a SqlAlchemy session.""" from sqlalchemy.orm import sessionmaker from sqlalchemy.event import listen if not self.Session: self.Session = sessionmaker(bind=self.engine) if not self._session: self._session = self.Session() # set the search path if self._schema: def after_begin(session, transaction, connection): # import traceback # print traceback.print_stack() session.execute('SET search_path TO {}'.format(self._schema)) listen(self._session, 'after_begin', after_begin) return self._session
[ "def", "session", "(", "self", ")", ":", "from", "sqlalchemy", ".", "orm", "import", "sessionmaker", "from", "sqlalchemy", ".", "event", "import", "listen", "if", "not", "self", ".", "Session", ":", "self", ".", "Session", "=", "sessionmaker", "(", "bind",...
Return a SqlAlchemy session.
[ "Return", "a", "SqlAlchemy", "session", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L239-L259
CivicSpleen/ambry
ambry/orm/database.py
Database.metadata
def metadata(self): """Return an SqlAlchemy MetaData object, bound to the engine.""" from sqlalchemy import MetaData metadata = MetaData(bind=self.engine, schema=self._schema) metadata.reflect(self.engine) return metadata
python
def metadata(self): """Return an SqlAlchemy MetaData object, bound to the engine.""" from sqlalchemy import MetaData metadata = MetaData(bind=self.engine, schema=self._schema) metadata.reflect(self.engine) return metadata
[ "def", "metadata", "(", "self", ")", ":", "from", "sqlalchemy", "import", "MetaData", "metadata", "=", "MetaData", "(", "bind", "=", "self", ".", "engine", ",", "schema", "=", "self", ".", "_schema", ")", "metadata", ".", "reflect", "(", "self", ".", "...
Return an SqlAlchemy MetaData object, bound to the engine.
[ "Return", "an", "SqlAlchemy", "MetaData", "object", "bound", "to", "the", "engine", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L362-L371
CivicSpleen/ambry
ambry/orm/database.py
Database._add_config_root
def _add_config_root(self): """ Adds the root dataset, which holds configuration values for the database. """ try: self.session.query(Dataset).filter_by(id=ROOT_CONFIG_NAME).one() self.close_session() except NoResultFound: o = Dataset( id=ROOT_CONFIG_NAME, vid=ROOT_CONFIG_NAME_V, name=ROOT_CONFIG_NAME, vname=ROOT_CONFIG_NAME_V, fqname='datasetroot-0.0.0~' + ROOT_CONFIG_NAME_V, cache_key=ROOT_CONFIG_NAME, version='0.0.0', source=ROOT_CONFIG_NAME, dataset=ROOT_CONFIG_NAME, revision=1, ) self.session.add(o) self.commit()
python
def _add_config_root(self): """ Adds the root dataset, which holds configuration values for the database. """ try: self.session.query(Dataset).filter_by(id=ROOT_CONFIG_NAME).one() self.close_session() except NoResultFound: o = Dataset( id=ROOT_CONFIG_NAME, vid=ROOT_CONFIG_NAME_V, name=ROOT_CONFIG_NAME, vname=ROOT_CONFIG_NAME_V, fqname='datasetroot-0.0.0~' + ROOT_CONFIG_NAME_V, cache_key=ROOT_CONFIG_NAME, version='0.0.0', source=ROOT_CONFIG_NAME, dataset=ROOT_CONFIG_NAME, revision=1, ) self.session.add(o) self.commit()
[ "def", "_add_config_root", "(", "self", ")", ":", "try", ":", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter_by", "(", "id", "=", "ROOT_CONFIG_NAME", ")", ".", "one", "(", ")", "self", ".", "close_session", "(", ")", "except", ...
Adds the root dataset, which holds configuration values for the database.
[ "Adds", "the", "root", "dataset", "which", "holds", "configuration", "values", "for", "the", "database", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L422-L442
CivicSpleen/ambry
ambry/orm/database.py
Database.new_dataset
def new_dataset(self, *args, **kwargs): """ Creates a new dataset :param args: Positional args passed to the Dataset constructor. :param kwargs: Keyword args passed to the Dataset constructor. :return: :class:`ambry.orm.Dataset` :raises: :class:`ambry.orm.ConflictError` if the a Dataset records already exists with the given vid """ ds = Dataset(*args, **kwargs) try: self.session.add(ds) self.session.commit() ds._database = self return ds except IntegrityError as e: self.session.rollback() raise ConflictError( "Can't create dataset '{}'; one probably already exists: {} ".format(str(ds), e))
python
def new_dataset(self, *args, **kwargs): """ Creates a new dataset :param args: Positional args passed to the Dataset constructor. :param kwargs: Keyword args passed to the Dataset constructor. :return: :class:`ambry.orm.Dataset` :raises: :class:`ambry.orm.ConflictError` if the a Dataset records already exists with the given vid """ ds = Dataset(*args, **kwargs) try: self.session.add(ds) self.session.commit() ds._database = self return ds except IntegrityError as e: self.session.rollback() raise ConflictError( "Can't create dataset '{}'; one probably already exists: {} ".format(str(ds), e))
[ "def", "new_dataset", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ds", "=", "Dataset", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "self", ".", "session", ".", "add", "(", "ds", ")", "self", ".", "session"...
Creates a new dataset :param args: Positional args passed to the Dataset constructor. :param kwargs: Keyword args passed to the Dataset constructor. :return: :class:`ambry.orm.Dataset` :raises: :class:`ambry.orm.ConflictError` if the a Dataset records already exists with the given vid
[ "Creates", "a", "new", "dataset" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L448-L467
CivicSpleen/ambry
ambry/orm/database.py
Database.root_dataset
def root_dataset(self): """Return the root dataset, which hold configuration values for the library""" ds = self.dataset(ROOT_CONFIG_NAME_V) ds._database = self return ds
python
def root_dataset(self): """Return the root dataset, which hold configuration values for the library""" ds = self.dataset(ROOT_CONFIG_NAME_V) ds._database = self return ds
[ "def", "root_dataset", "(", "self", ")", ":", "ds", "=", "self", ".", "dataset", "(", "ROOT_CONFIG_NAME_V", ")", "ds", ".", "_database", "=", "self", "return", "ds" ]
Return the root dataset, which hold configuration values for the library
[ "Return", "the", "root", "dataset", "which", "hold", "configuration", "values", "for", "the", "library" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L470-L474
CivicSpleen/ambry
ambry/orm/database.py
Database.package_dataset
def package_dataset(self): """For sqlite bundle packages, return the first ( and only ) dataset""" return self.session.query(Dataset).filter(Dataset.vid != ROOT_CONFIG_NAME_V).one()
python
def package_dataset(self): """For sqlite bundle packages, return the first ( and only ) dataset""" return self.session.query(Dataset).filter(Dataset.vid != ROOT_CONFIG_NAME_V).one()
[ "def", "package_dataset", "(", "self", ")", ":", "return", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset", ".", "vid", "!=", "ROOT_CONFIG_NAME_V", ")", ".", "one", "(", ")" ]
For sqlite bundle packages, return the first ( and only ) dataset
[ "For", "sqlite", "bundle", "packages", "return", "the", "first", "(", "and", "only", ")", "dataset" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L477-L480
CivicSpleen/ambry
ambry/orm/database.py
Database.dataset
def dataset(self, ref, load_all=False, exception=True): """Return a dataset, given a vid or id :param ref: Vid or id for a dataset. If an id is provided, will it will return the one with the largest revision number :param load_all: Use a query that eagerly loads everything. :return: :class:`ambry.orm.Dataset` """ ref = str(ref) try: ds = self.session.query(Dataset).filter(Dataset.vid == ref).one() except NoResultFound: ds = None if not ds: try: ds = self.session \ .query(Dataset) \ .filter(Dataset.id == ref) \ .order_by(Dataset.revision.desc()) \ .first() except NoResultFound: ds = None if not ds: try: ds = self.session.query(Dataset).filter(Dataset.vname == ref).one() except NoResultFound: ds = None if not ds: try: ds = self.session \ .query(Dataset) \ .filter(Dataset.name == ref) \ .order_by(Dataset.revision.desc()) \ .first() except NoResultFound: ds = None if ds: ds._database = self return ds elif exception: raise NotFoundError('No dataset in library for vid : {} '.format(ref)) else: return None
python
def dataset(self, ref, load_all=False, exception=True): """Return a dataset, given a vid or id :param ref: Vid or id for a dataset. If an id is provided, will it will return the one with the largest revision number :param load_all: Use a query that eagerly loads everything. :return: :class:`ambry.orm.Dataset` """ ref = str(ref) try: ds = self.session.query(Dataset).filter(Dataset.vid == ref).one() except NoResultFound: ds = None if not ds: try: ds = self.session \ .query(Dataset) \ .filter(Dataset.id == ref) \ .order_by(Dataset.revision.desc()) \ .first() except NoResultFound: ds = None if not ds: try: ds = self.session.query(Dataset).filter(Dataset.vname == ref).one() except NoResultFound: ds = None if not ds: try: ds = self.session \ .query(Dataset) \ .filter(Dataset.name == ref) \ .order_by(Dataset.revision.desc()) \ .first() except NoResultFound: ds = None if ds: ds._database = self return ds elif exception: raise NotFoundError('No dataset in library for vid : {} '.format(ref)) else: return None
[ "def", "dataset", "(", "self", ",", "ref", ",", "load_all", "=", "False", ",", "exception", "=", "True", ")", ":", "ref", "=", "str", "(", "ref", ")", "try", ":", "ds", "=", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter"...
Return a dataset, given a vid or id :param ref: Vid or id for a dataset. If an id is provided, will it will return the one with the largest revision number :param load_all: Use a query that eagerly loads everything. :return: :class:`ambry.orm.Dataset`
[ "Return", "a", "dataset", "given", "a", "vid", "or", "id" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L482-L531
CivicSpleen/ambry
ambry/orm/database.py
Database.datasets
def datasets(self): """ Return all datasets :return: """ return self.session.query(Dataset).filter(Dataset.vid != ROOT_CONFIG_NAME_V).all()
python
def datasets(self): """ Return all datasets :return: """ return self.session.query(Dataset).filter(Dataset.vid != ROOT_CONFIG_NAME_V).all()
[ "def", "datasets", "(", "self", ")", ":", "return", "self", ".", "session", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset", ".", "vid", "!=", "ROOT_CONFIG_NAME_V", ")", ".", "all", "(", ")" ]
Return all datasets :return:
[ "Return", "all", "datasets" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L547-L554
CivicSpleen/ambry
ambry/orm/database.py
Database.delete_tables_partitions
def delete_tables_partitions(self, ds): """Fast delete of all of a datasets codes, columns, partitions and tables""" from ambry.orm import Code, Column, Table, Partition, ColumnStat, Process ssq = self.session.query ssq(Process).filter(Process.d_vid == ds.vid).delete() ssq(Code).filter(Code.d_vid == ds.vid).delete() ssq(ColumnStat).filter(ColumnStat.d_vid == ds.vid).delete() ssq(Column).filter(Column.d_vid == ds.vid).delete() ssq(Partition).filter(Partition.d_vid == ds.vid).delete() for source in ds.sources: source._dest_table = None ssq(Table).filter(Table.d_vid == ds.vid).delete()
python
def delete_tables_partitions(self, ds): """Fast delete of all of a datasets codes, columns, partitions and tables""" from ambry.orm import Code, Column, Table, Partition, ColumnStat, Process ssq = self.session.query ssq(Process).filter(Process.d_vid == ds.vid).delete() ssq(Code).filter(Code.d_vid == ds.vid).delete() ssq(ColumnStat).filter(ColumnStat.d_vid == ds.vid).delete() ssq(Column).filter(Column.d_vid == ds.vid).delete() ssq(Partition).filter(Partition.d_vid == ds.vid).delete() for source in ds.sources: source._dest_table = None ssq(Table).filter(Table.d_vid == ds.vid).delete()
[ "def", "delete_tables_partitions", "(", "self", ",", "ds", ")", ":", "from", "ambry", ".", "orm", "import", "Code", ",", "Column", ",", "Table", ",", "Partition", ",", "ColumnStat", ",", "Process", "ssq", "=", "self", ".", "session", ".", "query", "ssq",...
Fast delete of all of a datasets codes, columns, partitions and tables
[ "Fast", "delete", "of", "all", "of", "a", "datasets", "codes", "columns", "partitions", "and", "tables" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L563-L578
CivicSpleen/ambry
ambry/orm/database.py
Database.delete_partitions
def delete_partitions(self, ds): """Fast delete of all of a datasets codes, columns, partitions and tables""" from ambry.orm import Partition ssq = self.session.query ssq(Process).filter(Process.d_vid == ds.vid).delete() ssq(Code).filter(Code.d_vid == ds.vid).delete() ssq(ColumnStat).filter(ColumnStat.d_vid == ds.vid).delete() ssq(Partition).filter(Partition.d_vid == ds.vid).delete()
python
def delete_partitions(self, ds): """Fast delete of all of a datasets codes, columns, partitions and tables""" from ambry.orm import Partition ssq = self.session.query ssq(Process).filter(Process.d_vid == ds.vid).delete() ssq(Code).filter(Code.d_vid == ds.vid).delete() ssq(ColumnStat).filter(ColumnStat.d_vid == ds.vid).delete() ssq(Partition).filter(Partition.d_vid == ds.vid).delete()
[ "def", "delete_partitions", "(", "self", ",", "ds", ")", ":", "from", "ambry", ".", "orm", "import", "Partition", "ssq", "=", "self", ".", "session", ".", "query", "ssq", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "ds", "...
Fast delete of all of a datasets codes, columns, partitions and tables
[ "Fast", "delete", "of", "all", "of", "a", "datasets", "codes", "columns", "partitions", "and", "tables" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L580-L589
CivicSpleen/ambry
ambry/orm/database.py
Database.copy_dataset
def copy_dataset(self, ds, incver=False, cb=None, **kwargs): """ Copy a dataset into the database. :param ds: The source dataset to copy :param cb: A progress callback, taking two parameters: cb(message, num_records) :return: """ from ambry.orm import Table, Column, Partition, File, ColumnStat, Code, \ DataSource, SourceTable, SourceColumn tables = [Table, Column, Partition, File, ColumnStat, Code, SourceTable, SourceColumn, DataSource] return self._copy_dataset_copy(ds, tables, incver, cb, **kwargs)
python
def copy_dataset(self, ds, incver=False, cb=None, **kwargs): """ Copy a dataset into the database. :param ds: The source dataset to copy :param cb: A progress callback, taking two parameters: cb(message, num_records) :return: """ from ambry.orm import Table, Column, Partition, File, ColumnStat, Code, \ DataSource, SourceTable, SourceColumn tables = [Table, Column, Partition, File, ColumnStat, Code, SourceTable, SourceColumn, DataSource] return self._copy_dataset_copy(ds, tables, incver, cb, **kwargs)
[ "def", "copy_dataset", "(", "self", ",", "ds", ",", "incver", "=", "False", ",", "cb", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "ambry", ".", "orm", "import", "Table", ",", "Column", ",", "Partition", ",", "File", ",", "ColumnStat", ...
Copy a dataset into the database. :param ds: The source dataset to copy :param cb: A progress callback, taking two parameters: cb(message, num_records) :return:
[ "Copy", "a", "dataset", "into", "the", "database", ".", ":", "param", "ds", ":", "The", "source", "dataset", "to", "copy", ":", "param", "cb", ":", "A", "progress", "callback", "taking", "two", "parameters", ":", "cb", "(", "message", "num_records", ")",...
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L591-L603
CivicSpleen/ambry
ambry/orm/database.py
Database.copy_dataset_files
def copy_dataset_files(self, ds, incver=False, cb=None, **kwargs): """ Copy only files and configs into the database. :param ds: The source dataset to copy :param cb: A progress callback, taking two parameters: cb(message, num_records) :return: """ from ambry.orm import File tables = [File] return self._copy_dataset_copy(ds, tables, incver, cb, **kwargs)
python
def copy_dataset_files(self, ds, incver=False, cb=None, **kwargs): """ Copy only files and configs into the database. :param ds: The source dataset to copy :param cb: A progress callback, taking two parameters: cb(message, num_records) :return: """ from ambry.orm import File tables = [File] return self._copy_dataset_copy(ds, tables, incver, cb, **kwargs)
[ "def", "copy_dataset_files", "(", "self", ",", "ds", ",", "incver", "=", "False", ",", "cb", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "ambry", ".", "orm", "import", "File", "tables", "=", "[", "File", "]", "return", "self", ".", "_co...
Copy only files and configs into the database. :param ds: The source dataset to copy :param cb: A progress callback, taking two parameters: cb(message, num_records) :return:
[ "Copy", "only", "files", "and", "configs", "into", "the", "database", ".", ":", "param", "ds", ":", "The", "source", "dataset", "to", "copy", ":", "param", "cb", ":", "A", "progress", "callback", "taking", "two", "parameters", ":", "cb", "(", "message", ...
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L605-L616
CivicSpleen/ambry
ambry/orm/database.py
Database.next_sequence_id
def next_sequence_id(self, parent_table_class, parent_vid, child_table_class): """Get the next sequence id for child objects for a parent object that has a child sequence field""" from sqlalchemy import text # Name of sequence id column in the child c_seq_col = child_table_class.sequence_id.property.columns[0].name p_seq_col = getattr(parent_table_class, c_seq_col).property.columns[0].name p_vid_col = parent_table_class.vid.property.columns[0].name if self.driver == 'sqlite': # The Sqlite version is not atomic, but Sqlite also doesn't support concurrency # So, we don't have to open a new connection, but we also can't open a new connection, so # this uses the session. self.commit() sql = text("SELECT {p_seq_col} FROM {p_table} WHERE {p_vid_col} = '{parent_vid}' " .format(p_table=parent_table_class.__tablename__, p_seq_col=p_seq_col, p_vid_col=p_vid_col, parent_vid=parent_vid)) v = next(iter(self.session.execute(sql)))[0] sql = text("UPDATE {p_table} SET {p_seq_col} = {p_seq_col} + 1 WHERE {p_vid_col} = '{parent_vid}' " .format(p_table=parent_table_class.__tablename__, p_seq_col=p_seq_col, p_vid_col=p_vid_col, parent_vid=parent_vid)) self.session.execute(sql) self.commit() return v else: # Must be postgres, or something else that supports "RETURNING" sql = text(""" UPDATE {p_table} SET {p_seq_col} = {p_seq_col} + 1 WHERE {p_vid_col} = '{parent_vid}' RETURNING {p_seq_col} """.format(p_table=parent_table_class.__tablename__, p_seq_col=p_seq_col, p_vid_col=p_vid_col, parent_vid=parent_vid)) self.connection.execute('SET search_path TO {}'.format(self._schema)) r = self.connection.execute(sql) v = next(iter(r))[0] return v-1
python
def next_sequence_id(self, parent_table_class, parent_vid, child_table_class): """Get the next sequence id for child objects for a parent object that has a child sequence field""" from sqlalchemy import text # Name of sequence id column in the child c_seq_col = child_table_class.sequence_id.property.columns[0].name p_seq_col = getattr(parent_table_class, c_seq_col).property.columns[0].name p_vid_col = parent_table_class.vid.property.columns[0].name if self.driver == 'sqlite': # The Sqlite version is not atomic, but Sqlite also doesn't support concurrency # So, we don't have to open a new connection, but we also can't open a new connection, so # this uses the session. self.commit() sql = text("SELECT {p_seq_col} FROM {p_table} WHERE {p_vid_col} = '{parent_vid}' " .format(p_table=parent_table_class.__tablename__, p_seq_col=p_seq_col, p_vid_col=p_vid_col, parent_vid=parent_vid)) v = next(iter(self.session.execute(sql)))[0] sql = text("UPDATE {p_table} SET {p_seq_col} = {p_seq_col} + 1 WHERE {p_vid_col} = '{parent_vid}' " .format(p_table=parent_table_class.__tablename__, p_seq_col=p_seq_col, p_vid_col=p_vid_col, parent_vid=parent_vid)) self.session.execute(sql) self.commit() return v else: # Must be postgres, or something else that supports "RETURNING" sql = text(""" UPDATE {p_table} SET {p_seq_col} = {p_seq_col} + 1 WHERE {p_vid_col} = '{parent_vid}' RETURNING {p_seq_col} """.format(p_table=parent_table_class.__tablename__, p_seq_col=p_seq_col, p_vid_col=p_vid_col, parent_vid=parent_vid)) self.connection.execute('SET search_path TO {}'.format(self._schema)) r = self.connection.execute(sql) v = next(iter(r))[0] return v-1
[ "def", "next_sequence_id", "(", "self", ",", "parent_table_class", ",", "parent_vid", ",", "child_table_class", ")", ":", "from", "sqlalchemy", "import", "text", "# Name of sequence id column in the child", "c_seq_col", "=", "child_table_class", ".", "sequence_id", ".", ...
Get the next sequence id for child objects for a parent object that has a child sequence field
[ "Get", "the", "next", "sequence", "id", "for", "child", "objects", "for", "a", "parent", "object", "that", "has", "a", "child", "sequence", "field" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L716-L757
CivicSpleen/ambry
ambry/orm/database.py
BaseMigration.create_table
def create_table(table, connection, schema=None): """Create a single table, primarily used din migrations""" orig_schemas = {} # These schema shenanigans are almost certainly wrong. # But they are expedient. For Postgres, it puts the library # tables in the Library schema. We need to change the schema for all tables in case # the table we are creating references another table if schema: connection.execute("SET search_path TO {}".format(schema)) for table in ALL_TABLES: orig_schemas[table.__table__] = table.__table__.schema table.__table__.schema = schema table.__table__.create(bind=connection.engine) # We have to put the schemas back because when installing to a warehouse. # the same library classes can be used to access a Sqlite database, which # does not handle schemas. if schema: for it, orig_schema in list(orig_schemas.items()): it.schema = orig_schema
python
def create_table(table, connection, schema=None): """Create a single table, primarily used din migrations""" orig_schemas = {} # These schema shenanigans are almost certainly wrong. # But they are expedient. For Postgres, it puts the library # tables in the Library schema. We need to change the schema for all tables in case # the table we are creating references another table if schema: connection.execute("SET search_path TO {}".format(schema)) for table in ALL_TABLES: orig_schemas[table.__table__] = table.__table__.schema table.__table__.schema = schema table.__table__.create(bind=connection.engine) # We have to put the schemas back because when installing to a warehouse. # the same library classes can be used to access a Sqlite database, which # does not handle schemas. if schema: for it, orig_schema in list(orig_schemas.items()): it.schema = orig_schema
[ "def", "create_table", "(", "table", ",", "connection", ",", "schema", "=", "None", ")", ":", "orig_schemas", "=", "{", "}", "# These schema shenanigans are almost certainly wrong.", "# But they are expedient. For Postgres, it puts the library", "# tables in the Library schema. W...
Create a single table, primarily used din migrations
[ "Create", "a", "single", "table", "primarily", "used", "din", "migrations" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L796-L819
twisted/epsilon
epsilon/hotfixes/deferredgenerator_tfailure.py
_deferGenerator
def _deferGenerator(g, deferred=None): """ See L{waitForDeferred}. """ result = None while 1: if deferred is None: deferred = defer.Deferred() try: result = g.next() except StopIteration: deferred.callback(result) return deferred except: deferred.errback() return deferred # Deferred.callback(Deferred) raises an error; we catch this case # early here and give a nicer error message to the user in case # they yield a Deferred. Perhaps eventually these semantics may # change. if isinstance(result, defer.Deferred): return defer.fail(TypeError("Yield waitForDeferred(d), not d!")) if isinstance(result, defer.waitForDeferred): waiting = [True, None] # Pass vars in so they don't get changed going around the loop def gotResult(r, waiting=waiting, result=result): result.result = r if waiting[0]: waiting[0] = False waiting[1] = r else: _deferGenerator(g, deferred) result.d.addBoth(gotResult) if waiting[0]: # Haven't called back yet, set flag so that we get reinvoked # and return from the loop waiting[0] = False return deferred result = None
python
def _deferGenerator(g, deferred=None): """ See L{waitForDeferred}. """ result = None while 1: if deferred is None: deferred = defer.Deferred() try: result = g.next() except StopIteration: deferred.callback(result) return deferred except: deferred.errback() return deferred # Deferred.callback(Deferred) raises an error; we catch this case # early here and give a nicer error message to the user in case # they yield a Deferred. Perhaps eventually these semantics may # change. if isinstance(result, defer.Deferred): return defer.fail(TypeError("Yield waitForDeferred(d), not d!")) if isinstance(result, defer.waitForDeferred): waiting = [True, None] # Pass vars in so they don't get changed going around the loop def gotResult(r, waiting=waiting, result=result): result.result = r if waiting[0]: waiting[0] = False waiting[1] = r else: _deferGenerator(g, deferred) result.d.addBoth(gotResult) if waiting[0]: # Haven't called back yet, set flag so that we get reinvoked # and return from the loop waiting[0] = False return deferred result = None
[ "def", "_deferGenerator", "(", "g", ",", "deferred", "=", "None", ")", ":", "result", "=", "None", "while", "1", ":", "if", "deferred", "is", "None", ":", "deferred", "=", "defer", ".", "Deferred", "(", ")", "try", ":", "result", "=", "g", ".", "ne...
See L{waitForDeferred}.
[ "See", "L", "{", "waitForDeferred", "}", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/deferredgenerator_tfailure.py#L11-L51
SmartTeleMax/iktomi
iktomi/unstable/utils/functools.py
return_locals
def return_locals(func): '''Modifies decorated function to return its locals''' @functools.wraps(func) def wrap(*args, **kwargs): frames = [] def tracer(frame, event, arg): # pragma: no cover # coverage does not work in this function because the tracer # is deactivated here frames.append(frame) sys.settrace(old_tracer) if old_tracer is not None: return old_tracer(frame, event, arg) old_tracer = sys.gettrace() # tracer is activated on next call, return or exception sys.settrace(tracer) try: func(*args, **kwargs) finally: sys.settrace(old_tracer) assert len(frames) == 1 argspec = inspect.getargspec(func) argnames = list(argspec.args) if argspec.varargs is not None: argnames.append(argspec.varargs) if argspec.keywords is not None: argnames.append(argspec.keywords) return {name: value for name, value in frames.pop(0).f_locals.items() if name not in argnames} return wrap
python
def return_locals(func): '''Modifies decorated function to return its locals''' @functools.wraps(func) def wrap(*args, **kwargs): frames = [] def tracer(frame, event, arg): # pragma: no cover # coverage does not work in this function because the tracer # is deactivated here frames.append(frame) sys.settrace(old_tracer) if old_tracer is not None: return old_tracer(frame, event, arg) old_tracer = sys.gettrace() # tracer is activated on next call, return or exception sys.settrace(tracer) try: func(*args, **kwargs) finally: sys.settrace(old_tracer) assert len(frames) == 1 argspec = inspect.getargspec(func) argnames = list(argspec.args) if argspec.varargs is not None: argnames.append(argspec.varargs) if argspec.keywords is not None: argnames.append(argspec.keywords) return {name: value for name, value in frames.pop(0).f_locals.items() if name not in argnames} return wrap
[ "def", "return_locals", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "frames", "=", "[", "]", "def", "tracer", "(", "frame", ",", "event", ",", "arg",...
Modifies decorated function to return its locals
[ "Modifies", "decorated", "function", "to", "return", "its", "locals" ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/unstable/utils/functools.py#L6-L38
SmartTeleMax/iktomi
iktomi/db/sqla/__init__.py
multidb_binds
def multidb_binds(databases, package=None, engine_params=None): '''Creates dictionary to be passed as `binds` parameter to `sqlalchemy.orm.sessionmaker()` from dictionary mapping models module name to connection URI that should be used for these models. Models module must have `metadata` attribute. `package` when set must be a package or package name for all models modules.''' engine_params = engine_params or {} if not (package is None or isinstance(package, six.string_types)): package = getattr(package, '__package__', None) or package.__name__ binds = {} for ref, uri in databases.items(): md_ref = '.'.join(filter(None, [package, ref])) md_module = import_module(md_ref) try: metadata = md_module.metadata except AttributeError: raise ImportError( 'Cannot import name metadata from module {}'.format(md_ref)) engine = create_engine(uri, **engine_params) # Dot before [name] is required to allow setting logging level etc. for # all them at once. engine.logger = logging.getLogger('sqlalchemy.engine.[%s]' % ref) for table in metadata.sorted_tables: binds[table] = engine return binds
python
def multidb_binds(databases, package=None, engine_params=None): '''Creates dictionary to be passed as `binds` parameter to `sqlalchemy.orm.sessionmaker()` from dictionary mapping models module name to connection URI that should be used for these models. Models module must have `metadata` attribute. `package` when set must be a package or package name for all models modules.''' engine_params = engine_params or {} if not (package is None or isinstance(package, six.string_types)): package = getattr(package, '__package__', None) or package.__name__ binds = {} for ref, uri in databases.items(): md_ref = '.'.join(filter(None, [package, ref])) md_module = import_module(md_ref) try: metadata = md_module.metadata except AttributeError: raise ImportError( 'Cannot import name metadata from module {}'.format(md_ref)) engine = create_engine(uri, **engine_params) # Dot before [name] is required to allow setting logging level etc. for # all them at once. engine.logger = logging.getLogger('sqlalchemy.engine.[%s]' % ref) for table in metadata.sorted_tables: binds[table] = engine return binds
[ "def", "multidb_binds", "(", "databases", ",", "package", "=", "None", ",", "engine_params", "=", "None", ")", ":", "engine_params", "=", "engine_params", "or", "{", "}", "if", "not", "(", "package", "is", "None", "or", "isinstance", "(", "package", ",", ...
Creates dictionary to be passed as `binds` parameter to `sqlalchemy.orm.sessionmaker()` from dictionary mapping models module name to connection URI that should be used for these models. Models module must have `metadata` attribute. `package` when set must be a package or package name for all models modules.
[ "Creates", "dictionary", "to", "be", "passed", "as", "binds", "parameter", "to", "sqlalchemy", ".", "orm", ".", "sessionmaker", "()", "from", "dictionary", "mapping", "models", "module", "name", "to", "connection", "URI", "that", "should", "be", "used", "for",...
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/db/sqla/__init__.py#L8-L32
twisted/epsilon
epsilon/descriptor.py
requiredAttribute
def requiredAttribute(requiredAttributeName): """ Utility for defining attributes on base classes/mixins which require their values to be supplied by their derived classes. C{None} is a common, but almost never suitable default value for these kinds of attributes, as it may cause operations in the derived class to fail silently in peculiar ways. If a C{requiredAttribute} is accessed before having its value changed, a C{AttributeError} will be raised with a helpful error message. @param requiredAttributeName: The name of the required attribute. @type requiredAttributeName: C{str} Example: >>> from epsilon.descriptor import requiredAttribute ... >>> class FooTestMixin: ... expectedResult = requiredAttribute('expectedResult') ... >>> class BrokenFooTestCase(TestCase, FooTestMixin): ... pass ... >>> brokenFoo = BrokenFooTestCase() >>> print brokenFoo.expectedResult Traceback (most recent call last): ... AttributeError: Required attribute 'expectedResult' has not been changed from its default value on '<BrokenFooTestCase instance>'. ... >>> class WorkingFooTestCase(TestCase, FooTestMixin): ... expectedResult = 7 ... >>> workingFoo = WorkingFooTestCase() >>> print workingFoo.expectedResult ... 7 >>> """ class RequiredAttribute(attribute): def get(self): if requiredAttributeName not in self.__dict__: raise AttributeError( ('Required attribute %r has not been changed' ' from its default value on %r' % ( requiredAttributeName, self))) return self.__dict__[requiredAttributeName] def set(self, value): self.__dict__[requiredAttributeName] = value return RequiredAttribute
python
def requiredAttribute(requiredAttributeName): """ Utility for defining attributes on base classes/mixins which require their values to be supplied by their derived classes. C{None} is a common, but almost never suitable default value for these kinds of attributes, as it may cause operations in the derived class to fail silently in peculiar ways. If a C{requiredAttribute} is accessed before having its value changed, a C{AttributeError} will be raised with a helpful error message. @param requiredAttributeName: The name of the required attribute. @type requiredAttributeName: C{str} Example: >>> from epsilon.descriptor import requiredAttribute ... >>> class FooTestMixin: ... expectedResult = requiredAttribute('expectedResult') ... >>> class BrokenFooTestCase(TestCase, FooTestMixin): ... pass ... >>> brokenFoo = BrokenFooTestCase() >>> print brokenFoo.expectedResult Traceback (most recent call last): ... AttributeError: Required attribute 'expectedResult' has not been changed from its default value on '<BrokenFooTestCase instance>'. ... >>> class WorkingFooTestCase(TestCase, FooTestMixin): ... expectedResult = 7 ... >>> workingFoo = WorkingFooTestCase() >>> print workingFoo.expectedResult ... 7 >>> """ class RequiredAttribute(attribute): def get(self): if requiredAttributeName not in self.__dict__: raise AttributeError( ('Required attribute %r has not been changed' ' from its default value on %r' % ( requiredAttributeName, self))) return self.__dict__[requiredAttributeName] def set(self, value): self.__dict__[requiredAttributeName] = value return RequiredAttribute
[ "def", "requiredAttribute", "(", "requiredAttributeName", ")", ":", "class", "RequiredAttribute", "(", "attribute", ")", ":", "def", "get", "(", "self", ")", ":", "if", "requiredAttributeName", "not", "in", "self", ".", "__dict__", ":", "raise", "AttributeError"...
Utility for defining attributes on base classes/mixins which require their values to be supplied by their derived classes. C{None} is a common, but almost never suitable default value for these kinds of attributes, as it may cause operations in the derived class to fail silently in peculiar ways. If a C{requiredAttribute} is accessed before having its value changed, a C{AttributeError} will be raised with a helpful error message. @param requiredAttributeName: The name of the required attribute. @type requiredAttributeName: C{str} Example: >>> from epsilon.descriptor import requiredAttribute ... >>> class FooTestMixin: ... expectedResult = requiredAttribute('expectedResult') ... >>> class BrokenFooTestCase(TestCase, FooTestMixin): ... pass ... >>> brokenFoo = BrokenFooTestCase() >>> print brokenFoo.expectedResult Traceback (most recent call last): ... AttributeError: Required attribute 'expectedResult' has not been changed from its default value on '<BrokenFooTestCase instance>'. ... >>> class WorkingFooTestCase(TestCase, FooTestMixin): ... expectedResult = 7 ... >>> workingFoo = WorkingFooTestCase() >>> print workingFoo.expectedResult ... 7 >>>
[ "Utility", "for", "defining", "attributes", "on", "base", "classes", "/", "mixins", "which", "require", "their", "values", "to", "be", "supplied", "by", "their", "derived", "classes", ".", "C", "{", "None", "}", "is", "a", "common", "but", "almost", "never...
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/descriptor.py#L96-L143
project-ncl/pnc-cli
pnc_cli/builds.py
list_builds
def list_builds(page_size=200, page_index=0, sort="", q=""): """ List all builds :param page_size: number of builds returned per query :param sort: RSQL sorting query :param q: RSQL query :return: """ response = utils.checked_api_call(pnc_api.builds_running, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q) if response: return response.content
python
def list_builds(page_size=200, page_index=0, sort="", q=""): """ List all builds :param page_size: number of builds returned per query :param sort: RSQL sorting query :param q: RSQL query :return: """ response = utils.checked_api_call(pnc_api.builds_running, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q) if response: return response.content
[ "def", "list_builds", "(", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "response", "=", "utils", ".", "checked_api_call", "(", "pnc_api", ".", "builds_running", ",", "'get_all'", ",", ...
List all builds :param page_size: number of builds returned per query :param sort: RSQL sorting query :param q: RSQL query :return:
[ "List", "all", "builds", ":", "param", "page_size", ":", "number", "of", "builds", "returned", "per", "query", ":", "param", "sort", ":", "RSQL", "sorting", "query", ":", "param", "q", ":", "RSQL", "query", ":", "return", ":" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/builds.py#L16-L26
project-ncl/pnc-cli
pnc_cli/generate_repo.py
generate_repo_list
def generate_repo_list(product_name=None, product_version=None, product_milestone=None): """ Generates list of artifacts for offline repository. """ if not validate_input_parameters(product_name, product_version, product_milestone): sys.exit(1) product_version = pnc_api.product_versions.get_all(q="version=='"+ product_version + "';product.name=='"+product_name+"'") if not product_version.content: logging.error('Specified product version not found.') sys.exit(1) product_version_id = product_version.content[0].id milestone = pnc_api.product_milestones.get_all(q="version=='"+ product_milestone + "';productVersion.id=='"+str(product_version_id)+"'") if not milestone.content: logging.error('Specified milestone not found.') sys.exit(1) milestone_id = milestone.content[0].id builds = get_all_successful_builds(milestone_id) if not builds: logging.warning('No builds performed in the milestone.') return for build in builds: built_artifacts = get_all_artifacts(build.id) for artifact in built_artifacts: print(artifact.identifier)
python
def generate_repo_list(product_name=None, product_version=None, product_milestone=None): """ Generates list of artifacts for offline repository. """ if not validate_input_parameters(product_name, product_version, product_milestone): sys.exit(1) product_version = pnc_api.product_versions.get_all(q="version=='"+ product_version + "';product.name=='"+product_name+"'") if not product_version.content: logging.error('Specified product version not found.') sys.exit(1) product_version_id = product_version.content[0].id milestone = pnc_api.product_milestones.get_all(q="version=='"+ product_milestone + "';productVersion.id=='"+str(product_version_id)+"'") if not milestone.content: logging.error('Specified milestone not found.') sys.exit(1) milestone_id = milestone.content[0].id builds = get_all_successful_builds(milestone_id) if not builds: logging.warning('No builds performed in the milestone.') return for build in builds: built_artifacts = get_all_artifacts(build.id) for artifact in built_artifacts: print(artifact.identifier)
[ "def", "generate_repo_list", "(", "product_name", "=", "None", ",", "product_version", "=", "None", ",", "product_milestone", "=", "None", ")", ":", "if", "not", "validate_input_parameters", "(", "product_name", ",", "product_version", ",", "product_milestone", ")",...
Generates list of artifacts for offline repository.
[ "Generates", "list", "of", "artifacts", "for", "offline", "repository", "." ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/generate_repo.py#L12-L35
project-ncl/pnc-cli
pnc_cli/user_config.py
login
def login(username=None, password=None): """ Log in to PNC using the supplied username and password. The keycloak token will be saved for all subsequent pnc-cli operations until login is called again :return: """ global user user = UserConfig() if username: user.username = username else: user.username = user.input_username() if password: user.password = password else: user.password = user.input_password() if (not ( user.username and user.password) ): logging.error("Username and password must be provided for login") return; user.retrieve_keycloak_token() user.apiclient = user.create_api_client() save()
python
def login(username=None, password=None): """ Log in to PNC using the supplied username and password. The keycloak token will be saved for all subsequent pnc-cli operations until login is called again :return: """ global user user = UserConfig() if username: user.username = username else: user.username = user.input_username() if password: user.password = password else: user.password = user.input_password() if (not ( user.username and user.password) ): logging.error("Username and password must be provided for login") return; user.retrieve_keycloak_token() user.apiclient = user.create_api_client() save()
[ "def", "login", "(", "username", "=", "None", ",", "password", "=", "None", ")", ":", "global", "user", "user", "=", "UserConfig", "(", ")", "if", "username", ":", "user", ".", "username", "=", "username", "else", ":", "user", ".", "username", "=", "...
Log in to PNC using the supplied username and password. The keycloak token will be saved for all subsequent pnc-cli operations until login is called again :return:
[ "Log", "in", "to", "PNC", "using", "the", "supplied", "username", "and", "password", ".", "The", "keycloak", "token", "will", "be", "saved", "for", "all", "subsequent", "pnc", "-", "cli", "operations", "until", "login", "is", "called", "again", ":", "retur...
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/user_config.py#L265-L288
twisted/epsilon
epsilon/hotfixes/delayedcall_seconds.py
DelayedCall.cancel
def cancel(self): """Unschedule this call @raise AlreadyCancelled: Raised if this call has already been unscheduled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: self.canceller(self) self.cancelled = 1 if self.debug: self._str = str(self) del self.func, self.args, self.kw
python
def cancel(self): """Unschedule this call @raise AlreadyCancelled: Raised if this call has already been unscheduled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: self.canceller(self) self.cancelled = 1 if self.debug: self._str = str(self) del self.func, self.args, self.kw
[ "def", "cancel", "(", "self", ")", ":", "if", "self", ".", "cancelled", ":", "raise", "error", ".", "AlreadyCancelled", "elif", "self", ".", "called", ":", "raise", "error", ".", "AlreadyCalled", "else", ":", "self", ".", "canceller", "(", "self", ")", ...
Unschedule this call @raise AlreadyCancelled: Raised if this call has already been unscheduled. @raise AlreadyCalled: Raised if this call has already been made.
[ "Unschedule", "this", "call" ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/delayedcall_seconds.py#L38-L55
twisted/epsilon
epsilon/hotfixes/delayedcall_seconds.py
DelayedCall.reset
def reset(self, secondsFromNow): """Reschedule this call for a different time @type secondsFromNow: C{float} @param secondsFromNow: The number of seconds from the time of the C{reset} call at which this call will be scheduled. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: if self.seconds is None: new_time = base.seconds() + secondsFromNow else: new_time = self.seconds() + secondsFromNow if new_time < self.time: self.delayed_time = 0 self.time = new_time self.resetter(self) else: self.delayed_time = new_time - self.time
python
def reset(self, secondsFromNow): """Reschedule this call for a different time @type secondsFromNow: C{float} @param secondsFromNow: The number of seconds from the time of the C{reset} call at which this call will be scheduled. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: if self.seconds is None: new_time = base.seconds() + secondsFromNow else: new_time = self.seconds() + secondsFromNow if new_time < self.time: self.delayed_time = 0 self.time = new_time self.resetter(self) else: self.delayed_time = new_time - self.time
[ "def", "reset", "(", "self", ",", "secondsFromNow", ")", ":", "if", "self", ".", "cancelled", ":", "raise", "error", ".", "AlreadyCancelled", "elif", "self", ".", "called", ":", "raise", "error", ".", "AlreadyCalled", "else", ":", "if", "self", ".", "sec...
Reschedule this call for a different time @type secondsFromNow: C{float} @param secondsFromNow: The number of seconds from the time of the C{reset} call at which this call will be scheduled. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made.
[ "Reschedule", "this", "call", "for", "a", "different", "time" ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/delayedcall_seconds.py#L57-L81
twisted/epsilon
epsilon/hotfixes/delayedcall_seconds.py
DelayedCall.delay
def delay(self, secondsLater): """Reschedule this call for a later time @type secondsLater: C{float} @param secondsLater: The number of seconds after the originally scheduled time for which to reschedule this call. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: self.delayed_time += secondsLater if self.delayed_time < 0: self.activate_delay() self.resetter(self)
python
def delay(self, secondsLater): """Reschedule this call for a later time @type secondsLater: C{float} @param secondsLater: The number of seconds after the originally scheduled time for which to reschedule this call. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: self.delayed_time += secondsLater if self.delayed_time < 0: self.activate_delay() self.resetter(self)
[ "def", "delay", "(", "self", ",", "secondsLater", ")", ":", "if", "self", ".", "cancelled", ":", "raise", "error", ".", "AlreadyCancelled", "elif", "self", ".", "called", ":", "raise", "error", ".", "AlreadyCalled", "else", ":", "self", ".", "delayed_time"...
Reschedule this call for a later time @type secondsLater: C{float} @param secondsLater: The number of seconds after the originally scheduled time for which to reschedule this call. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made.
[ "Reschedule", "this", "call", "for", "a", "later", "time" ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/delayedcall_seconds.py#L83-L101
twisted/epsilon
doc/listings/amp/auth_server.py
main
def main(): """ Start the AMP server and the reactor. """ startLogging(stdout) checker = InMemoryUsernamePasswordDatabaseDontUse() checker.addUser("testuser", "examplepass") realm = AdditionRealm() factory = CredAMPServerFactory(Portal(realm, [checker])) reactor.listenTCP(7805, factory) reactor.run()
python
def main(): """ Start the AMP server and the reactor. """ startLogging(stdout) checker = InMemoryUsernamePasswordDatabaseDontUse() checker.addUser("testuser", "examplepass") realm = AdditionRealm() factory = CredAMPServerFactory(Portal(realm, [checker])) reactor.listenTCP(7805, factory) reactor.run()
[ "def", "main", "(", ")", ":", "startLogging", "(", "stdout", ")", "checker", "=", "InMemoryUsernamePasswordDatabaseDontUse", "(", ")", "checker", ".", "addUser", "(", "\"testuser\"", ",", "\"examplepass\"", ")", "realm", "=", "AdditionRealm", "(", ")", "factory"...
Start the AMP server and the reactor.
[ "Start", "the", "AMP", "server", "and", "the", "reactor", "." ]
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/doc/listings/amp/auth_server.py#L63-L73
jedie/django-cms-tools
django_cms_tools/plugin_landing_page/views.py
LandingPageDetailView.set_meta
def set_meta(self, instance): """ Set django-meta stuff from LandingPageModel instance. """ self.use_title_tag = True self.title = instance.title
python
def set_meta(self, instance): """ Set django-meta stuff from LandingPageModel instance. """ self.use_title_tag = True self.title = instance.title
[ "def", "set_meta", "(", "self", ",", "instance", ")", ":", "self", ".", "use_title_tag", "=", "True", "self", ".", "title", "=", "instance", ".", "title" ]
Set django-meta stuff from LandingPageModel instance.
[ "Set", "django", "-", "meta", "stuff", "from", "LandingPageModel", "instance", "." ]
train
https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/plugin_landing_page/views.py#L36-L41
SmartTeleMax/iktomi
iktomi/forms/perms.py
FieldPerm.check
def check(self, field): ''' Returns permissions determined by object itself ''' if self.permissions is None: return field.parent.permissions return self.permissions
python
def check(self, field): ''' Returns permissions determined by object itself ''' if self.permissions is None: return field.parent.permissions return self.permissions
[ "def", "check", "(", "self", ",", "field", ")", ":", "if", "self", ".", "permissions", "is", "None", ":", "return", "field", ".", "parent", ".", "permissions", "return", "self", ".", "permissions" ]
Returns permissions determined by object itself
[ "Returns", "permissions", "determined", "by", "object", "itself" ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/perms.py#L77-L83
PythonRails/rails
rails/views/__init__.py
View._load_view
def _load_view(self, template_engine_name, template_dir): """ Load view by name and return an instance. """ file_name = template_engine_name.lower() class_name = "{}View".format(template_engine_name.title()) try: view_module = import_module("rails.views.{}".format(file_name)) except ImportError: raise Exception("Template engine '{}' not found in 'rails.views'".format(file_name)) view_class = getattr(view_module, class_name) return view_class(template_dir)
python
def _load_view(self, template_engine_name, template_dir): """ Load view by name and return an instance. """ file_name = template_engine_name.lower() class_name = "{}View".format(template_engine_name.title()) try: view_module = import_module("rails.views.{}".format(file_name)) except ImportError: raise Exception("Template engine '{}' not found in 'rails.views'".format(file_name)) view_class = getattr(view_module, class_name) return view_class(template_dir)
[ "def", "_load_view", "(", "self", ",", "template_engine_name", ",", "template_dir", ")", ":", "file_name", "=", "template_engine_name", ".", "lower", "(", ")", "class_name", "=", "\"{}View\"", ".", "format", "(", "template_engine_name", ".", "title", "(", ")", ...
Load view by name and return an instance.
[ "Load", "view", "by", "name", "and", "return", "an", "instance", "." ]
train
https://github.com/PythonRails/rails/blob/1e199b9da4da5b24fef39fc6212d71fc9fbb18a5/rails/views/__init__.py#L18-L29
SmartTeleMax/iktomi
iktomi/utils/system.py
terminate
def terminate(pid, sig, timeout): '''Terminates process with PID `pid` and returns True if process finished during `timeout`. Current user must have permission to access process information.''' os.kill(pid, sig) start = time.time() while True: try: # This is requireed if it's our child to avoid zombie. Also # is_running() returns True for zombie process. _, status = os.waitpid(pid, os.WNOHANG) except OSError as exc: if exc.errno != errno.ECHILD: # pragma: nocover raise else: if status: return True if not is_running(pid): return True if time.time()-start>=timeout: return False time.sleep(0.1)
python
def terminate(pid, sig, timeout): '''Terminates process with PID `pid` and returns True if process finished during `timeout`. Current user must have permission to access process information.''' os.kill(pid, sig) start = time.time() while True: try: # This is requireed if it's our child to avoid zombie. Also # is_running() returns True for zombie process. _, status = os.waitpid(pid, os.WNOHANG) except OSError as exc: if exc.errno != errno.ECHILD: # pragma: nocover raise else: if status: return True if not is_running(pid): return True if time.time()-start>=timeout: return False time.sleep(0.1)
[ "def", "terminate", "(", "pid", ",", "sig", ",", "timeout", ")", ":", "os", ".", "kill", "(", "pid", ",", "sig", ")", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "# This is requireed if it's our child to avoid zombie. Als...
Terminates process with PID `pid` and returns True if process finished during `timeout`. Current user must have permission to access process information.
[ "Terminates", "process", "with", "PID", "pid", "and", "returns", "True", "if", "process", "finished", "during", "timeout", ".", "Current", "user", "must", "have", "permission", "to", "access", "process", "information", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/system.py#L18-L39