function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def GetStatus(self):
    """Return the result status of this mutation.

    Returns:
        (class Status) operation status; tells whether the mutation
        succeeded and, on failure, the concrete reason.
    """
    code = lib.tera_row_mutation_get_status_code(self.mutation)
    return Status(code)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def DeleteColumn(self, cf, qu):
    """Delete the cell at ColumnFamily <cf>, Qualifier <qu> on this row.

    Args:
        cf(string): ColumnFamily name
        qu(string): Qualifier name
    """
    lib.tera_row_mutation_delete_column(
        self.mutation, cf, qu, c_uint64(len(qu)))
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def PutInt64(self, cf, qu, value):
    """Write (or overwrite) the cell at ColumnFamily <cf>, Qualifier <qu>
    on this row with the int64 value <value>.

    Args:
        cf(string): ColumnFamily name
        qu(string): Qualifier name
        value(long): cell value
    """
    lib.tera_row_mutation_put_int64(
        self.mutation, cf, qu, c_uint64(len(qu)), value)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def __init__(self, table): """ init """ self.table = table
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def NewRowMutation(self, rowkey):
    """Create a RowMutation for <rowkey> (modifies one row).

    All operations carried by one RowMutation (e.g. updating several
    columns) are applied atomically.

    Args:
        rowkey(string): the rowkey to mutate
    Returns:
        (class RowMutation): the mutation object
    """
    handle = lib.tera_row_mutation(self.table, rowkey, c_uint64(len(rowkey)))
    return RowMutation(handle)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def NewRowReader(self, rowkey):
    """Create a RowReader for <rowkey> (reads one row).

    All operations carried by one RowReader (e.g. reading several
    columns) are performed atomically.

    Args:
        rowkey(string): the rowkey to read
    Returns:
        (class RowReader): the reader object
    """
    handle = lib.tera_row_reader(self.table, rowkey, c_uint64(len(rowkey)))
    return RowReader(handle)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def IsPutFinished(self):
    """Whether *all* asynchronous writes on this table have completed.

    Returns:
        (bool) True when every pending put has finished, else False.
    """
    finished = lib.tera_table_is_put_finished(self.table)
    return finished
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def BatchGet(self, row_reader_list):
    """Batch get; usage mirrors ApplyReader.

    Args:
        row_reader_list(RowReader): pre-built list of RowReader objects.
            Each row's result is stored in the corresponding RowReader;
            when the row read succeeded (status code OK) the result can
            be accessed via e.g. RowReader.Value(), otherwise the status
            code explains the failure. See sample.py for usage.
    """
    readers = [item.reader for item in row_reader_list]
    count = len(readers)
    reader_array = (c_void_p * count)(*readers)
    lib.tera_table_apply_reader_batch(self.table, reader_array, count)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def GetInt64(self, rowkey, cf, qu, snapshot):
    """Like Get(), but interprets the cell as an int64 counter.

    Calling this on a cell that is not an int64 counter is undefined
    behavior.

    Args:
        rowkey(string): the rowkey
        cf(string): ColumnFamily name
        qu(string): Qualifier name
        snapshot(long): snapshot id; pass 0 when not needed
    Returns:
        (long) the cell's numeric value
    Raises:
        TeraSdkException: the read failed
    """
    err = c_char_p()
    value = c_int64()
    ok = lib.tera_table_getint64(
        self.table, rowkey, c_uint64(len(rowkey)), cf,
        qu, c_uint64(len(qu)), byref(value), byref(err),
        c_uint64(snapshot)
    )
    if not ok:
        raise TeraSdkException("get record failed:" + err.value)
    return long(value.value)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def BatchPut(self, row_mutation_list):
    """Batch put; usage mirrors ApplyMutation.

    Args:
        row_mutation_list(RowMutation): pre-built list of RowMutation
            objects. Each row's write status is stored in the
            corresponding RowMutation; on failure the status code
            explains the reason. See sample.py for usage.
    """
    mutations = [item.mutation for item in row_mutation_list]
    count = len(mutations)
    mutation_array = (c_void_p * count)(*mutations)
    lib.tera_table_apply_mutation_batch(self.table, mutation_array, count)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def Delete(self, rowkey, cf, qu):
    """Synchronously delete one cell.

    Args:
        rowkey(string): the rowkey
        cf(string): ColumnFamily name
        qu(string): Qualifier name
    """
    lib.tera_table_delete(
        self.table, rowkey, c_uint64(len(rowkey)),
        cf, qu, c_uint64(len(qu))
    )
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def __init__(self, reader): """ init """ self.reader = reader
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def AddColumn(self, cf, qu):
    """Restrict the read to one Column (ColumnFamily + Qualifier).

    By default the whole row (all columns) is read.

    Args:
        cf(string): ColumnFamily to read
        qu(string): Qualifier to read
    """
    lib.tera_row_reader_add_column(
        self.reader, cf, qu, c_uint64(len(qu)))
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def SetTimestamp(self, ts):
    """Set the timestamp for this read."""
    lib.tera_row_reader_set_timestamp(self.reader, ts)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def SetSnapshot(self, snapshot):
    """Set the snapshot id for this read."""
    lib.tera_row_reader_set_snapshot(self.reader, snapshot)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def SetTimeout(self, timeout):
    """Set the timeout for this read."""
    lib.tera_row_reader_set_timeout(self.reader, timeout)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def Next(self):
    """Advance the reader to the next cell."""
    lib.tera_row_reader_next(self.reader)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def Value(self):
    """Return (string) the value of the current cell."""
    buf = POINTER(c_ubyte)()
    size = c_uint64()
    lib.tera_row_reader_value(self.reader, byref(buf), byref(size))
    return copy_string_to_user(buf, long(size.value))
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def Family(self):
    """Return (string) the ColumnFamily of the current cell."""
    buf = POINTER(c_ubyte)()
    size = c_uint64()
    lib.tera_row_reader_family(self.reader, byref(buf), byref(size))
    return copy_string_to_user(buf, long(size.value))
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def Timestamp(self):
    """Return (long) the Unix timestamp of the current cell."""
    ts = lib.tera_row_reader_timestamp(self.reader)
    return ts
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def Destroy(self):
    """Destroy this reader and release its native resources.

    The object must not be used afterwards.
    """
    lib.tera_row_reader_destroy(self.reader)
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def __init__(self, reason): """ init """ self.reason = reason
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def init_function_prototype_for_scan():
    """Declare ctypes signatures (argtypes/restype) for the scan C API.

    Fix: tera_scan_descriptor_set_pack_interval takes the scan-descriptor
    handle (c_void_p) as its first argument, like every other
    tera_scan_descriptor_set_* setter — the original declared c_char_p.
    """
    ######################
    # scan result stream #
    ######################
    lib.tera_result_stream_done.argtypes = [c_void_p, POINTER(c_char_p)]
    lib.tera_result_stream_done.restype = c_bool

    lib.tera_result_stream_destroy.argtypes = [c_void_p]
    lib.tera_result_stream_destroy.restype = None

    lib.tera_result_stream_timestamp.argtypes = [c_void_p]
    lib.tera_result_stream_timestamp.restype = c_int64

    lib.tera_result_stream_column_name.argtypes = [c_void_p,
                                                   POINTER(POINTER(c_ubyte)),
                                                   POINTER(c_uint64)]
    lib.tera_result_stream_column_name.restype = None

    lib.tera_result_stream_family.argtypes = [c_void_p,
                                              POINTER(POINTER(c_ubyte)),
                                              POINTER(c_uint64)]
    lib.tera_result_stream_family.restype = None

    lib.tera_result_stream_next.argtypes = [c_void_p]
    lib.tera_result_stream_next.restype = None

    lib.tera_result_stream_qualifier.argtypes = [c_void_p,
                                                 POINTER(POINTER(c_ubyte)),
                                                 POINTER(c_uint64)]
    lib.tera_result_stream_qualifier.restype = None

    lib.tera_result_stream_row_name.argtypes = [c_void_p,
                                                POINTER(POINTER(c_ubyte)),
                                                POINTER(c_uint64)]
    lib.tera_result_stream_row_name.restype = None

    lib.tera_result_stream_value.argtypes = [c_void_p,
                                             POINTER(POINTER(c_ubyte)),
                                             POINTER(c_uint64)]
    lib.tera_result_stream_value.restype = None

    lib.tera_result_stream_value_int64.argtypes = [c_void_p]
    lib.tera_result_stream_value_int64.restype = c_int64

    ###################
    # scan descriptor #
    ###################
    lib.tera_scan_descriptor.argtypes = [c_char_p, c_uint64]
    lib.tera_scan_descriptor.restype = c_void_p

    lib.tera_scan_descriptor_destroy.argtypes = [c_void_p]
    lib.tera_scan_descriptor_destroy.restype = None

    lib.tera_scan_descriptor_add_column.argtypes = [c_void_p, c_char_p,
                                                    c_char_p, c_uint64]
    lib.tera_scan_descriptor_add_column.restype = None

    lib.tera_scan_descriptor_add_column_family.argtypes = [c_void_p, c_char_p]
    lib.tera_scan_descriptor_add_column_family.restype = None

    lib.tera_scan_descriptor_set_buffer_size.argtypes = [c_void_p, c_int64]
    lib.tera_scan_descriptor_set_buffer_size.restype = None

    lib.tera_scan_descriptor_set_end.argtypes = [c_void_p, c_char_p, c_uint64]
    lib.tera_scan_descriptor_set_end.restype = None

    # was [c_char_p, c_int64]: the first parameter is the descriptor handle
    lib.tera_scan_descriptor_set_pack_interval.argtypes = [c_void_p, c_int64]
    lib.tera_scan_descriptor_set_pack_interval.restype = None

    lib.tera_scan_descriptor_set_max_versions.argtypes = [c_void_p, c_int32]
    lib.tera_scan_descriptor_set_max_versions.restype = None

    lib.tera_scan_descriptor_set_snapshot.argtypes = [c_void_p, c_uint64]
    lib.tera_scan_descriptor_set_snapshot.restype = None

    lib.tera_scan_descriptor_set_time_range.argtypes = [c_void_p, c_int64,
                                                        c_int64]
    lib.tera_scan_descriptor_set_time_range.restype = None
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def init_function_prototype_for_table():
    """Declare ctypes signatures (argtypes/restype) for the table C API."""
    # synchronous get/put
    lib.tera_table_get.argtypes = [c_void_p, c_char_p, c_uint64,
                                   c_char_p, c_char_p, c_uint64,
                                   POINTER(POINTER(c_ubyte)),
                                   POINTER(c_uint64),
                                   POINTER(c_char_p), c_uint64]
    lib.tera_table_get.restype = c_bool

    lib.tera_table_getint64.argtypes = [c_void_p, c_char_p, c_uint64,
                                        c_char_p, c_char_p, c_uint64,
                                        POINTER(c_int64),
                                        POINTER(c_char_p), c_uint64]
    lib.tera_table_getint64.restype = c_bool

    lib.tera_table_put.argtypes = [c_void_p, c_char_p, c_uint64,
                                   c_char_p, c_char_p, c_uint64,
                                   c_char_p, c_uint64, POINTER(c_char_p)]
    lib.tera_table_put.restype = c_bool

    lib.tera_table_put_kv.argtypes = [c_void_p, c_char_p, c_uint64,
                                      c_char_p, c_uint64, c_int32,
                                      POINTER(c_char_p)]
    lib.tera_table_put_kv.restype = c_bool

    lib.tera_table_putint64.argtypes = [c_void_p, c_char_p, c_uint64,
                                        c_char_p, c_char_p, c_uint64,
                                        c_int64, POINTER(c_char_p)]
    lib.tera_table_putint64.restype = c_bool

    # scan / delete
    lib.tera_table_scan.argtypes = [c_void_p, c_void_p, POINTER(c_char_p)]
    lib.tera_table_scan.restype = c_void_p

    lib.tera_table_delete.argtypes = [c_void_p, c_char_p, c_uint64,
                                      c_char_p, c_char_p, c_uint64]
    lib.tera_table_delete.restype = c_bool

    # asynchronous mutation / reader application
    lib.tera_table_apply_mutation.argtypes = [c_void_p, c_void_p]
    lib.tera_table_apply_mutation.restype = None

    lib.tera_table_apply_mutation_batch.argtypes = [c_void_p, c_void_p,
                                                    c_int64]
    lib.tera_table_apply_mutation_batch.restype = None

    lib.tera_table_is_put_finished.argtypes = [c_void_p]
    lib.tera_table_is_put_finished.restype = c_bool

    lib.tera_table_apply_reader.argtypes = [c_void_p, c_void_p]
    lib.tera_table_apply_reader.restype = None

    lib.tera_table_apply_reader_batch.argtypes = [c_void_p, c_void_p, c_int64]
    lib.tera_table_apply_reader_batch.restype = None

    lib.tera_table_is_get_finished.argtypes = [c_void_p]
    lib.tera_table_is_get_finished.restype = c_bool

    # row mutation lifecycle
    lib.tera_row_mutation.argtypes = [c_void_p, c_char_p, c_uint64]
    lib.tera_row_mutation.restype = c_void_p

    lib.tera_row_mutation_get_status_code.argtypes = [c_void_p]
    lib.tera_row_mutation_get_status_code.restype = c_int64

    lib.tera_row_mutation_destroy.argtypes = [c_void_p]
    lib.tera_row_mutation_destroy.restype = None
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def init_function_prototype_for_row_reader():
    """Declare ctypes signatures (argtypes/restype) for the row-reader C API."""
    # creation and read configuration
    lib.tera_row_reader.argtypes = [c_void_p, c_char_p, c_uint64]
    lib.tera_row_reader.restype = c_void_p

    lib.tera_row_reader_add_column_family.argtypes = [c_void_p, c_char_p]
    lib.tera_row_reader_add_column_family.restype = None

    lib.tera_row_reader_add_column.argtypes = [c_void_p, c_char_p,
                                               c_char_p, c_uint64]
    lib.tera_row_reader_add_column.restype = None

    lib.tera_row_reader_set_callback.argtypes = [c_void_p, READER_CALLBACK]
    lib.tera_row_reader_set_callback.restype = None

    lib.tera_row_reader_set_timestamp.argtypes = [c_void_p, c_int64]
    lib.tera_row_reader_set_timestamp.restype = None

    lib.tera_row_reader_set_time_range.argtypes = [c_void_p, c_int64, c_int64]
    lib.tera_row_reader_set_time_range.restype = None

    lib.tera_row_reader_set_snapshot.argtypes = [c_void_p, c_uint64]
    lib.tera_row_reader_set_snapshot.restype = None

    lib.tera_row_reader_set_max_versions.argtypes = [c_void_p, c_uint32]
    lib.tera_row_reader_set_max_versions.restype = None

    lib.tera_row_reader_set_timeout.argtypes = [c_void_p, c_int64]
    lib.tera_row_reader_set_timeout.restype = None

    # iteration over the result cells
    lib.tera_row_reader_done.argtypes = [c_void_p]
    lib.tera_row_reader_done.restype = c_bool

    lib.tera_row_reader_next.argtypes = [c_void_p]
    lib.tera_row_reader_next.restype = None

    lib.tera_row_reader_rowkey.argtypes = [c_void_p,
                                           POINTER(POINTER(c_ubyte)),
                                           POINTER(c_uint64)]
    lib.tera_row_reader_rowkey.restype = None

    lib.tera_row_reader_value.argtypes = [c_void_p,
                                          POINTER(POINTER(c_ubyte)),
                                          POINTER(c_uint64)]
    lib.tera_row_reader_value.restype = None

    lib.tera_row_reader_value_int64.argtypes = [c_void_p]
    lib.tera_row_reader_value_int64.restype = c_int64

    lib.tera_row_reader_family.argtypes = [c_void_p,
                                           POINTER(POINTER(c_ubyte)),
                                           POINTER(c_uint64)]
    lib.tera_row_reader_family.restype = None

    lib.tera_row_reader_qualifier.argtypes = [c_void_p,
                                              POINTER(POINTER(c_ubyte)),
                                              POINTER(c_uint64)]
    lib.tera_row_reader_qualifier.restype = None

    lib.tera_row_reader_timestamp.argtypes = [c_void_p]
    lib.tera_row_reader_timestamp.restype = c_int64

    # status and teardown
    lib.tera_row_reader_get_status_code.argtypes = [c_void_p]
    lib.tera_row_reader_get_status_code.restype = c_int64

    lib.tera_row_reader_destroy.argtypes = [c_void_p]
    lib.tera_row_reader_destroy.restype = None
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def copy_string_to_user(value, size):
    """Copy `size` bytes from a C buffer into a Python string and free
    the native buffer.
    """
    result = string_at(value, size)
    # the C side allocated this buffer; release it to avoid a leak
    libc.free(value)
    return result
BaiduPS/tera
[ 1876, 447, 1876, 171, 1395818525 ]
def main(filename):
    """Given an input file containing nothing but styles, print out
    an unrolled list of declarations in cascade order.

    Fixes: close the input file (with-statement) and avoid shadowing
    the builtin `input`.
    """
    with open(filename, 'r') as style_file:
        content = style_file.read()
    declarations = cascadenik.stylesheet_declarations(content, is_merc=True)
mapnik/Cascadenik
[ 107, 19, 107, 13, 1281417927 ]
def __init__(self,**params): self.warning("CFPLF_SOM is deprecated -- see the example in cfsom_or.ty for how to build a SOM")
ioam/topographica
[ 51, 31, 51, 228, 1348109103 ]
def test_ClientAuthMethod_login(self):
    """Logging in with valid credentials yields a usable auth object."""
    auth = ClientAuthMethod(username, password)
    self.assertNotEqual(auth, None)
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def test_bad_user_details(self):
    """Bogus credentials must raise IOError during login."""
    self.assertRaises(IOError, ClientAuthMethod, 'asdsa', '')
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def automated_oauth_approval(url):
    """Drive the Google OAuth approval flow without a human.

    General process:
    1. assume the user is not logged in, so we get redirected to the
       Google accounts login page; log in with the test credentials.
    2. we are then redirected to the OAuth approval page; br.submit()
       picks the first submit on that page, which is "Accept".
    """
    br = mechanize.Browser()
    br.open(url)
    # step 1: fill in the login form
    br.select_form(nr=0)
    br["Email"] = username
    br["Passwd"] = password
    response1 = br.submit()
    # step 2: accept the approval form
    br.select_form(nr=0)
    req2 = br.click(type="submit", nr=0)
    response2 = br.open(req2)
    return response2
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def test_oauth_login(self):
    """Constructing an OAuthMethod with valid keys succeeds."""
    auth = OAuthMethod(oauth_key, oauth_secret)
    self.assertNotEqual(auth, None)
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def test_full_auth_process_without_callback(self):
    """End-to-end OAuth flow (no callback) yields working user info."""
    auth = OAuthMethod(oauth_key, oauth_secret)
    auth.setRequestToken()
    auth_url = auth.buildAuthUrl()
    response = automated_oauth_approval(auth_url)
    auth.setAccessToken()
    reader = GoogleReader(auth)
    info = reader.getUserInfo()
    self.assertEqual(dict, type(info))
    self.assertEqual(firstname, info['userName'])
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def mechanize_oauth2_approval(url):
    """Drive the Google OAuth2 approval flow and return the auth code.

    General process:
    1. assume the user is not logged in, so we get redirected to the
       Google accounts login page and log in with the account
       credentials. If access was already granted, the user is
       auto-redirected without confirming again.
    2. we are redirected to the OAuth approval page; br.submit() picks
       the first submit on that page, which is "Accept".
    3. mechanize follows the redirect, which is expected to raise a 40X
       exception; we then return the token from the callback URL.
    """
    br = mechanize.Browser()
    br.open(url)
    br.select_form(nr=0)
    br["Email"] = username
    br["Passwd"] = password
    try:
        response1 = br.submit()
        br.select_form(nr=0)
        response2 = br.submit()
    except Exception:
        # watch for 40X exception on trying to load redirect page
        pass
    callback_url = br.geturl()
    # split off the token in hackish fashion
    return callback_url.split('code=')[1]
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def test_full_auth_and_access_userdata(self):
    """End-to-end OAuth2 flow yields working user info."""
    auth = OAuth2Method(client_id, client_secret)
    auth.setRedirectUri(redirect_url)
    url = auth.buildAuthUrl()
    token = automated_oauth2_approval(url)
    auth.code = token
    auth.setAccessToken()
    reader = GoogleReader(auth)
    info = reader.getUserInfo()
    self.assertEqual(dict, type(info))
    self.assertEqual(firstname, info['userName'])
askedrelic/libgreader
[ 70, 24, 70, 3, 1256666681 ]
def __init__(self, parent, view, model):
    """
    Constructor of the navigation component

    :param sakia.gui.network.view.NetworkView: the view
    :param sakia.gui.network.model.NetworkModel model: the model
    """
    super().__init__(parent)
    self.view = view
    self.model = model
    # the table model must exist before signals are wired below
    table_model = self.model.init_network_table_model()
    self.view.set_network_table_model(table_model)
    # wire UI signals to this controller's handlers
    self.view.manual_refresh_clicked.connect(self.refresh_nodes_manually)
    self.view.table_network.customContextMenuRequested.connect(self.node_context_menu)
ucoin-io/cutecoin
[ 62, 24, 62, 67, 1391110718 ]
def create(cls, parent, app, network_service):
    """Factory: build the network component (view + model + controller).

    :param PyQt5.QObject parent:
    :param sakia.app.Application app:
    :param sakia.services.NetworkService network_service:
    :return: the assembled component
    """
    view = NetworkView(parent.view,)
    model = NetworkModel(None, app, network_service)
    component = cls(parent, view, model)
    # reparent the model so Qt ownership follows the component
    model.setParent(component)
    return component
ucoin-io/cutecoin
[ 62, 24, 62, 67, 1391110718 ]
def refresh_nodes_manually(self):
    """Trigger a one-shot refresh of the network nodes."""
    self.model.refresh_nodes_once()
ucoin-io/cutecoin
[ 62, 24, 62, 67, 1391110718 ]
def set_root_node(self):
    """Mark the node attached to the triggering action as a root node."""
    selected = self.sender().data()
    self.model.add_root_node(selected)
ucoin-io/cutecoin
[ 62, 24, 62, 67, 1391110718 ]
def unset_root_node(self):
    """Remove root status from the node attached to the triggering action."""
    selected = self.sender().data()
    self.model.unset_root_node(selected)
ucoin-io/cutecoin
[ 62, 24, 62, 67, 1391110718 ]
def validate_filter_rules(filter_rules, all_categories):
    """Validate the given filter rules, and raise a ValueError if not valid.

    Args:
      filter_rules: A list of boolean filter rules, for example--
                    ["-whitespace", "+whitespace/braces"]
      all_categories: A list of all available category names, for example--
                      ["whitespace/tabs", "whitespace/braces"]

    Raises:
      ValueError: An error occurs if a filter rule does not begin with
                  "+" or "-" or if a filter rule does not match the
                  beginning of some category name in the list of all
                  available categories.
    """
    for rule in filter_rules:
        if rule[:1] not in ('+', '-'):
            raise ValueError('Invalid filter rule "%s": every rule '
                             "must start with + or -." % rule)
        prefix = rule[1:]
        if not any(category.startswith(prefix)
                   for category in all_categories):
            raise ValueError('Suspected incorrect filter rule "%s": '
                             "the rule does not match the beginning "
                             "of any category name." % rule)
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __init__(self, filter_rules=None): """Create a category filter. Args: filter_rules: A list of strings that are filter rules, which are strings beginning with the plus or minus symbol (+/-). The list should include any default filter rules at the beginning. Defaults to the empty list. Raises: ValueError: Invalid filter rule if a rule does not start with plus ("+") or minus ("-"). """ if filter_rules is None: filter_rules = [] self._filter_rules = filter_rules self._should_check_category = {} # Cached dictionary of category to True/False
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __eq__(self, other): """Return whether this CategoryFilter instance is equal to another.""" return self._filter_rules == other._filter_rules
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __ne__(self, other): # Python does not automatically deduce from __eq__(). return not (self == other)
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __init__(self, base_rules=None, path_specific=None, user_rules=None): """Create a FilterConfiguration instance. Args: base_rules: The starting list of filter rules to use for processing. The default is the empty list, which by itself would mean that all categories should be checked. path_specific: A list of (sub_paths, path_rules) pairs that stores the path-specific filter rules for appending to the base rules. The "sub_paths" value is a list of path substrings. If a file path contains one of the substrings, then the corresponding path rules are appended. The first substring match takes precedence, i.e. only the first match triggers an append. The "path_rules" value is the tuple of filter rules that can be appended to the base rules. The value is a tuple rather than a list so it can be used as a dictionary key. The dictionary is for caching purposes in the implementation of this class. user_rules: A list of filter rules that is always appended to the base rules and any path rules. In other words, the user rules take precedence over the everything. In practice, the user rules are provided by the user from the command line. """ if base_rules is None: base_rules = [] if path_specific is None: path_specific = [] if user_rules is None: user_rules = [] self._base_rules = base_rules self._path_specific = path_specific self._path_specific_lower = None """The backing store for self._get_path_specific_lower().""" # FIXME: Make user rules internal after the FilterConfiguration # attribute is removed from ProcessorOptions (since at # that point ArgumentPrinter will no longer need to # access FilterConfiguration.user_rules). self.user_rules = user_rules self._path_rules_to_filter = {} """Cached dictionary of path rules to CategoryFilter instance.""" # The same CategoryFilter instance can be shared across # multiple keys in this dictionary. This allows us to take # greater advantage of the caching done by # CategoryFilter.should_check(). 
self._path_to_filter = {} """Cached dictionary of file path to CategoryFilter instance."""
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __eq__(self, other): """Return whether this FilterConfiguration is equal to another.""" if self._base_rules != other._base_rules: return False if self._path_specific != other._path_specific: return False if self.user_rules != other.user_rules: return False return True
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __ne__(self, other): # Python does not automatically deduce this from __eq__(). return not self.__eq__(other)
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def _get_path_specific_lower(self): """Return a copy of self._path_specific with the paths lower-cased.""" if self._path_specific_lower is None: self._path_specific_lower = [] for (sub_paths, path_rules) in self._path_specific: sub_paths = map(str.lower, sub_paths) self._path_specific_lower.append((sub_paths, path_rules)) return self._path_specific_lower
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def _filter_from_path_rules(self, path_rules):
    """Return the CategoryFilter associated to a path rules tuple.

    The same CategoryFilter is reused where possible to take advantage
    of the caching it does internally.
    """
    if path_rules not in self._path_rules_to_filter:
        combined = list(self._base_rules)  # copy, do not mutate the base
        combined.extend(path_rules)
        combined.extend(self.user_rules)
        self._path_rules_to_filter[path_rules] = _CategoryFilter(combined)
    return self._path_rules_to_filter[path_rules]
cattleprod/samsung-kernel-gt-i9100
[ 1, 1, 1, 1, 1322678144 ]
def __init__(self, items = [], loop = False):
    """Initialisation.

    items = set of items that can be iterated over. Must be finite.
    If an iterator is supplied, it is enumerated into a list during
    initialisation.
    """
    # NOTE(review): `items` is a mutable default but is never mutated
    # in this constructor; confirm against the rest of the class.
    super(Chooser, self).__init__()
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def shutdown(self):
    """Check the "control" inbox for a shutdownMicroprocess message.

    Forwards any such message out of "signal" and returns True; returns
    False when no shutdown was requested.
    """
    if not self.dataReady("control"):
        return False
    message = self.recv("control")
    if isinstance(message, shutdownMicroprocess):
        self.send(message, "signal")
        return True
    return False
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def __init__(self): self._nodes = {} self._params = {} self._num_input = 0 self._num_param = 0
TuSimple/mxnet
[ 28, 25, 28, 1, 1457693796 ]
def from_onnx(self, graph): """Construct symbol from onnx graph. Parameters ---------- graph : onnx protobuf object The loaded onnx graph Returns ------- sym :symbol.Symbol The returned mxnet symbol params : dict A dict of name: nd.array pairs, used as pretrained weights """ # parse network inputs, aka parameters for init_tensor in graph.initializer: if not init_tensor.name.strip(): raise ValueError("Tensor's name is required.") self._params[init_tensor.name] = self._parse_array(init_tensor) # converting GraphProto message for i in graph.input: if i.name in self._params: # i is a param instead of input self._nodes[i.name] = symbol.Variable(name=i.name, shape=self._params[i.name].shape) else: self._nodes[i.name] = symbol.Variable(name=i.name) # For storing arg and aux params for the graph. auxDict = {} argDict = {} # constructing nodes, nodes are stored as directed acyclic graph # converting NodeProto message for node in graph.node: op_name = node.op_type node_name = node.name.strip() node_name = node_name if node_name else None onnx_attr = self._parse_attr(node.attribute) inputs = [self._nodes[i] for i in node.input] mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs) for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))): self._nodes[k] = mxnet_sym[i] # splitting params into args and aux params for args in mxnet_sym.list_arguments(): if args in self._params: argDict.update({args: nd.array(self._params[args])}) for aux in mxnet_sym.list_auxiliary_states(): if aux in self._params: auxDict.update({aux: nd.array(self._params[aux])}) # now return the outputs out = [self._nodes[i.name] for i in graph.output] if len(out) > 1: out = symbol.Group(out) else: out = out[0] return out, argDict, auxDict
TuSimple/mxnet
[ 28, 25, 28, 1, 1457693796 ]
def testConstants(self):
    """The metadata-query notification and scope constants are strings."""
    constants = (
        NSMetadataQueryDidStartGatheringNotification,
        NSMetadataQueryGatheringProgressNotification,
        NSMetadataQueryDidFinishGatheringNotification,
        NSMetadataQueryDidUpdateNotification,
        NSMetadataQueryResultContentRelevanceAttribute,
        NSMetadataQueryUserHomeScope,
        NSMetadataQueryLocalComputerScope,
        NSMetadataQueryNetworkScope,
    )
    for const in constants:
        self.assertIsInstance(const, unicode)
albertz/music-player
[ 483, 61, 483, 16, 1345772141 ]
def testConstants10_7(self):
    """The 10.7 metadata-item and ubiquity constants are strings."""
    constants = (
        NSMetadataQueryLocalDocumentsScope,
        NSMetadataQueryUbiquitousDocumentsScope,
        NSMetadataQueryUbiquitousDataScope,
        NSMetadataItemFSNameKey,
        NSMetadataItemDisplayNameKey,
        NSMetadataItemURLKey,
        NSMetadataItemPathKey,
        NSMetadataItemFSSizeKey,
        NSMetadataItemFSCreationDateKey,
        NSMetadataItemFSContentChangeDateKey,
        NSMetadataItemIsUbiquitousKey,
        NSMetadataUbiquitousItemHasUnresolvedConflictsKey,
        NSMetadataUbiquitousItemIsDownloadedKey,
        NSMetadataUbiquitousItemIsDownloadingKey,
        NSMetadataUbiquitousItemIsUploadedKey,
        NSMetadataUbiquitousItemIsUploadingKey,
        NSMetadataUbiquitousItemPercentDownloadedKey,
        NSMetadataUbiquitousItemPercentUploadedKey,
    )
    for const in constants:
        self.assertIsInstance(const, unicode)
albertz/music-player
[ 483, 61, 483, 16, 1345772141 ]
def uniform_path_format(native_path):
    """Alters the path if needed to be separated by forward slashes."""
    slash_path = native_path.replace(os.sep, posixpath.sep)
    return posixpath.normpath(slash_path)
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def aggregate_components_from_owners(all_owners_data, root):
    """Converts the team/component/os tags parsed from OWNERS into mappings.

    Args:
      all_owners_data (dict): A mapping from relative path to a dir to a dict
          mapping the tag names to their values. See docstring for
          scrape_owners.
      root (str): the path to the src directory.

    Returns:
      A tuple (data, warnings, stats) where data is a dict of the form
        {'component-to-team': {'Component1': 'team1@chr...', ...},
         'teams-per-component': {'Component1': ['team1@chr...', 'team2@chr...]},
         'dir-to-component': {'/path/to/1': 'Component1', ...}}
         'dir-to-team': {'/path/to/1': 'team1@', ...}}
      , warnings is a list of strings, stats is a dict of form
        {'OWNERS-count': total number of OWNERS files,
         'OWNERS-with-component-only-count': number of OWNERS have # COMPONENT,
         'OWNERS-with-team-and-component-count': number of
                          OWNERS have TEAM and COMPONENT,
         'OWNERS-count-by-depth': {directory depth: number of OWNERS},
         'OWNERS-with-component-only-count-by-depth': {directory depth: number
                          of OWNERS have COMPONENT at this depth},
         'OWNERS-with-team-and-component-count-by-depth':{directory depth: ...}}
    """
    stats = {}
    num_total = 0
    num_with_component = 0
    num_with_team_component = 0
    num_total_by_depth = defaultdict(int)
    num_with_component_by_depth = defaultdict(int)
    num_with_team_component_by_depth = defaultdict(int)
    warnings = []
    teams_per_component = defaultdict(set)
    # component -> {'depth': ..., 'team': ...} for the shallowest OWNERS
    # declaring that component; its team becomes the component's owner team.
    topmost_team = {}
    dir_to_component = {}
    dir_missing_info_by_depth = defaultdict(list)
    dir_to_team = {}
    for rel_dirname, owners_data in all_owners_data.iteritems():
        # Normalize this relative path to posix-style to make counting
        # separators work correctly as a means of obtaining the file_depth.
        rel_path = uniform_path_format(os.path.relpath(rel_dirname, root))
        file_depth = 0 if rel_path == '.' else rel_path.count(posixpath.sep) + 1
        num_total += 1
        num_total_by_depth[file_depth] += 1
        component = owners_data.get('component')
        team = owners_data.get('team')
        os_tag = owners_data.get('os')
        # an OS tag specialises the component name, e.g. "UI(Mac)"
        if os_tag and component:
            component = '%s(%s)' % (component, os_tag)
        if team:
            dir_to_team[rel_dirname] = team
        if component:
            num_with_component += 1
            num_with_component_by_depth[file_depth] += 1
            dir_to_component[rel_dirname] = component
            if team:
                num_with_team_component += 1
                num_with_team_component_by_depth[file_depth] += 1
                teams_per_component[component].add(team)
                # keep the team declared closest to the root for each component
                if component not in topmost_team or file_depth < topmost_team[
                        component]['depth']:
                    topmost_team[component] = {'depth': file_depth, 'team': team}
        else:
            rel_owners_path = uniform_path_format(
                os.path.join(rel_dirname, 'OWNERS'))
            warnings.append('%s has no COMPONENT tag' % rel_owners_path)
            if not team and not os_tag:
                dir_missing_info_by_depth[file_depth].append(rel_owners_path)
    mappings = {
        'component-to-team': {
            k: v['team'] for k, v in topmost_team.iteritems()
        },
        'teams-per-component': {
            k: sorted(list(v)) for k, v in teams_per_component.iteritems()
        },
        'dir-to-component': dir_to_component,
        'dir-to-team': dir_to_team,
    }
    warnings += validate_one_team_per_component(mappings)
    stats = {'OWNERS-count': num_total,
             'OWNERS-with-component-only-count': num_with_component,
             'OWNERS-with-team-and-component-count': num_with_team_component,
             'OWNERS-count-by-depth': num_total_by_depth,
             'OWNERS-with-component-only-count-by-depth':
                 num_with_component_by_depth,
             'OWNERS-with-team-and-component-count-by-depth':
                 num_with_team_component_by_depth,
             'OWNERS-missing-info-by-depth': dir_missing_info_by_depth}
    return mappings, warnings, stats
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def __init__(self, sheet_key): self.sheet_key = sheet_key
pmarks-net/dtella
[ 5, 2, 5, 11, 1426380623 ]
def __init__(self, channel):
    """Set up a connection over `channel` and perform the initial handshake.

    The attribute initialisation order matters: the channel and box must
    exist before sync_request() is issued, and the remote connection must
    exist before the namespace frontend wraps it.
    """
    self._closed = False
    self._local_namespace = {}
    self.channel = channel
    self.box = Box(self)
    self.async_replies = {}
    self.sync_replies = {}
    self.module_cache = {}
    # blocking handshake: fetch the peer's connection object
    self.remote_conn = self.sync_request("handle_getconn")
    # user APIs:
    self.modules = RootImporter(self)
    self.namespace = AttrFrontend(self.remote_conn._local_namespace)
    # warm up the remote execution path with an empty statement
    self.execute("")
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def close(self):
    """Shut the connection down and break every cyclic reference.

    Idempotent: calling close() on an already-closed connection is a no-op.
    """
    if self._closed:
        return
    self.box.close()
    self.channel.close()
    self._closed = True
    # Drop every attribute that participates in reference cycles.
    for attr in ("_local_namespace", "channel", "box", "async_replies",
                 "sync_replies", "module_cache", "modules", "remote_conn",
                 "namespace"):
        setattr(self, attr, None)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def send(self, type, seq, obj):
    """Pack *obj* and transmit it as a frame of the given type and seq.

    Raises:
        EOFError: if the connection has already been closed.
    """
    if self._closed:
        raise EOFError("the connection is closed")
    payload = self.box.pack(obj)
    return self.channel.send(type, seq, payload)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def send_request(self, handlername, *args):
    """Send a request frame for *handlername* and return its sequence id.

    seq is passed as None here; presumably the channel assigns the real
    sequence id and returns it -- confirm against channel.send.
    """
    return self.send(FRAME_REQUEST, None, (handlername, args))
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def send_result(self, seq, obj):
    """Send a result frame answering the peer's request *seq*."""
    self.send(FRAME_RESULT, seq, obj)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def dispatch_result(self, seq, obj):
    """Route an incoming result frame.

    If an async callback is registered for *seq*, invoke it with
    (obj, False); otherwise stash the result for a blocked sync_request.
    """
    try:
        callback = self.async_replies.pop(seq)
    except KeyError:
        self.sync_replies[seq] = obj
    else:
        callback(obj, False)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def dispatch_exception(self, seq, obj):
    """Route an incoming exception frame.

    Async callers get the loaded exception via callback(excobj, True);
    for a blocked sync_request the exception is re-raised locally.
    """
    excobj = load_exception(obj)
    try:
        callback = self.async_replies.pop(seq)
    except KeyError:
        raise_exception(*excobj)
    else:
        callback(excobj, True)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def poll(self):
    """Serve at most one pending frame without blocking.

    Returns:
        True if a frame was available and served, False otherwise.
    """
    if not self.channel.is_available():
        return False
    self.serve()
    return True
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def serve(self):
    """serves a single request (may block)

    Receives one frame from the channel and routes it by frame type:
    results and exceptions answer our outstanding requests; requests
    come from the peer and are dispatched to a local handler.
    """
    type, seq, data = self.channel.recv()
    if type == FRAME_RESULT:
        self.dispatch_result(seq, self.box.unpack(data))
    elif type == FRAME_REQUEST:
        # Payload is (handlername, args); dispatch_request is defined elsewhere.
        self.dispatch_request(seq, *self.box.unpack(data))
    elif type == FRAME_EXCEPTION:
        self.dispatch_exception(seq, self.box.unpack(data))
    else:
        raise ValueError("invalid frame type (%d)" % (type,))
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def sync_request(self, handlername, *args):
    """Issue a request and block until its reply arrives.

    While waiting, incoming frames are served, so nested requests from
    the peer are still handled.
    """
    seq = self.send_request(handlername, *args)
    while True:
        if seq in self.sync_replies:
            return self.sync_replies.pop(seq)
        self.serve()
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def async_request(self, callback, handlername, *args):
    """performs an asynchronous (non-blocking) request

    The reply is delivered later by dispatch_result/dispatch_exception,
    which invoke callback(result_or_exc, is_exception).
    """
    seq = self.send_request(handlername, *args)
    self.async_replies[seq] = callback
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def rimport(self, modulename):
    """Import a remote module by dotted name (as a string).

    The remote module object is fetched once per name and cached, so
    repeated imports do not round-trip to the peer.
    """
    try:
        return self.module_cache[modulename]
    except KeyError:
        module = self.sync_request("handle_import", modulename)
        self.module_cache[modulename] = module
        return module
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_decref(self, oid):
    """Peer dropped a reference to local object *oid*: decref it in the box."""
    self.box.decref(oid)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_delattr(self, oid, name):
    """Delete attribute *name* from the local object registered under *oid*."""
    delattr(self.box[oid], name)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_setattr(self, oid, name, value):
    """Set attribute *name* to *value* on the local object registered under *oid*."""
    setattr(self.box[oid], name, value)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_getitem(self, oid, index):
    """Index into the local object registered under *oid* and return the item."""
    return self.box[oid][index]
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_call(self, oid, args, kwargs):
    """Call the local object registered under *oid* with the given args/kwargs."""
    return self.box[oid](*args, **kwargs)
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_str(self, oid):
    """Return str() of the local object registered under *oid*."""
    return str(self.box[oid])
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def handle_import(self, modulename):
    """Import *modulename* locally and return the module object.

    A non-empty fromlist makes __import__ return the leaf module for
    dotted names (e.g. "os.path" -> the os.path module) instead of the
    top-level package.
    """
    # Fix: fromlist must be a sequence of names. The original passed a
    # bare string, which __import__ iterates character by character.
    return __import__(modulename, None, None, [modulename.split(".")[-1]])
eBay/restcommander
[ 904, 146, 904, 3, 1389402375 ]
def __init__(self, build_dir=None, target_os=None, target_cpu=None,
             is_debug=None, is_verbose=None, apk_name='MojoRunner.apk'):
    '''Function arguments take precedence over GN args and default values.'''
    # Validate that explicit arguments use the Config.* constants.
    assert target_os in (None, Config.OS_ANDROID, Config.OS_CHROMEOS,
                         Config.OS_LINUX, Config.OS_MAC, Config.OS_WINDOWS)
    assert target_cpu in (None, Config.ARCH_X86, Config.ARCH_X64,
                          Config.ARCH_ARM)
    assert is_debug in (None, True, False)
    assert is_verbose in (None, True, False)

    # Defaults: host OS/CPU, debug, verbose. Overridden in two stages below.
    self.values = {
        'build_dir': build_dir,
        'target_os': self.GetHostOS(),
        'target_cpu': self.GetHostCPU(),
        'is_debug': True,
        'is_verbose': True,
        'dcheck_always_on': False,
        'is_asan': False,
        'apk_name': apk_name,
    }

    # Stage 1: GN args override the defaults (presumably read from the
    # build dir -- confirm against _ParseGNArgs, defined elsewhere).
    self._ParseGNArgs()
    # Stage 2: explicit constructor arguments override GN args.
    if target_os is not None:
        self.values['target_os'] = target_os
    if target_cpu is not None:
        self.values['target_cpu'] = target_cpu
    if is_debug is not None:
        self.values['is_debug'] = is_debug
    if is_verbose is not None:
        self.values['is_verbose'] = is_verbose
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def GetHostOS():
    """Return the Config.OS_* constant for the machine we are running on.

    Raises:
        NotImplementedError: if the host OS is not recognized.
    """
    # Fix: Python 2 reports 'linux2' (historically also 'linux3'), while
    # Python 3 reports plain 'linux'. Match the prefix to cover all of
    # them; behavior on 'linux2' is unchanged.
    if sys.platform.startswith('linux'):
        return Config.OS_LINUX
    if sys.platform == 'darwin':
        return Config.OS_MAC
    if sys.platform == 'win32':
        return Config.OS_WINDOWS
    raise NotImplementedError('Unsupported host OS')
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def GetHostCPU():
    """Return the Config.ARCH_* constant for the host CPU architecture.

    Derived from //native_client/pynacl/platform.py.

    Raises:
        Exception: if the machine string is not recognized.
    """
    machine = platform.machine()
    x86_32_names = ('x86', 'x86-32', 'x86_32', 'x8632', 'i386', 'i686',
                    'ia32', '32')
    x86_64_names = ('x86-64', 'amd64', 'AMD64', 'x86_64', 'x8664', '64')
    if machine in x86_32_names:
        return Config.ARCH_X86
    if machine in x86_64_names:
        return Config.ARCH_X64
    if machine.startswith('arm'):
        return Config.ARCH_ARM
    raise Exception('Cannot identify CPU arch: %s' % machine)
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def build_dir(self):
    '''Build directory path.'''
    # Plain read of the resolved configuration; may be None if neither
    # the constructor nor GN args supplied a build_dir.
    return self.values['build_dir']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def target_os(self):
    '''OS of the build/test target.'''
    # One of the Config.OS_* constants (defaults to the host OS).
    return self.values['target_os']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def target_cpu(self):
    '''CPU arch of the build/test target.'''
    # One of the Config.ARCH_* constants (defaults to the host CPU).
    return self.values['target_cpu']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def is_debug(self):
    '''Is Debug build?'''
    # Defaults to True; overridden by GN args or the constructor.
    return self.values['is_debug']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def is_verbose(self):
    '''Should print additional logging information?'''
    # Defaults to True; overridden by GN args or the constructor.
    return self.values['is_verbose']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def dcheck_always_on(self):
    '''DCHECK is fatal even in release builds'''
    # Defaults to False; only GN args can change it (no constructor arg).
    return self.values['dcheck_always_on']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def is_asan(self):
    '''Is ASAN build?'''
    # Defaults to False; only GN args can change it (no constructor arg).
    return self.values['is_asan']
junhuac/MQUIC
[ 2, 1, 2, 1, 1459966120 ]
def answer(query):
    """Compute a simple statistic over the numbers in the query string.

    Expects a query of the form ``<func> <num> <num> ...`` where <func>
    is one of min/max/avg/sum/prod (compared as bytes, since the raw
    query is bytes here).

    Returns:
        A one-element list [{'answer': <text>}] on success, or [] when
        the query does not match this plugin's format.
    """
    parts = query.query.split()
    if len(parts) < 2:
        return []
    try:
        args = list(map(float, parts[1:]))
    # Fix: the original bare `except:` swallowed everything, including
    # KeyboardInterrupt/SystemExit. float() only raises ValueError on
    # non-numeric text, so catch exactly that.
    except ValueError:
        return []
    func = parts[0]
    answer = None
    if func == b'min':
        answer = min(args)
    elif func == b'max':
        answer = max(args)
    elif func == b'avg':
        # len(args) >= 1 is guaranteed by the len(parts) check above.
        answer = sum(args) / len(args)
    elif func == b'sum':
        answer = sum(args)
    elif func == b'prod':
        answer = reduce(mul, args, 1)
    if answer is None:
        return []
    return [{'answer': unicode(answer)}]
asciimoo/searx
[ 12627, 1696, 12627, 345, 1381854051 ]
def __init__(self, key, defining_class):
    # type: (Any, Type[KeyBasedCompareMixin]) -> None
    """Remember the comparison key and the class defining the ordering.

    _defining_class is presumably used by _compare (not visible here) to
    reject comparisons against unrelated types -- confirm with _compare.
    """
    self._compare_key = key
    self._defining_class = defining_class
google/material-design-icons
[ 47694, 9554, 47694, 203, 1412791288 ]
def __lt__(self, other):
    # type: (Any) -> bool
    """Delegate to self._compare (defined elsewhere) with the < operator."""
    return self._compare(other, operator.__lt__)
google/material-design-icons
[ 47694, 9554, 47694, 203, 1412791288 ]
def __gt__(self, other):
    # type: (Any) -> bool
    """Delegate to self._compare (defined elsewhere) with the > operator."""
    return self._compare(other, operator.__gt__)
google/material-design-icons
[ 47694, 9554, 47694, 203, 1412791288 ]
def __eq__(self, other):
    # type: (Any) -> bool
    """Delegate to self._compare (defined elsewhere) with the == operator."""
    return self._compare(other, operator.__eq__)
google/material-design-icons
[ 47694, 9554, 47694, 203, 1412791288 ]
def __init__(self, socket, config=None):
    """Thread that proxies between clients and the Electrum network layer.

    Args:
        socket: open socket to an external daemon; when falsy, an
            in-process Network object is created and driven via queues.
        config: dict or SimpleConfig (defaults to an empty dict).
    """
    if config is None:
        config = {}  # Do not use mutables as default arguments!
    threading.Thread.__init__(self)
    self.config = SimpleConfig(config) if type(config) == type({}) else config
    self.message_id = 0                  # monotonically increasing request id
    self.unanswered_requests = {}        # id -> (method, params, callback)
    self.subscriptions = {}              # callback -> list of subscribe messages
    self.debug = False
    self.lock = threading.Lock()
    self.pending_transactions_for_notifications = []
    self.callbacks = {}
    self.running = True
    self.daemon = True                   # don't block interpreter shutdown
    if socket:
        # Remote daemon: exchange messages over the socket.
        self.pipe = util.SocketPipe(socket)
        self.network = None
    else:
        # In-process network thread, connected through queue pipes.
        self.network = Network(config)
        self.pipe = util.QueuePipe(send_queue=self.network.requests_queue)
        self.network.start(self.pipe.get_queue)
        # Seed the pipe with the current status values so consumers get
        # an initial snapshot before any notification arrives.
        for key in ['status','banner','updated','servers','interfaces']:
            value = self.network.get_status_value(key)
            self.pipe.get_queue.put({'method':'network.status', 'params':[key, value]})
    # status variables
    self.status = 'connecting'
    self.servers = {}
    self.banner = ''
    self.blockchain_height = 0
    self.server_height = 0
    self.interfaces = []
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def run(self):
    """Thread main loop: pump responses from the pipe until stopped.

    A pipe timeout just retries; a None response is the shutdown signal.
    On exit, fires the 'stop' callback and stops the in-process network
    thread if one was created.
    """
    while self.is_running():
        try:
            response = self.pipe.get()
        except util.timeout:
            continue
        if response is None:
            break
        self.process(response)
    self.trigger_callback('stop')
    if self.network:
        self.network.stop()
    print_error("NetworkProxy: terminating")
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def send(self, messages, callback): """return the ids of the requests that we sent""" # detect subscriptions sub = [] for message in messages: m, v = message if m[-10:] == '.subscribe': sub.append(message) if sub: with self.lock: if self.subscriptions.get(callback) is None: self.subscriptions[callback] = [] for message in sub: if message not in self.subscriptions[callback]: self.subscriptions[callback].append(message) with self.lock: requests = [] ids = [] for m in messages: method, params = m request = { 'id':self.message_id, 'method':method, 'params':params } self.unanswered_requests[self.message_id] = method, params, callback ids.append(self.message_id) requests.append(request) if self.debug: print_error("-->", request) self.message_id += 1 self.pipe.send_all(requests) return ids
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def get_servers(self):
    """Return the locally cached servers dict."""
    return self.servers
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def get_header(self, height):
    """Blocking fetch of the block header at *height* via the network."""
    return self.synchronous_get([('network.get_header',[height])])[0]
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def get_server_height(self):
    """Return the locally cached tip height reported by the server."""
    return self.server_height
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def is_connecting(self):
    """True while the cached status is still 'connecting'."""
    return self.status == 'connecting'
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]
def get_parameters(self):
    """Blocking query of the current network parameters."""
    return self.synchronous_get([('network.get_parameters',[])])[0]
Kefkius/electrum-frc
[ 3, 5, 3, 1, 1423949169 ]