func_code_string stringlengths 52 1.94M | func_documentation_string stringlengths 1 47.2k |
|---|---|
def add_cookies_to_web_driver(driver, cookies):
    """Set cookies in an existing WebDriver session.

    :param driver: a WebDriver instance with an active session
    :param cookies: iterable of cookie objects accepted by
        ``convert_cookie_to_dict``
    :return: the same ``driver``, for call chaining
    """
    for item in cookies:
        as_dict = convert_cookie_to_dict(item)
        driver.add_cookie(as_dict)
    return driver
def configure(self, options, conf):
    """Configure plugin. Plugin is enabled by default.

    :param options: parsed command-line options; only
        ``browser_closer_when`` is read here
    :param conf: configuration object, stored on the plugin
    """
    self.when = options.browser_closer_when
    self.conf = conf
def index_path(self, root):
basename = os.path.basename(root)
if os.path.splitext(basename)[0] != '__init__' and basename.startswith('_'):
return
location = self._determine_location_for(root)
if os.path.isfile(root):
self._index_module(root, location)
... | Index a path.
:param root: Either a package directory, a .so or a .py module. |
def get_or_create_index(self, paths=None, name=None, refresh=False):
if not paths:
paths = sys.path
if not name:
name = 'default'
self._name = name
idx_dir = get_cache_dir()
idx_file = os.path.join(idx_dir, name + '.json')
if os.path.exist... | Get index with given name from cache. Create if it doesn't exists. |
def symbol_scores(self, symbol):
scores = []
path = []
# sys.path sys path -> import sys
# os.path.basename os.path basename -> import os.path
# basename os.path basename -> from os.path import basename
# path.ba... | Find matches for symbol.
:param symbol: A . separated symbol. eg. 'os.path.basename'
:returns: A list of tuples of (score, package, reference|None),
ordered by score from highest to lowest. |
def find(self, path):
    """Return the node for a path, or None.

    Walks up to the root of the tree first, then descends following
    each dot-separated component of *path*.  A missing component, or
    one that resolves to a float (a score placeholder rather than a
    subtree), yields None.
    """
    root = self
    while root._parent:
        root = root._parent
    current = root
    for part in path.split('.'):
        current = current._tree.get(part, None)
        if current is None or type(current) is float:
            return None
    return current
def location_for(self, path):
path = path.split('.')
node = self
while node._parent:
node = node._parent
location = node.location
for name in path:
tree = node._tree.get(name, None)
if tree is None or type(tree) is float:
... | Return the location code for a path. |
def select_option(self, option):
    """Perform selection of the provided item from the web list.

    @params option - string item name; matched against each option's
    ``value`` attribute, and the first match is clicked.
    """
    for candidate in self.get_options():
        if candidate.get_attribute("value") != option:
            continue
        candidate.click()
        break
def get_attribute_selected(self, attribute):
    """Search the web list for the selected item and return its attribute.

    Returns None when no item is selected.
    @params attribute - string attribute name
    """
    matches = [item.get_attribute(attribute)
               for item in self.get_options() if item.is_selected()]
    if matches:
        return matches[0]
    return None
def select_by_visible_text(self, text):
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
... | Performs search of selected item from Web List
@params text - string visible text |
def wait(*args, **kwargs):
    """Wrap 'wait()' of the 'waiting' library with default parameter values.

    WebDriverException is ignored in the expected exceptions by default.
    """
    defaults = (
        ('sleep_seconds', (1, None)),
        ('expected_exceptions', WebDriverException),
        ('timeout_seconds', webium.settings.wait_timeout),
    )
    for key, value in defaults:
        kwargs.setdefault(key, value)
    return wait_lib(*args, **kwargs)
def parse_ast(source, filename=None):
    """Parse source into a Python AST, taking care of encoding."""
    is_unicode = isinstance(source, text_type)
    if is_unicode and sys.version_info[0] == 2:
        # ast.parse() on Python 2 rejects encoding declarations in
        # Unicode strings, so strip the coding cookie first.
        source = CODING_COOKIE_RE.sub(r'\1', source, 1)
    return ast.parse(source, filename or '<unknown>')
def find_unresolved_and_unreferenced_symbols(self):
unresolved = set()
unreferenced = self._definitions.copy()
self._collect_unresolved_and_unreferenced(set(), set(), unresolved, unreferenced,
frozenset(self._definitions), start=True)
... | Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced}) |
def get_item(key):
    """Return content of the cached file for *key* in JSON format.

    :param key: cache file name, relative to ``CURRENT_DIR``
    :return: the value stored under the ``"_"`` wrapper key, or None
        when the file is missing or does not hold valid JSON
    """
    cached_key_file = os.path.join(CURRENT_DIR, key)
    try:
        # 'with' guarantees the handle is closed even on errors; the
        # original left the file object for the garbage collector.
        with open(cached_key_file, "rb") as cache_file:
            return json.loads(cache_file.read().decode('UTF-8'))["_"]
    except (IOError, ValueError):
        return None
def set_item(key, value):
    """Write JSON content from *value* to the cached file and return it.

    :param key: cache file name, relative to ``CURRENT_DIR``
    :param value: JSON-serializable payload, stored under the ``"_"`` key
    :return: *value*, unchanged
    """
    cached_key_file = os.path.join(CURRENT_DIR, key)
    # 'with' flushes and closes the file deterministically; the
    # original relied on the garbage collector to do so.
    with open(cached_key_file, "wb") as cache_file:
        cache_file.write(json.dumps({"_": value}).encode('UTF-8'))
    return value
def delete_item(key):
    """Delete the cached file for *key* if present."""
    target = os.path.join(CURRENT_DIR, key)
    if not os.path.isfile(target):
        return
    os.remove(target)
def __parse_json_data(self, data):
    """Process Json data.

    :@param data
    :@type data: json/dict
    :@throws TypeError
    """
    if not isinstance(data, (dict, list)):
        raise TypeError("Provided Data is not json")
    self._raw_data = data
    self._json_data = copy.deepcopy(self._raw_data)
def __parse_json_file(self, file_path):
    """Process Json file data.

    :@param file_path
    :@type file_path: string
    :@throws IOError
    """
    extension = os.path.splitext(file_path)[1]
    if file_path == '' or extension != '.json':
        raise IOError('Invalid Json file')
    with open(file_path) as json_file:
        self._raw_data = json.load(json_file)
        self._json_data = copy.deepcopy(self._raw_data)
def __get_value_from_data(self, key, data):
    """Find value from json data.

    :@param key
    :@type key: string
    :@param data
    :@type data: dict
    :@return object
    :@throws KeyError
    """
    if key.isdigit():
        # Purely numeric keys index into list data.
        return data[int(key)]
    if key in data:
        return data.get(key)
    raise KeyError("Key not exists")
def at(self, root):
    """Set root where PyJsonq starts to prepare.

    :@param root
    :@type root: string
    :@return self
    :@throws KeyError
    """
    parts = [p for p in root.strip(" ").split('.') if p]
    for part in parts:
        self._json_data = self.__get_value_from_data(part, self._json_data)
    return self
def reset(self, data=None):
    """Reset the JsonQuery object to new data.

    Uses the given *data* when it is a non-empty dict or list,
    otherwise falls back to a deep copy of the previously given raw
    Json data.  The original signature used a mutable default
    argument (``data={}``); ``None`` avoids sharing one dict object
    across calls while keeping the same observable behavior (falsy
    defaults take the raw-data branch either way).

    :@param data: {}
    :@type data: json/dict
    :@return self
    """
    if data and isinstance(data, (dict, list)):
        self._json_data = data
    else:
        self._json_data = copy.deepcopy(self._raw_data)
    self.__reset_queries()
    return self
def __store_query(self, query_items):
    """Append a where-clause item to the current query group.

    :@param query_items
    :@type query_items: dict
    """
    index = self._current_query_index
    # Grow the group list lazily so the current index always exists.
    if index > len(self._queries) - 1:
        self._queries.append([])
    self._queries[index].append(query_items)
def __execute_queries(self):
def func(item):
or_check = False
for queries in self._queries:
and_check = True
for query in queries:
and_check &= self._matcher._match(
item.get(query.get('key'), None),
... | Execute all condition and filter result data |
def where(self, key, operator, value):
    """Make where clause.

    :@param key
    :@param operator
    :@param value
    :@type key,operator,value: string
    :@return self
    """
    clause = {"key": key, "operator": operator, "value": value}
    self.__store_query(clause)
    return self
def or_where(self, key, operator, value):
    """Make or_where clause.

    Starts a new query group (OR branch) when at least one group
    already exists, then stores the condition there.

    :@param key
    :@param operator
    :@param value
    :@type key, operator, value: string
    :@return self
    """
    if self._queries:
        self._current_query_index += 1
    self.__store_query({"key": key, "operator": operator, "value": value})
    return self
def nth(self, index):
    """Get the nth element of the collection.

    :@param index
    :@type index: int
    :@return object, or None when *index* is out of range

    The original guard ``count < fabs(index)`` let ``index == count``
    through and raised IndexError; a proper range check returns None
    for every out-of-range index.  Negative indices keep Python
    semantics (-1 is the last element), as before.
    """
    self.__prepare()
    size = self.count()
    if -size <= index < size:
        return self._json_data[index]
    return None
def sum(self, property):
    """Get the sum of the given property across the prepared data.

    :@param property
    :@type property: string
    :@return int/float
    """
    self.__prepare()
    total = 0
    for entry in self._json_data:
        total = total + entry.get(property)
    return total
def max(self, property):
    """Get the maximum value of *property* from the prepared data.

    :@param property
    :@type property: string
    :@return object
    :@throws KeyError
    """
    self.__prepare()
    try:
        largest = max(self._json_data, key=lambda entry: entry[property])
    except KeyError:
        raise KeyError("Key is not exists")
    return largest.get(property)
def avg(self, property):
    """Get the average of the given property.

    :@param property
    :@type property: string
    :@return average: int/float
    """
    self.__prepare()
    total = self.sum(property)
    return total / self.count()
def chunk(self, size=0):
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
sel... | Group the resulted collection to multiple chunk
:@param size: 0
:@type size: integer
:@return Chunked List |
def group_by(self, property):
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
r... | Getting the grouped result by the given property
:@param property
:@type property: string
:@return self |
def sort(self, order="asc"):
    """Sort the prepared list stored on the object.

    Non-list data is left untouched.

    :@param order: "asc"
    :@type order: string
    :@return self
    """
    self.__prepare()
    if isinstance(self._json_data, list):
        descending = order != "asc"
        self._json_data = sorted(self._json_data, reverse=descending)
    return self
def sort_by(self, property, order="asc"):
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
... | Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self |
def _match(self, x, op, y):
    """Compare the given `x` and `y` based on `op`.

    :@param x, y, op
    :@type x, y: mixed
    :@type op: string
    :@return bool
    :@throws ValueError
    """
    if op not in self.condition_mapper:
        raise ValueError('Invalid where condition given')
    handler = getattr(self, self.condition_mapper.get(op))
    return handler(x, y)
def overrides(method):
for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
if hasattr(super_class, method.__name__):
super_method = getattr(super_class, method.__name__)
if hasattr(super_method, "__finalized__"):
finalized = getattr(super_... | Decorator to indicate that the decorated method overrides a method in
superclass.
The decorator code is executed while loading class. Using this method
should have minimal runtime performance implications.
This is based on my idea about how to do this and fwc:s highly improved
algorithm for the imp... |
def _get_base_class_names(frame):
co, lasti = frame.f_code, frame.f_lasti
code = co.co_code
extends = []
for (op, oparg) in op_stream(code, lasti):
if op in dis.hasconst:
if type(co.co_consts[oparg]) == str:
extends = []
elif op in dis.hasname:
... | Get baseclass names from the code object |
def get_charset(message, default="utf-8"):
    """Get the message charset.

    :param message: an ``email.message.Message`` instance
    :param default: charset returned when the message declares none
    :return: the content charset if declared, else the message
        charset, else *default*

    Each accessor is called once; the original called each twice
    (once for the truth test, once for the return).
    """
    charset = message.get_content_charset()
    if charset:
        return charset
    charset = message.get_charset()
    if charset:
        return charset
    return default
def load_tlds():
    """Load all legal TLD extensions from assets."""
    tld_path = os.path.join(os.path.dirname(__file__),
                            'assets',
                            'tlds-alpha-by-domain.txt')
    with open(tld_path) as fobj:
        lines = fobj.read().lower().splitlines()
    # Skip the header line and drop entries containing "--"
    # (e.g. punycode "xn--" names).
    return [tld for tld in lines[1:] if "--" not in tld]
def parse_text_urls(mesg):
rval = []
loc = 0
for match in URLRE.finditer(mesg):
if loc < match.start():
rval.append(Chunk(mesg[loc:match.start()], None))
# Turn email addresses into mailto: links
email = match.group("email")
if email and "mailto" not in email... | Parse a block of text, splitting it into its url and non-url
components. |
def extract_with_context(lst, pred, before_context, after_context):
rval = []
start = 0
length = 0
while start < len(lst):
usedfirst = False
usedlast = False
# Extend to the next match.
while start + length < len(lst) and length < before_context + 1 \
... | Extract URL and context from a given chunk. |
def extracturls(mesg):
lines = NLRE.split(mesg)
# The number of lines of context above to provide.
# above_context = 1
# The number of lines of context below to provide.
# below_context = 1
# Plan here is to first transform lines into the form
# [line_fragments] where each fragment is a... | Given a text message, extract all the URLs found in the message, along
with their surrounding context. The output is a list of sequences of Chunk
objects, corresponding to the contextual regions extracted from the string. |
def extracthtmlurls(mesg):
chunk = HTMLChunker()
chunk.feed(mesg)
chunk.close()
# above_context = 1
# below_context = 1
def somechunkisurl(chunks):
for chnk in chunks:
if chnk.url is not None:
return True
return False
return extract_with_conte... | Extract URLs with context from html type message. Similar to extracturls. |
def decode_bytes(byt, enc='utf-8'):
try:
strg = byt.decode(enc)
except UnicodeDecodeError as err:
strg = "Unable to decode message:\n{}\n{}".format(str(byt), err)
except (AttributeError, UnicodeEncodeError):
# If byt is already a string, just return it
return byt
ret... | Given a string or bytes input, return a string.
Args: bytes - bytes or string
enc - encoding to use for decoding the byte string. |
def decode_msg(msg, enc='utf-8'):
# We avoid the get_payload decoding machinery for raw
# content-transfer-encodings potentially containing non-ascii characters,
# such as 8bit or binary, as these are encoded using raw-unicode-escape which
# seems to prevent subsequent utf-8 decoding.
cte = str... | Decodes a message fragment.
Args: msg - A Message object representing the fragment
enc - The encoding to use for decoding the message |
def msgurls(msg, urlidx=1):
# Written as a generator so I can easily choose only
# one subpart in the future (e.g., for
# multipart/alternative). Actually, I might even add
# a browser for the message structure?
enc = get_charset(msg)
if msg.is_multipart():
for part in msg.get_payl... | Main entry function for urlscan.py |
def shorten_url(url, cols, shorten):
    """Shorten long URLs to fit on one line.

    :param url: URL string
    :param cols: total terminal columns available
    :param shorten: when False, never shorten
    :return: *url* unchanged, or a trimmed "head...tail" form
    """
    # 6 cols are reserved for the url index; don't use the whole line.
    budget = (cols - 6) * .85
    if shorten is False or len(url) < budget:
        return url
    half = int(budget * .5)
    return url[:half] + "..." + url[-half:]
def grp_list(items):
    """Organize list of items [a,2,3,4,a,4,2,a,1, etc...] like:
    [[a,2,3,4], [a,4,2], [a,1]], where 'a' is a urwid.Divider.
    """
    current = []
    groups = []
    for entry in items:
        if isinstance(entry, urwid.Divider):
            # Close the running group and start a new one.
            # NOTE(review): the new group is seeded with items[0] (the
            # first divider) rather than with 'entry' itself —
            # presumably all dividers are interchangeable; confirm.
            groups.append(current)
            current = [items[0]]
        else:
            current.append(entry)
    groups.append(current)
    # The first appended group is the prefix before the first
    # divider; it is dropped, as in the original.
    return groups[1:]
def splittext(text, search, attr):
    """Split a text string by search string and add Urwid display
    attribute to the search term.

    Args: text - string
          search - search string
          attr - attribute string to add
    Returns: urwid markup list ["string", ("default", " mo"), "re string"]
             for search="mo", text="string more string"
    """
    if not search:
        return text
    pattern = re.compile("({})".format(re.escape(search)), re.IGNORECASE)
    lowered = search.lower()
    return [(attr, piece) if piece.lower() == lowered else piece
            for piece in pattern.split(text)]
def main(self):
    """Urwid main event loop."""
    initial_palette = self.palettes[self.palette_names[0]]
    self.loop = urwid.MainLoop(self.top,
                               initial_palette,
                               screen=self.tui,
                               handle_mouse=False,
                               input_filter=self.handle_keys,
                               unhandled_input=self.unhandled)
    self.loop.run()
def handle_keys(self, keys, raw):
for j, k in enumerate(keys):
if self.search is True:
text = "Search: {}".format(self.search_string)
if k == 'enter':
# Catch 'enter' key to prevent opening URL in mkbrowseto
self.enter ... | Handle widget default keys
- 'Enter' or 'space' to load URL
- 'Enter' to end search mode
- add 'space' to search string in search mode
- Workaround some small positioning bugs |
def unhandled(self, key):
self.key = key
self.size = self.tui.get_cols_rows()
if self.search is True:
if self.enter is False and self.no_matches is False:
if len(key) == 1 and key.isprintable():
self.search_string += key
se... | Handle other keyboard actions not handled by the ListBox widget. |
def _open_url(self):
    """<Enter> or <space>"""
    if self.run:
        load_text = "Executing: {}".format(self.run)
    else:
        load_text = "Loading URL..."
    # Skip the footer message for these browsers — presumably because
    # they run in the terminal and the footer would be invisible.
    text_browsers = ['elinks', 'links', 'w3m', 'lynx']
    if os.environ.get('BROWSER') not in text_browsers:
        self._footer_start_thread(load_text, 5)
def _help_menu(self):
if self.help_menu is False:
self.focus_pos_saved = self.top.body.focus_position
help_men = "\n".join(["{} - {}".format(i, j.__name__.strip('_'))
for i, j in self.keys.items() if j.__name__ !=
... | F1 |
def _search_key(self):
if self.urls:
self.search = True
if self.compact is True:
self.compact = False
self.items, self.items_com = self.items_com, self.items
else:
return
self.no_matches = False
self.search_stri... | / |
def _digits(self):
self.number += self.key
try:
if self.compact is False:
self.top.body.focus_position = \
self.items.index(self.items_com[max(int(self.number) - 1, 0)])
else:
self.top.body.focus_position = \
... | 0-9 |
def _top(self):
    """g"""
    # Goto top of the list; in non-compact mode the first URL row
    # sits below two header rows.
    if self.compact is False:
        self.top.body.focus_position = 2
    else:
        self.top.body.focus_position = 0
    self.top.keypress(self.size, "")
def _bottom(self):
    """G"""
    # Goto bottom of the list
    last_index = len(self.items) - 1
    self.top.body.focus_position = last_index
    self.top.keypress(self.size, "")
def _shorten(self):
# Toggle shortened URL for selected item
fpo = self.top.body.focus_position
url_idx = len([i for i in self.items[:fpo + 1]
if isinstance(i, urwid.Columns)]) - 1
if self.compact is False and fpo <= 1:
return
url = sel... | s |
def _all_shorten(self):
# Toggle all shortened URLs
self.shorten = not self.shorten
urls = iter(self.urls)
for item in self.items:
# Each Column has (Text, Button). Update the Button label
if isinstance(item, urwid.Columns):
item[1].set_la... | S |
def _all_escape(self):
# Toggle all escaped URLs
self.unesc = not self.unesc
self.urls, self.urls_unesc = self.urls_unesc, self.urls
urls = iter(self.urls)
for item in self.items:
# Each Column has (Text, Button). Update the Button label
if isinst... | u |
def _context(self):
# Show/hide context
if self.search_string:
# Reset search when toggling compact mode
footerwid = urwid.AttrMap(urwid.Text(""), 'default')
self.top.footer = footerwid
self.search_string = ""
self.items = self.items_o... | c |
def _clipboard(self, pri=False):
# Copy highlighted url to clipboard
fpo = self.top.body.focus_position
url_idx = len([i for i in self.items[:fpo + 1]
if isinstance(i, urwid.Columns)]) - 1
if self.compact is False and fpo <= 1:
return
u... | C |
def _palette(self):
# Loop through available palettes
self.palette_idx += 1
try:
self.loop.screen.register_palette(self.palettes[self.palette_names[self.palette_idx]])
except IndexError:
self.loop.screen.register_palette(self.palettes[self.palette_names[0... | p |
def _config_create(self):
# Create ~/.config/urlscan/config.json if if doesn't exist
if not exists(self.conf):
try:
# Python 2/3 compatible recursive directory creation
os.makedirs(dirname(expanduser(self.conf)))
except OSError as err:
... | --genconf |
def _footer_start_thread(self, text, time):
    """Display given text in the footer. Clears after <time> seconds."""
    self.top.footer = urwid.AttrMap(urwid.Text(text), 'footer')
    # Daemon thread so a pending footer clear never blocks exit.
    loader = Thread(target=self._loading_thread, args=(time,))
    loader.daemon = True
    loader.start()
def _loading_thread(self, time):
sleep(time)
self.number = "" # Clear URL selection number
text = "Search: {}".format(self.search_string)
if self.search_string:
footer = 'search'
else:
footer = 'default'
text = ""
footerwid = ... | Simple thread to wait <time> seconds after launching a URL or
displaying a URL selection number, clearing the screen and clearing the
footer loading message. |
def _search(self):
text = "Search: {}".format(self.search_string)
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
search_items = []
for grp in self.items_org:
done = False
for idx, item in enumerate(grp):
... | Search - search URLs and text. |
def draw_screen(self, size):
    """Render curses screen."""
    self.tui.clear()
    rendered = self.top.render(size, focus=True)
    self.tui.draw_screen(size, rendered)
def mkbrowseto(self, url):
# Try-except block to work around webbrowser module bug
# https://bugs.python.org/issue31014
try:
browser = os.environ['BROWSER']
except KeyError:
pass
else:
del os.environ['BROWSER']
webbrowser.r... | Create the urwid callback function to open the web browser or call
another function with the URL. |
def process_urls(self, extractedurls, dedupe, shorten):
cols, _ = urwid.raw_display.Screen().get_cols_rows()
items = []
urls = []
first = True
for group, usedfirst, usedlast in extractedurls:
if first:
first = False
items.append(ur... | Process the 'extractedurls' and ready them for either the curses browser
or non-interactive output
Args: extractedurls
dedupe - Remove duplicate URLs from list
Returns: items - List of widgets for the ListBox
urls - List of all URLs |
def _get_key_file_path():
    """Return the key file path.

    Prefers the user's home directory when the env var is set and the
    directory is writable, otherwise falls back to the current
    working directory.
    """
    home = os.getenv(USER_HOME)  # read once instead of three times
    if home is not None and os.access(home, os.W_OK):
        return os.path.join(home, KEY_FILE_NAME)
    return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(... | Try to load the client key for the current ip. |
def save_key_file(self):
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file... | Save the current client key. |
def _send_register_payload(self, websocket):
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
... | Send the register payload. |
def _register(self):
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register fai... | Register wrapper. |
def register(self):
    """Pair client with tv."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(self._register())
def _command(self, msg):
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('comm... | Send a command to the tv. |
def command(self, request_type, uri, payload):
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload'... | Build and send a command. |
def send_message(self, message, icon_path=None):
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64en... | Show a floating message. |
def get_apps(self):
    """Return all apps."""
    self.request(EP_GET_APPS)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload').get('launchPoints')
def get_current_app(self):
    """Get the current app id."""
    self.request(EP_GET_CURRENT_APP_INFO)
    response = self.last_response
    if response is None:
        return None
    return response.get('payload').get('appId')
def get_services(self):
    """Get all services."""
    self.request(EP_GET_SERVICES)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload').get('services')
def get_software_info(self):
    """Return the current software status."""
    self.request(EP_GET_SOFTWARE_INFO)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload')
def get_inputs(self):
    """Get all inputs."""
    self.request(EP_GET_INPUTS)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload').get('devices')
def get_audio_status(self):
    """Get the current audio status."""
    self.request(EP_GET_AUDIO_STATUS)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload')
def get_volume(self):
    """Get the current volume."""
    self.request(EP_GET_VOLUME)
    response = self.last_response
    if response is None:
        return 0
    return response.get('payload').get('volume')
def set_volume(self, volume):
    """Set volume; negative values are clamped to 0."""
    if volume < 0:
        volume = 0
    self.request(EP_SET_VOLUME, {'volume': volume})
def get_channels(self):
    """Get all tv channels."""
    self.request(EP_GET_TV_CHANNELS)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload').get('channelList')
def get_current_channel(self):
    """Get the current tv channel."""
    self.request(EP_GET_CURRENT_CHANNEL)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload')
def get_channel_info(self):
    """Get the current channel info."""
    self.request(EP_GET_CHANNEL_INFO)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload')
def _load_plugins_itr(pgroup, safe=True):
    """.. seealso:: the doc of :func:`load_plugins`"""
    for entry_point in pkg_resources.iter_entry_points(pgroup):
        try:
            plugin = entry_point.load()
        except ImportError:
            if not safe:
                raise
            continue
        yield plugin
def select_by_key(items, sort_fn=sorted):
    """
    :param items: A list of tuples of keys and values, [([key], val)]
    :return: A list of tuples of key and values, [(key, [val])]

    >>> select_by_key([(["a", "aaa"], 1), (["b", "bb"], 2), (["a"], 3)])
    [('a', [1, 3]), ('aaa', [1]), ('b', [2]), ('bb', [2])]
    """
    flat = anyconfig.utils.concat(((key, val) for key in keys)
                                  for keys, val in items)
    grouped = anyconfig.utils.groupby(flat, operator.itemgetter(0))
    return [(key, sort_fn(pair[1] for pair in group))
            for key, group in grouped]
def list_by_x(prs, key):
if key == "type":
kfn = operator.methodcaller(key)
res = sorted(((k, sort_by_prio(g)) for k, g
in anyconfig.utils.groupby(prs, kfn)),
key=operator.itemgetter(0))
elif key == "extensions":
res = select_by_key(((p.ext... | :param key: Grouping key, "type" or "extensions"
:return:
A list of :class:`Processor` or its children classes grouped by
given 'item', [(cid, [:class:`Processor`)]] by default |
def findall_with_pred(predicate, prs):
    """
    :param predicate: any callable to filter results
    :param prs: A list of :class:`anyconfig.models.processor.Processor` classes
    :return: A list of appropriate processor classes or []
    """
    matched = [proc for proc in prs if predicate(proc)]
    matched.sort(key=operator.methodcaller("priority"), reverse=True)
    return matched
def maybe_processor(type_or_id, cls=anyconfig.models.processor.Processor):
    """
    :param type_or_id:
        Type of the data to process or ID of the processor class or
        :class:`anyconfig.models.processor.Processor` class object or its
        instance
    :param cls: A class object to compare with 'type_or_id'
    :return: Processor instance or None
    """
    result = None
    if isinstance(type_or_id, cls):
        # Already an instance; hand it back untouched.
        result = type_or_id
    elif type(type_or_id) == type(cls) and issubclass(type_or_id, cls):
        # A subclass of 'cls' (same metaclass): instantiate it.
        result = type_or_id()
    return result
def find_by_type_or_id(type_or_id, prs):
    """
    :param type_or_id: Type of the data to process or ID of the processor class
    :param prs: A list of :class:`anyconfig.models.processor.Processor` classes
    :return:
        A list of processor classes to process files of given data type or
        processor 'type_or_id' found by its ID
    :raises: UnknownProcessorTypeError
    """
    matches = findall_with_pred(
        lambda pcls: type_or_id in (pcls.cid(), pcls.type()), prs)
    if not matches:
        raise UnknownProcessorTypeError(type_or_id)
    return matches
def find_by_fileext(fileext, prs):
    """
    :param fileext: File extension
    :param prs: A list of :class:`anyconfig.models.processor.Processor` classes
    :return: A list of processor class to processor files with given extension
    :raises: UnknownFileTypeError
    """
    matches = findall_with_pred(lambda pcls: fileext in pcls.extensions(), prs)
    if not matches:
        raise UnknownFileTypeError("file extension={}".format(fileext))
    return matches
def find_by_maybe_file(obj, prs):
    """
    :param obj:
        a file path, file or file-like object, pathlib.Path object or an
        'anyconfig.globals.IOInfo' (namedtuple) object
    :param prs: A list of processor classes
    :return: A list of processor classes to process given (maybe) file
    :raises: UnknownFileTypeError
    """
    ioinfo = obj if isinstance(obj, IOInfo) else anyconfig.ioinfo.make(obj)
    return find_by_fileext(ioinfo.extension, prs)
def findall(obj, prs, forced_type=None,
cls=anyconfig.models.processor.Processor):
if (obj is None or not obj) and forced_type is None:
raise ValueError("The first argument 'obj' or the second argument "
"'forced_type' must be something other than "
... | :param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
... |
def find(obj, prs, forced_type=None, cls=anyconfig.models.processor.Processor):
if forced_type is not None:
processor = maybe_processor(forced_type, cls=cls)
if processor is not None:
return processor
pclss = findall(obj, prs, forced_type=forced_type, cls=cls)
return pclss[0... | :param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.