repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
whyscream/dspam-milter | dspam/client.py | DspamClient.process | python | def process(self, message, user):
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user] | Process a message. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L526-L549 | [
"def connect(self):\n \"\"\"\n Connect to TCP or domain socket, and process the server LMTP greeting.\n\n \"\"\"\n # extract proto from socket setting\n try:\n (proto, spec) = self.socket.split(':')\n except ValueError:\n raise DspamClientError(\n 'Failed to parse DSPAM so... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelTypedField.make_field | python | def make_field(self, types, domain, items):
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += self.make_xref(
self.rolename, domain, fieldarg, nodes.strong)
if fieldarg in types:
par += nodes.Text(' : ')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if (len(fieldtype) == 1 and
isinstance(fieldtype[0], nodes.Text)):
typename = u''.join(n.astext() for n in fieldtype)
par += self.make_xref(self.typerolename, domain, typename)
else:
par += fieldtype
par += nodes.Text('')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody) | Copy+Paste of TypedField.make_field() from Sphinx version 1.2.3. The first
and second nodes.Text() instance are changed in this implementation to
be ' : ' and '' respectively (instead of ' (' and ')').
TODO: Ask sphinx devs if there is a better way to support
this that is less copy+pasty. (thomasvandoren, 2015-03-17) | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L72-L110 | [
"def handle_item(fieldarg, content):\n par = nodes.paragraph()\n par += self.make_xref(\n self.rolename, domain, fieldarg, nodes.strong)\n if fieldarg in types:\n par += nodes.Text(' : ')\n # NOTE: using .pop() here to prevent a single type node to be\n # inserted twice into the... | class ChapelTypedField(TypedField):
"""Override TypedField in order to change output format."""
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelObject._pseudo_parse_arglist | python | def _pseudo_parse_arglist(signode, arglist):
paramlist = addnodes.desc_parameterlist()
stack = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = 0
ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# If there are too few or too many elements on the stack, just give
# up and treat the whole argument list as one argument, discarding
# the already partially populated paramlist node.
signode += addnodes.desc_parameterlist()
signode[-1] += addnodes.desc_parameter(arglist, arglist)
else:
signode += paramlist | Parse list of comma separated arguments.
Arguments can have optional types. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L143-L186 | null | class ChapelObject(ObjectDescription):
"""Base class for Chapel directives. It has methods for parsing signatures of
any form, and generating target and index text.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
ChapelTypedField('parameter', label=l_('Arguments'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='chplref',
typenames=('paramtype', 'type'),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('yieldvalue', label=l_('Yields'), has_arg=False,
names=('yields', 'yield')),
Field('returntype', label=l_('Return type'), has_arg=False,
names=('rtype',)),
Field('yieldtype', label=l_('Yield type'), has_arg=False,
names=('ytype',)),
GroupedField('errorhandling', label=l_('Throws'),
names=('throw', 'throws'), can_collapse=True),
]
@staticmethod
def _get_attr_like_prefix(self, sig):
"""Return prefix text for attribute or data directive."""
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype == 'type':
return 'type' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _get_proc_like_prefix(self, sig):
"""Return prefix text for function or method directive
(and similar).
"""
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype.startswith('iter'):
return 'iter' + ' '
elif self.objtype in ('method', 'function'):
return 'proc' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _is_attr_like(self):
"""Returns True when objtype is attribute or data."""
return self.objtype in ('attribute', 'data', 'type', 'enum')
def _is_proc_like(self):
"""Returns True when objtype is *function or *method."""
return (self.objtype in
('function', 'iterfunction', 'method', 'itermethod'))
def _get_sig_prefix(self, sig):
"""Return signature prefix text. For attribute, data, and proc/iter directives
this might be part of the signature. E.g. `type myNewType` will return
a prefix of 'type' and `inline proc foo()` will return 'inline proc'.
"""
if self._is_proc_like():
return self._get_proc_like_prefix(sig)
elif self._is_attr_like():
return self._get_attr_like_prefix(sig)
else:
return ChapelObject.get_signature_prefix(self, sig)
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in
the signature.
"""
return ''
def needs_arglist(self):
"""May return True if an empty argument list is to be generated even if the
document contains none.
"""
return False
def handle_signature(self, sig, signode):
"""Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raises, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`.
"""
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
classname = self.env.temp_data.get('chpl:class')
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
# class name is given again in the signature
name_prefix = name_prefix[len(classname):].lstrip('.')
elif name_prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + name_prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
if name_prefix:
classname = name_prefix.rstrip('.')
fullname = name_prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
# if func_prefix:
# signode += addnodes.desc_addname(func_prefix, func_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
# If this needs an arglist, and parens were provided in the
# signature, add a parameterlist. Chapel supports paren-less
# functions and methods, which can act as computed properties. If
# arglist is the empty string, the signature included parens. If
# arglist is None, it did not include parens.
if self.needs_arglist() and arglist is not None:
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
self._pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name):
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
"""Add cross-reference IDs and entries to the index node, if
applicable. *name_cls* is the return value of
:py:meth:`handle_signature`.
"""
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['chpl']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, ''))
def before_content(self):
"""Called before parsing content. Set flag to help with class scoping.
"""
self.clsname_set = False
def after_content(self):
"""Called after parsing content. If any classes were added to the env
temp_data, make sure they are removed.
"""
if self.clsname_set:
self.env.temp_data.pop('chpl:class', None)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelObject._get_attr_like_prefix | python | def _get_attr_like_prefix(self, sig):
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype == 'type':
return 'type' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig) | Return prefix text for attribute or data directive. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L188-L200 | null | class ChapelObject(ObjectDescription):
"""Base class for Chapel directives. It has methods for parsing signatures of
any form, and generating target and index text.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
ChapelTypedField('parameter', label=l_('Arguments'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='chplref',
typenames=('paramtype', 'type'),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('yieldvalue', label=l_('Yields'), has_arg=False,
names=('yields', 'yield')),
Field('returntype', label=l_('Return type'), has_arg=False,
names=('rtype',)),
Field('yieldtype', label=l_('Yield type'), has_arg=False,
names=('ytype',)),
GroupedField('errorhandling', label=l_('Throws'),
names=('throw', 'throws'), can_collapse=True),
]
@staticmethod
def _pseudo_parse_arglist(signode, arglist):
"""Parse list of comma separated arguments.
Arguments can have optional types.
"""
paramlist = addnodes.desc_parameterlist()
stack = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = 0
ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# If there are too few or too many elements on the stack, just give
# up and treat the whole argument list as one argument, discarding
# the already partially populated paramlist node.
signode += addnodes.desc_parameterlist()
signode[-1] += addnodes.desc_parameter(arglist, arglist)
else:
signode += paramlist
def _get_proc_like_prefix(self, sig):
"""Return prefix text for function or method directive
(and similar).
"""
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype.startswith('iter'):
return 'iter' + ' '
elif self.objtype in ('method', 'function'):
return 'proc' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _is_attr_like(self):
"""Returns True when objtype is attribute or data."""
return self.objtype in ('attribute', 'data', 'type', 'enum')
def _is_proc_like(self):
"""Returns True when objtype is *function or *method."""
return (self.objtype in
('function', 'iterfunction', 'method', 'itermethod'))
def _get_sig_prefix(self, sig):
"""Return signature prefix text. For attribute, data, and proc/iter directives
this might be part of the signature. E.g. `type myNewType` will return
a prefix of 'type' and `inline proc foo()` will return 'inline proc'.
"""
if self._is_proc_like():
return self._get_proc_like_prefix(sig)
elif self._is_attr_like():
return self._get_attr_like_prefix(sig)
else:
return ChapelObject.get_signature_prefix(self, sig)
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in
the signature.
"""
return ''
def needs_arglist(self):
    """Base implementation: never synthesize an empty argument list
    when the documented signature supplies none.
    """
    return False
def handle_signature(self, sig, signode):
    """Parse the signature *sig* into individual nodes and append them to the
    *signode*. If ValueError is raised, parsing is aborted and the whole
    *sig* string is put into a single desc_name node.

    The return value is the value that identifies the object. IOW, it is
    the identifier that will be used to reference this object, datum,
    attribute, proc, etc. It is a tuple of "fullname" (including module and
    class(es)) and the classes. See also :py:meth:`add_target_and_index`.
    """
    # Attribute-like directives (attribute/data/type/enum) have no
    # argument list, so they use the simpler attr pattern.
    if self._is_attr_like():
        sig_match = chpl_attr_sig_pattern.match(sig)
        if sig_match is None:
            raise ValueError('Signature does not parse: {0}'.format(sig))
        func_prefix, name_prefix, name, retann = sig_match.groups()
        arglist = None
    else:
        sig_match = chpl_sig_pattern.match(sig)
        if sig_match is None:
            raise ValueError('Signature does not parse: {0}'.format(sig))
        func_prefix, name_prefix, name, arglist, retann = \
            sig_match.groups()
    # Module/class context comes from the directive options or from the
    # environment state pushed by enclosing module/class directives.
    modname = self.options.get(
        'module', self.env.temp_data.get('chpl:module'))
    classname = self.env.temp_data.get('chpl:class')
    # Reconcile any class prefix written in the signature with the class
    # currently in scope, producing the dotted fullname.
    if classname:
        if name_prefix and name_prefix.startswith(classname):
            fullname = name_prefix + name
            # class name is given again in the signature
            name_prefix = name_prefix[len(classname):].lstrip('.')
        elif name_prefix:
            # class name is given in the signature, but different
            # (shouldn't happen)
            fullname = classname + '.' + name_prefix + name
        else:
            # class name is not given in the signature
            fullname = classname + '.' + name
    else:
        if name_prefix:
            classname = name_prefix.rstrip('.')
            fullname = name_prefix + name
        else:
            classname = ''
            fullname = name
    # Record the resolved context on the node; add_target_and_index and
    # cross-reference resolution read these back.
    signode['module'] = modname
    signode['class'] = classname
    signode['fullname'] = fullname
    sig_prefix = self.get_signature_prefix(sig)
    if sig_prefix:
        signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
    # if func_prefix:
    #     signode += addnodes.desc_addname(func_prefix, func_prefix)
    if name_prefix:
        signode += addnodes.desc_addname(name_prefix, name_prefix)
    anno = self.options.get('annotation')
    signode += addnodes.desc_name(name, name)
    if not arglist:
        # If this needs an arglist, and parens were provided in the
        # signature, add a parameterlist. Chapel supports paren-less
        # functions and methods, which can act as computed properties. If
        # arglist is the empty string, the signature included parens. If
        # arglist is None, it did not include parens.
        if self.needs_arglist() and arglist is not None:
            # for callables, add an empty parameter list
            signode += addnodes.desc_parameterlist()
        if retann:
            signode += addnodes.desc_type(retann, retann)
        if anno:
            signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
        return fullname, name_prefix
    # Non-empty arglist: parse it (with optional-bracket handling) and
    # then emit the return annotation / extra annotation, as above.
    self._pseudo_parse_arglist(signode, arglist)
    if retann:
        signode += addnodes.desc_type(retann, retann)
    if anno:
        signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
    return fullname, name_prefix
def get_index_text(self, modname, name):
    """Return the text for the index entry of the object.

    The base class has no sensible default; every concrete directive
    subclass must override this.
    """
    raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
    """Add cross-reference IDs and entries to the index node, if
    applicable. *name_cls* is the return value of
    :py:meth:`handle_signature`.
    """
    modname = self.options.get(
        'module', self.env.temp_data.get('chpl:module'))
    # Fully qualified target name; name_cls[0] is the "fullname" part
    # of the tuple returned by handle_signature.
    fullname = (modname and modname + '.' or '') + name_cls[0]
    # note target -- only register it the first time it is seen in the
    # document, and warn on duplicates across the domain.
    if fullname not in self.state.document.ids:
        signode['names'].append(fullname)
        signode['ids'].append(fullname)
        signode['first'] = (not self.names)
        self.state.document.note_explicit_target(signode)
        objects = self.env.domaindata['chpl']['objects']
        if fullname in objects:
            self.state_machine.reporter.warning(
                'duplicate object description of %s, ' % fullname +
                'other instance in ' +
                self.env.doc2path(objects[fullname][0]) +
                ', use :noindex: for one of them',
                line=self.lineno)
        objects[fullname] = (self.env.docname, self.objtype)
    # NOTE(review): get_index_text receives the whole name_cls tuple
    # here, not just the name string -- subclasses appear to expect that.
    indextext = self.get_index_text(modname, name_cls)
    if indextext:
        self.indexnode['entries'].append(('single', indextext,
                                          fullname, ''))
def before_content(self):
    """Reset the class-scope bookkeeping flag before content parsing.

    The flag records whether this directive pushed a class name into
    the environment, so it can be popped again afterwards.
    """
    self.clsname_set = False
def after_content(self):
    """Clean up after content parsing.

    If a class name was pushed into the environment temp data while
    parsing this directive's content, drop it again.
    """
    if not self.clsname_set:
        return
    self.env.temp_data.pop('chpl:class', None)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelObject._get_proc_like_prefix | python | def _get_proc_like_prefix(self, sig):
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype.startswith('iter'):
return 'iter' + ' '
elif self.objtype in ('method', 'function'):
return 'proc' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig) | Return prefix text for function or method directive
(and similar). | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L202-L218 | null | class ChapelObject(ObjectDescription):
"""Base class for Chapel directives. It has methods for parsing signatures of
any form, and generating target and index text.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
ChapelTypedField('parameter', label=l_('Arguments'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='chplref',
typenames=('paramtype', 'type'),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('yieldvalue', label=l_('Yields'), has_arg=False,
names=('yields', 'yield')),
Field('returntype', label=l_('Return type'), has_arg=False,
names=('rtype',)),
Field('yieldtype', label=l_('Yield type'), has_arg=False,
names=('ytype',)),
GroupedField('errorhandling', label=l_('Throws'),
names=('throw', 'throws'), can_collapse=True),
]
@staticmethod
def _pseudo_parse_arglist(signode, arglist):
"""Parse list of comma separated arguments.
Arguments can have optional types.
"""
paramlist = addnodes.desc_parameterlist()
stack = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = 0
ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# If there are too few or too many elements on the stack, just give
# up and treat the whole argument list as one argument, discarding
# the already partially populated paramlist node.
signode += addnodes.desc_parameterlist()
signode[-1] += addnodes.desc_parameter(arglist, arglist)
else:
signode += paramlist
def _get_attr_like_prefix(self, sig):
"""Return prefix text for attribute or data directive."""
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype == 'type':
return 'type' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _is_attr_like(self):
"""Returns True when objtype is attribute or data."""
return self.objtype in ('attribute', 'data', 'type', 'enum')
def _is_proc_like(self):
"""Returns True when objtype is *function or *method."""
return (self.objtype in
('function', 'iterfunction', 'method', 'itermethod'))
def _get_sig_prefix(self, sig):
"""Return signature prefix text. For attribute, data, and proc/iter directives
this might be part of the signature. E.g. `type myNewType` will return
a prefix of 'type' and `inline proc foo()` will return 'inline proc'.
"""
if self._is_proc_like():
return self._get_proc_like_prefix(sig)
elif self._is_attr_like():
return self._get_attr_like_prefix(sig)
else:
return ChapelObject.get_signature_prefix(self, sig)
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in
the signature.
"""
return ''
def needs_arglist(self):
"""May return True if an empty argument list is to be generated even if the
document contains none.
"""
return False
def handle_signature(self, sig, signode):
"""Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`.
"""
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
classname = self.env.temp_data.get('chpl:class')
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
# class name is given again in the signature
name_prefix = name_prefix[len(classname):].lstrip('.')
elif name_prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + name_prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
if name_prefix:
classname = name_prefix.rstrip('.')
fullname = name_prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
# if func_prefix:
# signode += addnodes.desc_addname(func_prefix, func_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
# If this needs an arglist, and parens were provided in the
# signature, add a parameterlist. Chapel supports paren-less
# functions and methods, which can act as computed properties. If
# arglist is the empty string, the signature included parens. If
# arglist is None, it did not include parens.
if self.needs_arglist() and arglist is not None:
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
self._pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name):
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
"""Add cross-reference IDs and entries to the index node, if
applicable. *name_cls* is the return value of
:py:meth:`handle_signature`.
"""
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['chpl']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, ''))
def before_content(self):
"""Called before parsing content. Set flag to help with class scoping.
"""
self.clsname_set = False
def after_content(self):
"""Called after parsing content. If any classes were added to the env
temp_data, make sure they are removed.
"""
if self.clsname_set:
self.env.temp_data.pop('chpl:class', None)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelObject._get_sig_prefix | python | def _get_sig_prefix(self, sig):
if self._is_proc_like():
return self._get_proc_like_prefix(sig)
elif self._is_attr_like():
return self._get_attr_like_prefix(sig)
else:
return ChapelObject.get_signature_prefix(self, sig) | Return signature prefix text. For attribute, data, and proc/iter directives
this might be part of the signature. E.g. `type myNewType` will return
a prefix of 'type' and `inline proc foo()` will return 'inline proc'. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L229-L239 | null | class ChapelObject(ObjectDescription):
"""Base class for Chapel directives. It has methods for parsing signatures of
any form, and generating target and index text.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
ChapelTypedField('parameter', label=l_('Arguments'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='chplref',
typenames=('paramtype', 'type'),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('yieldvalue', label=l_('Yields'), has_arg=False,
names=('yields', 'yield')),
Field('returntype', label=l_('Return type'), has_arg=False,
names=('rtype',)),
Field('yieldtype', label=l_('Yield type'), has_arg=False,
names=('ytype',)),
GroupedField('errorhandling', label=l_('Throws'),
names=('throw', 'throws'), can_collapse=True),
]
@staticmethod
def _pseudo_parse_arglist(signode, arglist):
"""Parse list of comma separated arguments.
Arguments can have optional types.
"""
paramlist = addnodes.desc_parameterlist()
stack = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = 0
ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# If there are too few or too many elements on the stack, just give
# up and treat the whole argument list as one argument, discarding
# the already partially populated paramlist node.
signode += addnodes.desc_parameterlist()
signode[-1] += addnodes.desc_parameter(arglist, arglist)
else:
signode += paramlist
def _get_attr_like_prefix(self, sig):
"""Return prefix text for attribute or data directive."""
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype == 'type':
return 'type' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _get_proc_like_prefix(self, sig):
"""Return prefix text for function or method directive
(and similar).
"""
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype.startswith('iter'):
return 'iter' + ' '
elif self.objtype in ('method', 'function'):
return 'proc' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _is_attr_like(self):
"""Returns True when objtype is attribute or data."""
return self.objtype in ('attribute', 'data', 'type', 'enum')
def _is_proc_like(self):
"""Returns True when objtype is *function or *method."""
return (self.objtype in
('function', 'iterfunction', 'method', 'itermethod'))
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in
the signature.
"""
return ''
def needs_arglist(self):
"""May return True if an empty argument list is to be generated even if the
document contains none.
"""
return False
def handle_signature(self, sig, signode):
"""Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`.
"""
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
classname = self.env.temp_data.get('chpl:class')
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
# class name is given again in the signature
name_prefix = name_prefix[len(classname):].lstrip('.')
elif name_prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + name_prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
if name_prefix:
classname = name_prefix.rstrip('.')
fullname = name_prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
# if func_prefix:
# signode += addnodes.desc_addname(func_prefix, func_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
# If this needs an arglist, and parens were provided in the
# signature, add a parameterlist. Chapel supports paren-less
# functions and methods, which can act as computed properties. If
# arglist is the empty string, the signature included parens. If
# arglist is None, it did not include parens.
if self.needs_arglist() and arglist is not None:
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
self._pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name):
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
"""Add cross-reference IDs and entries to the index node, if
applicable. *name_cls* is the return value of
:py:meth:`handle_signature`.
"""
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['chpl']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, ''))
def before_content(self):
"""Called before parsing content. Set flag to help with class scoping.
"""
self.clsname_set = False
def after_content(self):
"""Called after parsing content. If any classes were added to the env
temp_data, make sure they are removed.
"""
if self.clsname_set:
self.env.temp_data.pop('chpl:class', None)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelObject.handle_signature | python | def handle_signature(self, sig, signode):
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
classname = self.env.temp_data.get('chpl:class')
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
# class name is given again in the signature
name_prefix = name_prefix[len(classname):].lstrip('.')
elif name_prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + name_prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
if name_prefix:
classname = name_prefix.rstrip('.')
fullname = name_prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
# if func_prefix:
# signode += addnodes.desc_addname(func_prefix, func_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
# If this needs an arglist, and parens were provided in the
# signature, add a parameterlist. Chapel supports paren-less
# functions and methods, which can act as computed properties. If
# arglist is the empty string, the signature included parens. If
# arglist is None, it did not include parens.
if self.needs_arglist() and arglist is not None:
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
self._pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix | Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L253-L337 | null | class ChapelObject(ObjectDescription):
"""Base class for Chapel directives. It has methods for parsing signatures of
any form, and generating target and index text.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
ChapelTypedField('parameter', label=l_('Arguments'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='chplref',
typenames=('paramtype', 'type'),
can_collapse=True),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('yieldvalue', label=l_('Yields'), has_arg=False,
names=('yields', 'yield')),
Field('returntype', label=l_('Return type'), has_arg=False,
names=('rtype',)),
Field('yieldtype', label=l_('Yield type'), has_arg=False,
names=('ytype',)),
GroupedField('errorhandling', label=l_('Throws'),
names=('throw', 'throws'), can_collapse=True),
]
@staticmethod
def _pseudo_parse_arglist(signode, arglist):
"""Parse list of comma separated arguments.
Arguments can have optional types.
"""
paramlist = addnodes.desc_parameterlist()
stack = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = 0
ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# If there are too few or too many elements on the stack, just give
# up and treat the whole argument list as one argument, discarding
# the already partially populated paramlist node.
signode += addnodes.desc_parameterlist()
signode[-1] += addnodes.desc_parameter(arglist, arglist)
else:
signode += paramlist
def _get_attr_like_prefix(self, sig):
"""Return prefix text for attribute or data directive."""
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype == 'type':
return 'type' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _get_proc_like_prefix(self, sig):
"""Return prefix text for function or method directive
(and similar).
"""
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype.startswith('iter'):
return 'iter' + ' '
elif self.objtype in ('method', 'function'):
return 'proc' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _is_attr_like(self):
"""Returns True when objtype is attribute or data."""
return self.objtype in ('attribute', 'data', 'type', 'enum')
def _is_proc_like(self):
"""Returns True when objtype is *function or *method."""
return (self.objtype in
('function', 'iterfunction', 'method', 'itermethod'))
def _get_sig_prefix(self, sig):
"""Return signature prefix text. For attribute, data, and proc/iter directives
this might be part of the signature. E.g. `type myNewType` will return
a prefix of 'type' and `inline proc foo()` will return 'inline proc'.
"""
if self._is_proc_like():
return self._get_proc_like_prefix(sig)
elif self._is_attr_like():
return self._get_attr_like_prefix(sig)
else:
return ChapelObject.get_signature_prefix(self, sig)
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in
the signature.
"""
return ''
def needs_arglist(self):
"""May return True if an empty argument list is to be generated even if the
document contains none.
"""
return False
def get_index_text(self, modname, name):
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
"""Add cross-reference IDs and entries to the index node, if
applicable. *name_cls* is the return value of
:py:meth:`handle_signature`.
"""
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['chpl']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, ''))
def before_content(self):
"""Called before parsing content. Set flag to help with class scoping.
"""
self.clsname_set = False
def after_content(self):
"""Called after parsing content. If any classes were added to the env
temp_data, make sure they are removed.
"""
if self.clsname_set:
self.env.temp_data.pop('chpl:class', None)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelObject.add_target_and_index | python | def add_target_and_index(self, name_cls, sig, signode):
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['chpl']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, '')) | Add cross-reference IDs and entries to the index node, if
applicable. *name_cls* is the return value of
:py:meth:`handle_signature`. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L343-L370 | null | class ChapelObject(ObjectDescription):
class ChapelObject(ObjectDescription):
    """Base class for Chapel directives. It has methods for parsing signatures of
    any form, and generating target and index text.
    """

    option_spec = {
        'noindex': directives.flag,
        'module': directives.unchanged,
        'annotation': directives.unchanged,
    }

    # Field-list entries recognized in directive bodies (":arg x: ...", etc.).
    doc_field_types = [
        ChapelTypedField('parameter', label=l_('Arguments'),
                         names=('param', 'parameter', 'arg', 'argument'),
                         typerolename='chplref',
                         typenames=('paramtype', 'type'),
                         can_collapse=True),
        Field('returnvalue', label=l_('Returns'), has_arg=False,
              names=('returns', 'return')),
        Field('yieldvalue', label=l_('Yields'), has_arg=False,
              names=('yields', 'yield')),
        Field('returntype', label=l_('Return type'), has_arg=False,
              names=('rtype',)),
        Field('yieldtype', label=l_('Yield type'), has_arg=False,
              names=('ytype',)),
        GroupedField('errorhandling', label=l_('Throws'),
                     names=('throw', 'throws'), can_collapse=True),
    ]

    @staticmethod
    def _pseudo_parse_arglist(signode, arglist):
        """Parse list of comma separated arguments.
        Arguments can have optional types.
        """
        paramlist = addnodes.desc_parameterlist()
        # stack tracks nesting of [optional] argument groups; the bottom
        # element is always the parameter list itself.
        stack = [paramlist]
        try:
            for argument in arglist.split(','):
                argument = argument.strip()
                ends_open = 0
                ends_close = 0
                while argument.startswith('['):
                    stack.append(addnodes.desc_optional())
                    stack[-2] += stack[-1]
                    argument = argument[1:].strip()
                while argument.startswith(']'):
                    stack.pop()
                    argument = argument[1:].strip()
                while argument.endswith(']') and not argument.endswith('[]'):
                    ends_close += 1
                    argument = argument[:-1].strip()
                while argument.endswith('['):
                    ends_open += 1
                    argument = argument[:-1].strip()
                if argument:
                    stack[-1] += addnodes.desc_parameter(argument, argument)
                while ends_open:
                    stack.append(addnodes.desc_optional())
                    stack[-2] += stack[-1]
                    ends_open -= 1
                while ends_close:
                    stack.pop()
                    ends_close -= 1
            if len(stack) != 1:
                raise IndexError
        except IndexError:
            # If there are too few or too many elements on the stack, just give
            # up and treat the whole argument list as one argument, discarding
            # the already partially populated paramlist node.
            signode += addnodes.desc_parameterlist()
            signode[-1] += addnodes.desc_parameter(arglist, arglist)
        else:
            signode += paramlist

    def _get_attr_like_prefix(self, sig):
        """Return prefix text for attribute or data directive."""
        sig_match = chpl_attr_sig_pattern.match(sig)
        if sig_match is None:
            return ChapelObject.get_signature_prefix(self, sig)
        # group(1) is the prefix run; avoid tuple-unpacking into `_`, which
        # would shadow the gettext alias in this scope.
        prefixes = sig_match.group(1)
        if prefixes:
            return prefixes.strip() + ' '
        elif self.objtype == 'type':
            return 'type' + ' '
        else:
            return ChapelObject.get_signature_prefix(self, sig)

    def _get_proc_like_prefix(self, sig):
        """Return prefix text for function or method directive
        (and similar).
        """
        sig_match = chpl_sig_pattern.match(sig)
        if sig_match is None:
            return ChapelObject.get_signature_prefix(self, sig)
        prefixes = sig_match.group(1)
        if prefixes:
            return prefixes.strip() + ' '
        elif self.objtype.startswith('iter'):
            return 'iter' + ' '
        elif self.objtype in ('method', 'function'):
            return 'proc' + ' '
        else:
            return ChapelObject.get_signature_prefix(self, sig)

    def _is_attr_like(self):
        """Returns True when objtype is attribute or data."""
        return self.objtype in ('attribute', 'data', 'type', 'enum')

    def _is_proc_like(self):
        """Returns True when objtype is *function or *method."""
        return (self.objtype in
                ('function', 'iterfunction', 'method', 'itermethod'))

    def _get_sig_prefix(self, sig):
        """Return signature prefix text. For attribute, data, and proc/iter directives
        this might be part of the signature. E.g. `type myNewType` will return
        a prefix of 'type' and `inline proc foo()` will return 'inline proc'.
        """
        if self._is_proc_like():
            return self._get_proc_like_prefix(sig)
        elif self._is_attr_like():
            return self._get_attr_like_prefix(sig)
        else:
            return ChapelObject.get_signature_prefix(self, sig)

    def get_signature_prefix(self, sig):
        """May return a prefix to put before the object name in
        the signature.
        """
        return ''

    def needs_arglist(self):
        """May return True if an empty argument list is to be generated even if the
        document contains none.
        """
        return False

    def handle_signature(self, sig, signode):
        """Parse the signature *sig* into individual nodes and append them to the
        *signode*. If ValueError is raised, parsing is aborted and the whole
        *sig* string is put into a single desc_name node.
        The return value is the value that identifies the object. IOW, it is
        the identifier that will be used to reference this object, datum,
        attribute, proc, etc. It is a tuple of "fullname" (including module and
        class(es)) and the classes. See also :py:meth:`add_target_and_index`.
        """
        if self._is_attr_like():
            sig_match = chpl_attr_sig_pattern.match(sig)
            if sig_match is None:
                raise ValueError('Signature does not parse: {0}'.format(sig))
            func_prefix, name_prefix, name, retann = sig_match.groups()
            arglist = None
        else:
            sig_match = chpl_sig_pattern.match(sig)
            if sig_match is None:
                raise ValueError('Signature does not parse: {0}'.format(sig))
            func_prefix, name_prefix, name, arglist, retann = \
                sig_match.groups()
        modname = self.options.get(
            'module', self.env.temp_data.get('chpl:module'))
        classname = self.env.temp_data.get('chpl:class')
        if classname:
            if name_prefix and name_prefix.startswith(classname):
                fullname = name_prefix + name
                # class name is given again in the signature
                name_prefix = name_prefix[len(classname):].lstrip('.')
            elif name_prefix:
                # class name is given in the signature, but different
                # (shouldn't happen)
                fullname = classname + '.' + name_prefix + name
            else:
                # class name is not given in the signature
                fullname = classname + '.' + name
        else:
            if name_prefix:
                classname = name_prefix.rstrip('.')
                fullname = name_prefix + name
            else:
                classname = ''
                fullname = name
        signode['module'] = modname
        signode['class'] = classname
        signode['fullname'] = fullname
        sig_prefix = self.get_signature_prefix(sig)
        if sig_prefix:
            signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
        # if func_prefix:
        #     signode += addnodes.desc_addname(func_prefix, func_prefix)
        if name_prefix:
            signode += addnodes.desc_addname(name_prefix, name_prefix)
        anno = self.options.get('annotation')
        signode += addnodes.desc_name(name, name)
        if not arglist:
            # If this needs an arglist, and parens were provided in the
            # signature, add a parameterlist. Chapel supports paren-less
            # functions and methods, which can act as computed properties. If
            # arglist is the empty string, the signature included parens. If
            # arglist is None, it did not include parens.
            if self.needs_arglist() and arglist is not None:
                # for callables, add an empty parameter list
                signode += addnodes.desc_parameterlist()
            if retann:
                signode += addnodes.desc_type(retann, retann)
            if anno:
                signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
            return fullname, name_prefix
        self._pseudo_parse_arglist(signode, arglist)
        if retann:
            signode += addnodes.desc_type(retann, retann)
        if anno:
            signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
        return fullname, name_prefix

    def get_index_text(self, modname, name):
        """Return the text for the index entry of the object."""
        raise NotImplementedError('must be implemented in subclasses')

    def before_content(self):
        """Called before parsing content. Set flag to help with class scoping.
        """
        self.clsname_set = False

    def after_content(self):
        """Called after parsing content. If any classes were added to the env
        temp_data, make sure they are removed.
        """
        if self.clsname_set:
            self.env.temp_data.pop('chpl:class', None)
|
def run(self):
    """Process a ``chpl:module`` directive.

    Records the module in the domain data, marks it as the current module in
    the build environment, and (unless ``:noindex:`` is given) emits a target
    node plus an index entry for it.
    """
    env = self.state.document.settings.env
    module_name = self.arguments[0].strip()
    env.temp_data['chpl:module'] = module_name
    if 'noindex' in self.options:
        return []
    # The platform and synopsis are not printed; they only surface in the
    # module index.
    env.domaindata['chpl']['modules'][module_name] = (
        env.docname,
        self.options.get('synopsis', ''),
        self.options.get('platform', ''),
        'deprecated' in self.options,
    )
    # Duplicate entry in 'objects' so ChapelDomain.find_obj() can also find
    # the module by name.
    env.domaindata['chpl']['objects'][module_name] = (env.docname, 'module')
    target = nodes.target('', '', ids=['module-' + module_name], ismod=True)
    self.state.document.note_explicit_target(target)
    index_text = _('%s (module)') % module_name
    index_node = addnodes.index(
        entries=[('single', index_text, 'module-' + module_name, '')])
    return [target, index_node]
the directive implementation and then this method is called. It parses
the options on the module directive, updates the environment according,
and creates an index entry for the module.
Based on the python domain module directive. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L399-L433 | null | class ChapelModule(Directive):
class ChapelModule(Directive):
    """Directive to make description of a new module."""

    # The directive takes no body content of its own.
    has_content = False
    # Exactly one required argument: the module name.
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = False
    # ``platform`` and ``synopsis`` are free-form text (identity converters);
    # ``noindex`` and ``deprecated`` are bare flags.
    option_spec = {
        'platform': lambda x: x,
        'synopsis': lambda x: x,
        'noindex': directives.flag,
        'deprecated': directives.flag,
    }
|
def chpl_type_name(self):
    """Human-readable kind word for this objtype: 'iterator' for iter
    methods, 'method' for plain methods, '' for everything else.
    """
    kind = self.objtype
    if kind.endswith('method'):
        if kind.startswith('iter'):
            return 'iterator'
        if kind == 'method':
            return 'method'
    return ''
class ChapelClassMember(ChapelObject):
    """Description of Chapel class members, including attributes, procs,
    and iters.
    """

    # NOTE(review): the dump left a stray @property on get_signature_prefix
    # (a two-argument method); the decorator belongs on chpl_type_name,
    # which get_index_text reads as ``self.chpl_type_name``.
    @property
    def chpl_type_name(self):
        """Returns 'iterator' or 'method' or '' depending on object type."""
        if not self.objtype.endswith('method'):
            return ''
        elif self.objtype.startswith('iter'):
            return 'iterator'
        elif self.objtype == 'method':
            return 'method'
        else:
            return ''

    def get_signature_prefix(self, sig):
        """Return signature prefix based on sig. May include portion of the sig
        text, if relevant (e.g. `proc foo()` will return 'proc' here).
        """
        return self._get_sig_prefix(sig)

    def needs_arglist(self):
        """Procs and iters need arglists. Attributes do not."""
        return self.objtype.endswith('method')

    def get_index_text(self, modname, name_cls):
        """Return text for index entry based on object type."""
        name, cls = name_cls
        add_modules = self.env.config.add_module_names
        if self.objtype.endswith('method'):
            try:
                clsname, methname = name.rsplit('.', 1)
            except ValueError:
                # No class qualifier in the name.
                if modname:
                    return _('%s() (in module %s)') % (name, modname)
                else:
                    return _('%s()') % name
            if modname and add_modules:
                return _('%s() (%s.%s %s)') % \
                    (methname, modname, clsname, self.chpl_type_name)
            else:
                return _('%s() (%s %s)') % \
                    (methname, clsname, self.chpl_type_name)
        elif self.objtype == 'attribute':
            try:
                clsname, attrname = name.rsplit('.', 1)
            except ValueError:
                if modname:
                    return _('%s (in module %s)') % (name, modname)
                else:
                    return name
            if modname and add_modules:
                return _('%s (%s.%s attribute)') % (attrname, modname, clsname)
            else:
                return _('%s (%s attribute)') % (attrname, clsname)
        else:
            return ''
|
def get_index_text(self, modname, name_cls):
    """Build the index-entry text for this class member, based on objtype."""
    name, _cls = name_cls
    show_module = modname and self.env.config.add_module_names
    if self.objtype.endswith('method'):
        try:
            clsname, methname = name.rsplit('.', 1)
        except ValueError:
            # Name carries no class qualifier.
            if modname:
                return _('%s() (in module %s)') % (name, modname)
            return _('%s()') % name
        if show_module:
            return _('%s() (%s.%s %s)') % (
                methname, modname, clsname, self.chpl_type_name)
        return _('%s() (%s %s)') % (methname, clsname, self.chpl_type_name)
    if self.objtype == 'attribute':
        try:
            clsname, attrname = name.rsplit('.', 1)
        except ValueError:
            if modname:
                return _('%s (in module %s)') % (name, modname)
            return name
        if show_module:
            return _('%s (%s.%s attribute)') % (attrname, modname, clsname)
        return _('%s (%s attribute)') % (attrname, clsname)
    return ''
class ChapelClassMember(ChapelObject):
    """Description of Chapel class members, including attributes, procs,
    and iters.
    """

    @property
    def chpl_type_name(self):
        """Returns iterator or method or '' depending on object type."""
        # Only *method objtypes get a printable kind word here.
        if not self.objtype.endswith('method'):
            return ''
        elif self.objtype.startswith('iter'):
            return 'iterator'
        elif self.objtype == 'method':
            return 'method'
        else:
            return ''

    def get_signature_prefix(self, sig):
        """Return signature prefix based on sig. May include portion of the sig
        text, if relevant (e.g. `proc foo()` will return 'proc' here).
        """
        return self._get_sig_prefix(sig)

    def needs_arglist(self):
        """Procs and iters need arglists. Attributes do not."""
        return self.objtype.endswith('method')
|
def get_index_text(self, modname, name_cls):
    """Return index entry text based on object type."""
    if self.objtype not in ('class', 'record'):
        return ''
    display_name = name_cls[0]
    if modname:
        return _('%s (%s in %s)') % (display_name, self.objtype, modname)
    return _('%s (built-in %s)') % (display_name, self.objtype)
class ChapelClassObject(ChapelObject):
    """Chapel class and record description."""

    def get_signature_prefix(self, sig):
        """Return class or record according to object type."""
        return self.objtype + ' '

    def before_content(self):
        """Called before parsing content. Push the class name onto the class name
        stack. Used to construct the full name for members.
        """
        ChapelObject.before_content(self)
        if self.names:
            # names[0] is the (fullname, name_prefix) pair returned by
            # handle_signature().
            self.env.temp_data['chpl:class'] = self.names[0][0]
            self.clsname_set = True
|
def before_content(self):
    """Hook run before the directive body is parsed: remember the class name
    so nested members can build their fully qualified names.
    """
    ChapelObject.before_content(self)
    if not self.names:
        return
    fullname = self.names[0][0]
    self.env.temp_data['chpl:class'] = fullname
    self.clsname_set = True
stack. Used to construct the full name for members. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L535-L542 | null | class ChapelClassObject(ChapelObject):
class ChapelClassObject(ChapelObject):
    """Chapel class and record description."""

    def get_signature_prefix(self, sig):
        """Return class or record according to object type."""
        return self.objtype + ' '

    def get_index_text(self, modname, name_cls):
        """Return index entry text based on object type."""
        if self.objtype in ('class', 'record'):
            if not modname:
                return _('%s (built-in %s)') % (name_cls[0], self.objtype)
            return _('%s (%s in %s)') % (name_cls[0], self.objtype, modname)
        else:
            return ''
|
def get_index_text(self, modname, name_cls):
    """Return text for index entry based on object type."""
    display_name = name_cls[0]
    if self.objtype.endswith('function'):
        if modname:
            return _('%s() (in module %s)') % (display_name, modname)
        return _('%s() (built-in %s)') % (display_name, self.chpl_type_name)
    if self.objtype in ('data', 'type', 'enum'):
        if modname:
            return _('%s (in module %s)') % (display_name, modname)
        # 'data' objects render as 'variable' in the index.
        kind = 'variable' if self.objtype == 'data' else self.objtype
        return _('%s (built-in %s)') % (display_name, kind)
    return ''
class ChapelModuleLevel(ChapelObject):
    """Chapel module level functions, types, and variables (i.e. data directives)
    descriptions.
    """

    @property
    def chpl_type_name(self):
        """Returns type, iterator, or procedure or '' depending on
        object type.
        """
        if self.objtype == 'type':
            return 'type'
        elif not self.objtype.endswith('function'):
            return ''
        elif self.objtype.startswith('iter'):
            return 'iterator'
        elif self.objtype == 'function':
            return 'procedure'
        else:
            return ''

    def get_signature_prefix(self, sig):
        """Return signature prefix based on sig. May include portion of the sig
        text, if relevant (e.g. `proc foo()` will return `proc` here).
        """
        return self._get_sig_prefix(sig)

    def needs_arglist(self):
        """Procs and iters need arglists. Data directives do not."""
        return self.objtype.endswith('function')
|
def process_link(self, env, refnode, has_explicit_title, title, target):
    """Adjust the parsed xref title/target and attach the current Chapel
    module/class context to the reference node before returning them.
    """
    refnode['chpl:module'] = env.temp_data.get('chpl:module')
    refnode['chpl:class'] = env.temp_data.get('chpl:class')
    if not has_explicit_title:
        title = title.lstrip('.')    # a leading dot only matters for the target
        target = target.lstrip('~')  # a leading tilde only matters for the title
        if title.startswith('~'):
            # '~pkg.Name' displays only the last dotted component.
            title = title[1:]
            dot = title.rfind('.')
            if dot != -1:
                title = title[dot + 1:]
    # A leading dot on the target means: search more specific names first
    # (otherwise builtins are searched first).
    if target.startswith('.'):
        target = target[1:]
        refnode['refspecific'] = True
    return title, target
node. Alter the reference node and return it with chapel module and
class information, if relevant. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L601-L627 | null | class ChapelXRefRole(XRefRole):
class ChapelXRefRole(XRefRole):
    """Chapel cross-referencing role. Extends base XRefRole with special link
    processing method. The Chapel link processing knows how to match a chapel
    xref expression to the known objects, data, and modules in the current
    project/documents.
    """
|
def generate(self, docnames=None):
    """Build the Chapel module index.

    Returns ``(content, collapse)``: *content* is a sorted list of
    ``(letter, entries)`` pairs and *collapse* says whether sub-entries
    should start collapsed. When *docnames* is given, only modules coming
    from those documents are included.
    """
    content = {}
    # Prefixes to strip, longest first so the most specific one wins.
    strip_prefixes = sorted(
        self.domain.env.config['chapeldomain_modindex_common_prefix'],
        key=len, reverse=True)
    # All known modules, ordered case-insensitively by name.
    all_modules = sorted(iteritems(self.domain.data['modules']),
                         key=lambda item: item[0].lower())
    prev_modname = ''
    num_toplevels = 0
    for modname, (docname, synopsis, platforms, deprecated) in all_modules:
        # Restrict to the requested documents, when a list was given.
        if docnames and docname not in docnames:
            continue
        stripped = ''
        for prefix in strip_prefixes:
            if modname.startswith(prefix):
                modname = modname[len(prefix):]
                stripped = prefix
                break
        if not modname:
            # The ignore prefix consumed the entire name; undo the strip.
            modname, stripped = stripped, ''
        # Bucket modules by their first letter.
        entries = content.setdefault(modname[0].lower(), [])
        package = modname.split('.')[0]
        if package != modname:
            # This is a submodule.
            if prev_modname == package:
                # First submodule: promote the parent to a group head.
                if entries:
                    entries[-1][1] = 1
            elif not prev_modname.startswith(package):
                # Submodule whose parent is absent: add a dummy group entry.
                entries.append([stripped + package, 1, '', '', '', '', ''])
            subtype = 2
        else:
            num_toplevels += 1
            subtype = 0
        qualifier = deprecated and _('Deprecated') or ''
        entries.append([stripped + modname, subtype, docname,
                        'module-' + stripped + modname, platforms,
                        qualifier, synopsis])
        prev_modname = modname
    # Heuristic: collapse at page load only when submodules outnumber
    # top-level modules.
    collapse = len(all_modules) - num_toplevels < num_toplevels
    # Sort the buckets by their first letter.
    return sorted(iteritems(content)), collapse
restrict to entries referring to these docnames.
Retunrs tuple of ``(content, collapse)``. ``collapse`` is bool. When
True, sub-entries should start collapsed for output formats that
support collapsing.
``content`` is a sequence of ``(letter, entries)`` tuples. ``letter``
is the "heading" for the given ``entries``, in this case the starting
letter.
``entries`` is a sequence of single entries, where a single entry is a
sequence ``[name, subtype, docname, anchor, extra, qualifier,
description]``. These items are:
* ``name`` - name of the index entry to be displayed
* ``subtype`` - sub-entry related type:
* 0 - normal entry
* 1 - entry with sub-entries
* 2 - sub-entry
* ``docname`` - docname where the entry is located
* ``anchor`` - anchor for the entry within docname
* ``extra`` - extra info for the entry
* ``qualifier`` - qualifier for the description
* ``description`` - description for the entry
Qualifier and description are not rendered in some output formats. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L637-L731 | null | class ChapelModuleIndex(Index):
class ChapelModuleIndex(Index):
    """Provides Chapel module index based on chpl:module."""

    # Identifiers Sphinx uses for the generated "chpl-modindex" page.
    name = 'modindex'
    localname = l_('Chapel Module Index')
    shortname = l_('modules')
|
def clear_doc(self, docname):
    """Remove all domain data that was recorded from *docname*.

    Sphinx calls this before re-reading a document so stale entries from
    its previous build do not linger.
    """
    # Iterate over snapshots (list(...)) because entries are deleted while
    # walking; mutating a dict during iteration raises RuntimeError on
    # Python 3.
    for fullname, (fn, _objtype) in list(self.data['objects'].items()):
        if fn == docname:
            del self.data['objects'][fullname]
    for modname, (fn, _synopsis, _platform, _deprecated) in \
            list(self.data['modules'].items()):
        if fn == docname:
            del self.data['modules'][modname]
    for labelname, (fn, _labelid, _sectname) in \
            list(self.data['labels'].items()):
        if fn == docname:
            del self.data['labels'][labelname]
    for anonlabelname, (fn, _labelid) in \
            list(self.data['anonlabels'].items()):
        if fn == docname:
            del self.data['anonlabels'][anonlabelname]
class ChapelDomain(Domain):
    """Chapel language domain."""

    name = 'chpl'
    # Fixed from ``labels``: the Sphinx Domain API reads ``label`` as the
    # domain's display name; a ``labels`` attribute is never consulted.
    label = 'Chapel'

    object_types = {
        'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
        'type': ObjType(l_('type'), 'type', 'data'),
        'function': ObjType(l_('function'), 'func', 'proc'),
        'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
        'enum': ObjType(l_('enum'), 'enum'),
        'class': ObjType(l_('class'), 'class'),
        'record': ObjType(l_('record'), 'record'),
        'method': ObjType(l_('method'), 'meth', 'proc'),
        'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
        'attribute': ObjType(l_('attribute'), 'attr'),
        'module': ObjType(l_('module'), 'mod'),
    }

    directives = {
        'data': ChapelModuleLevel,
        'type': ChapelModuleLevel,
        'function': ChapelModuleLevel,
        'iterfunction': ChapelModuleLevel,
        # TODO: Consider making enums ChapelClassObject, then each constant
        #       becomes an attribute on the class. Then xrefs to each constant
        #       would be possible, plus it would scale to large numbers of
        #       constants. (thomasvandoren, 2015-03-12)
        'enum': ChapelModuleLevel,
        'class': ChapelClassObject,
        'record': ChapelClassObject,
        'method': ChapelClassMember,
        'itermethod': ChapelClassMember,
        'attribute': ChapelClassMember,
        'module': ChapelModule,
        'currentmodule': ChapelCurrentModule,
    }

    roles = {
        'data': ChapelXRefRole(),
        'const': ChapelXRefRole(),
        'var': ChapelXRefRole(),
        'param': ChapelXRefRole(),
        'type': ChapelXRefRole(),
        'func': ChapelXRefRole(),
        'proc': ChapelXRefRole(),
        'iter': ChapelXRefRole(),
        'class': ChapelXRefRole(),
        'record': ChapelXRefRole(),
        'enum': ChapelXRefRole(),
        'meth': ChapelXRefRole(),
        'attr': ChapelXRefRole(),
        'mod': ChapelXRefRole(),
        'chplref': ChapelXRefRole(),
    }

    initial_data = {
        'objects': {},  # fullname -> docname, objtype
        'modules': {},  # modname -> docname, synopsis, platform, deprecated
        'labels': {     # labelname -> docname, labelid, sectionname
            'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
        },
        'anonlabels': {  # labelname -> docname, labelid
            'chplmodindex': ('chpl-modindex', ''),
        },
    }

    indices = [
        ChapelModuleIndex,
    ]

    def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
        """Find a Chapel object for "name", possibly with module or class/record
        name. Returns a list of (name, object entry) tuples.

        :arg int searchmode: If 1, search more specific names first. Otherwise,
            search built-ins first and then get more specific.
        """
        if name[-2:] == '()':
            name = name[:-2]
        if not name:
            return []
        objects = self.data['objects']
        matches = []
        newname = None
        if searchmode == 1:
            if type_name is None:
                objtypes = list(self.object_types)
            else:
                objtypes = self.objtypes_for_role(type_name)
            if objtypes is not None:
                if modname and classname:
                    fullname = modname + '.' + classname + '.' + name
                    if (fullname in objects and
                            objects[fullname][1] in objtypes):
                        newname = fullname
                if not newname:
                    if (modname and modname + '.' + name in objects and
                            objects[modname + '.' + name][1] in objtypes):
                        newname = modname + '.' + name
                    elif name in objects and objects[name][1] in objtypes:
                        newname = name
                    else:
                        # "Fuzzy" search mode.
                        searchname = '.' + name
                        matches = [(oname, objects[oname]) for oname in objects
                                   if oname.endswith(searchname) and
                                   objects[oname][1] in objtypes]
        else:
            # NOTE: Search for exact match, object type is not considered.
            if name in objects:
                newname = name
            elif type_name == 'mod':
                # Only exact matches allowed for modules.
                return []
            elif classname and classname + '.' + name in objects:
                newname = classname + '.' + name
            elif modname and modname + '.' + name in objects:
                newname = modname + '.' + name
            elif (modname and classname and
                    modname + '.' + classname + '.' + name in objects):
                newname = modname + '.' + classname + '.' + name
        if newname is not None:
            matches.append((newname, objects[newname]))
        return matches

    def resolve_xref(self, env, fromdocname, builder,
                     type_name, target, node, contnode):
        """Resolve the pending_xref *node* with given *type_name* and *target*.
        Returns None if xref node can not be resolved. If xref can be resolved,
        returns new node containing the *contnode*.
        """
        # Special case the :chpl:chplref:`chplmodindex` instances.
        if type_name == 'chplref':
            if node['refexplicit']:
                # Reference to anonymous label. The reference uses the supplied
                # link caption.
                docname, labelid = self.data['anonlabels'].get(
                    target, ('', ''))
                sectname = node.astext()
            else:
                # Reference to named label. The final node will contain the
                # section name after the label.
                docname, labelid, sectname = self.data['labels'].get(
                    target, ('', '', ''))
            if not docname:
                return None
            return self._make_refnode(
                fromdocname, builder, docname, labelid, sectname, contnode)
        modname = node.get('chpl:module')
        clsname = node.get('chpl:class')
        searchmode = 1 if node.hasattr('refspecific') else 0
        matches = self.find_obj(env, modname, clsname, target,
                                type_name, searchmode)
        if not matches:
            return None
        elif len(matches) > 1:
            env.warn_node(
                'more than one target found for cross-reference '
                '%r: %s' % (target, ', '.join(match[0] for match in matches)),
                node)
        name, obj = matches[0]
        if obj[1] == 'module':
            return self._make_module_refnode(
                builder, fromdocname, name, contnode)
        else:
            return make_refnode(builder, fromdocname, obj[0], name,
                                contnode, name)

    def resolve_any_xref(self, env, fromdocname, builder, target,
                         node, contnode):
        """Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
        similar role where type is not known. This returns a list of tuples
        with ("domain:role", newnode).
        """
        modname = node.get('chpl:module')
        clsname = node.get('chpl:class')
        results = []
        # Always search in "refspecific" mode with the :any: role.
        matches = self.find_obj(env, modname, clsname, target, None, 1)
        for name, obj in matches:
            if obj[1] == 'module':
                results.append(('chpl:mod',
                                self._make_module_refnode(builder, fromdocname,
                                                          name, contnode)))
            else:
                results.append(
                    ('chpl:' + self.role_for_objtype(obj[1]),
                     make_refnode(builder, fromdocname, obj[0], name,
                                  contnode, name)))
        return results

    def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
                      contnode, **kwargs):
        """Return reference node for something like ``:chpl:chplref:``."""
        nodeclass = kwargs.pop('nodeclass', nodes.reference)
        newnode = nodeclass('', '', internal=True, **kwargs)
        innernode = nodes.emphasis(sectname, sectname)
        if docname == fromdocname:
            newnode['refid'] = labelid
        else:
            # Set more info on contnode. In case the get_relative_uri call
            # raises NoUri, the builder will then have to resolve these.
            contnode = addnodes.pending_xref('')
            contnode['refdocname'] = docname
            contnode['refsectname'] = sectname
            newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
            if labelid:
                newnode['refuri'] += '#' + labelid
        newnode.append(innernode)
        return newnode

    def _make_module_refnode(self, builder, fromdocname, name, contnode):
        """Helper function to generate new xref node based on
        current environment.
        """
        # Get additional info for modules.
        docname, synopsis, platform, deprecated = self.data['modules'][name]
        title = name
        if synopsis:
            title += ': ' + synopsis
        if deprecated:
            title += _(' (deprecated)')
        if platform:
            title += ' (' + platform + ')'
        return make_refnode(builder, fromdocname, docname,
                            'module-' + name, contnode, title)

    def merge_domaindata(self, docnames, otherdata):
        """Merge in data regarding *docnames* from a different domaindata inventory
        (coming from a subprocess in a parallel build).
        """
        for fullname, (fn, objtype) in otherdata['objects'].items():
            if fn in docnames:
                self.data['objects'][fullname] = (fn, objtype)
        for modname, data in otherdata['modules'].items():
            if data[0] in docnames:
                self.data['modules'][modname] = data
        for labelname, data in otherdata['labels'].items():
            if data[0] in docnames:
                self.data['labels'][labelname] = data
        for anonlabelname, data in otherdata['anonlabels'].items():
            if data[0] in docnames:
                self.data['anonlabels'][anonlabelname] = data

    def get_objects(self):
        """Return iterable of "object descriptions", which are tuple with these items:

        * `name`
        * `dispname`
        * `type`
        * `docname`
        * `anchor`
        * `priority`

        For details on each item, see
        :py:meth:`~sphinx.domains.Domain.get_objects`.
        """
        for modname, info in self.data['modules'].items():
            yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
        for refname, (docname, type_name) in self.data['objects'].items():
            if type_name != 'module':  # modules are already handled
                yield (refname, refname, type_name, docname, refname, 1)
|
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
    """Find a Chapel object for *name*, possibly qualified by module or
    class/record name. Returns a list of ``(name, object entry)`` tuples.

    :arg int searchmode: If 1, search more specific names first. Otherwise,
        search built-ins first and then get more specific.
    """
    if name.endswith('()'):
        name = name[:-2]
    if not name:
        return []
    objects = self.data['objects']
    found = None
    hits = []
    if searchmode == 1:
        if type_name is None:
            objtypes = list(self.object_types)
        else:
            objtypes = self.objtypes_for_role(type_name)
        if objtypes is not None:
            def accepts(candidate):
                # Candidate must exist and have one of the allowed objtypes.
                return (candidate in objects and
                        objects[candidate][1] in objtypes)
            if modname and classname:
                qualified = modname + '.' + classname + '.' + name
                if accepts(qualified):
                    found = qualified
            if not found:
                if modname and accepts(modname + '.' + name):
                    found = modname + '.' + name
                elif accepts(name):
                    found = name
                else:
                    # "Fuzzy" search: any object whose dotted path ends
                    # with name.
                    suffix = '.' + name
                    hits = [(oname, objects[oname]) for oname in objects
                            if oname.endswith(suffix) and
                            objects[oname][1] in objtypes]
    else:
        # Exact-match search; object type is not considered.
        if name in objects:
            found = name
        elif type_name == 'mod':
            # Only exact matches allowed for modules.
            return []
        elif classname and classname + '.' + name in objects:
            found = classname + '.' + name
        elif modname and modname + '.' + name in objects:
            found = modname + '.' + name
        elif (modname and classname and
                modname + '.' + classname + '.' + name in objects):
            found = modname + '.' + classname + '.' + name
    if found is not None:
        hits.append((found, objects[found]))
    return hits
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L823-L880 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelDomain.resolve_xref | python | def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name) | Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L882-L928 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelDomain.resolve_any_xref | python | def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results | Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode). | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L930-L953 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelDomain._make_refnode | python | def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode | Return reference node for something like ``:chpl:chplref:``. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L955-L973 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelDomain._make_module_refnode | python | def _make_module_refnode(self, builder, fromdocname, name, contnode):
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title) | Helper function to generate new xref node based on
current environment. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L975-L989 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelDomain.merge_domaindata | python | def merge_domaindata(self, docnames, otherdata):
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data | Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build). | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L991-L1006 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
|
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | ChapelDomain.get_objects | python | def get_objects(self):
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1) | Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`. | train | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L1008-L1025 | null | class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
labels = 'Chapel'
object_types = {
'data': ObjType(l_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(l_('type'), 'type', 'data'),
'function': ObjType(l_('function'), 'func', 'proc'),
'iterfunction': ObjType(l_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(l_('enum'), 'enum'),
'class': ObjType(l_('class'), 'class'),
'record': ObjType(l_('record'), 'record'),
'method': ObjType(l_('method'), 'meth', 'proc'),
'itermethod': ObjType(l_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(l_('attribute'), 'attr'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', l_('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
del self.data['modules'][modname]
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
del self.data['labels'][labelname]
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming froma subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
|
askedrelic/libgreader | libgreader/auth.py | ClientAuthMethod.get | python | def get(self, url, parameters=None):
getString = self.getParameters(parameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(url + "?" + getString, headers=headers)
return req.text | Convenience method for requesting to google with proper cookies/params. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L74-L81 | [
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n"
] | class ClientAuthMethod(AuthenticationMethod):
"""
Auth type which requires a valid Google Reader username and password
"""
CLIENT_URL = 'https://www.google.com/accounts/ClientLogin'
def __init__(self, username, password):
super(ClientAuthMethod, self).__init__()
self.username = username
self.password = password
self.auth_token = self._getAuth()
self.token = self._getToken()
def postParameters(self, post=None):
post.update({'T': self.token})
return super(ClientAuthMethod, self).postParameters(post)
def post(self, url, postParameters=None, urlParameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if urlParameters:
url = url + "?" + self.getParameters(urlParameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
'Content-Type': 'application/x-www-form-urlencoded'
}
postString = self.postParameters(postParameters)
req = requests.post(url, data=postString, headers=headers)
return req.text
def _getAuth(self):
"""
Main step in authorizing with Reader.
Sends request to Google ClientAuthMethod URL which returns an Auth token.
Returns Auth token or raises IOError on error.
"""
parameters = {
'service' : 'reader',
'Email' : self.username,
'Passwd' : self.password,
'accountType' : 'GOOGLE'}
req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)
if req.status_code != 200:
raise IOError("Error getting the Auth token, have you entered a"
"correct username and password?")
data = req.text
#Strip newline and non token text.
token_dict = dict(x.split('=') for x in data.split('\n') if x)
return token_dict["Auth"]
def _getToken(self):
"""
Second step in authorizing with Reader.
Sends authorized request to Reader token URL and returns a token value.
Returns token or raises IOError on error.
"""
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(ReaderUrl.API_URL + 'token', headers=headers)
if req.status_code != 200:
raise IOError("Error getting the Reader token.")
return req.content
|
askedrelic/libgreader | libgreader/auth.py | ClientAuthMethod.post | python | def post(self, url, postParameters=None, urlParameters=None):
if urlParameters:
url = url + "?" + self.getParameters(urlParameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
'Content-Type': 'application/x-www-form-urlencoded'
}
postString = self.postParameters(postParameters)
req = requests.post(url, data=postString, headers=headers)
return req.text | Convenience method for requesting to google with proper cookies/params. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L83-L94 | [
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n",
"def postParameters(self, post=None):\n post.update({'T': self.token})\n return super(ClientAuthMethod, self).po... | class ClientAuthMethod(AuthenticationMethod):
"""
Auth type which requires a valid Google Reader username and password
"""
CLIENT_URL = 'https://www.google.com/accounts/ClientLogin'
def __init__(self, username, password):
super(ClientAuthMethod, self).__init__()
self.username = username
self.password = password
self.auth_token = self._getAuth()
self.token = self._getToken()
def postParameters(self, post=None):
post.update({'T': self.token})
return super(ClientAuthMethod, self).postParameters(post)
def get(self, url, parameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
getString = self.getParameters(parameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(url + "?" + getString, headers=headers)
return req.text
def _getAuth(self):
"""
Main step in authorizing with Reader.
Sends request to Google ClientAuthMethod URL which returns an Auth token.
Returns Auth token or raises IOError on error.
"""
parameters = {
'service' : 'reader',
'Email' : self.username,
'Passwd' : self.password,
'accountType' : 'GOOGLE'}
req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)
if req.status_code != 200:
raise IOError("Error getting the Auth token, have you entered a"
"correct username and password?")
data = req.text
#Strip newline and non token text.
token_dict = dict(x.split('=') for x in data.split('\n') if x)
return token_dict["Auth"]
def _getToken(self):
"""
Second step in authorizing with Reader.
Sends authorized request to Reader token URL and returns a token value.
Returns token or raises IOError on error.
"""
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(ReaderUrl.API_URL + 'token', headers=headers)
if req.status_code != 200:
raise IOError("Error getting the Reader token.")
return req.content
|
askedrelic/libgreader | libgreader/auth.py | ClientAuthMethod._getAuth | python | def _getAuth(self):
parameters = {
'service' : 'reader',
'Email' : self.username,
'Passwd' : self.password,
'accountType' : 'GOOGLE'}
req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)
if req.status_code != 200:
raise IOError("Error getting the Auth token, have you entered a"
"correct username and password?")
data = req.text
#Strip newline and non token text.
token_dict = dict(x.split('=') for x in data.split('\n') if x)
return token_dict["Auth"] | Main step in authorizing with Reader.
Sends request to Google ClientAuthMethod URL which returns an Auth token.
Returns Auth token or raises IOError on error. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L96-L115 | null | class ClientAuthMethod(AuthenticationMethod):
"""
Auth type which requires a valid Google Reader username and password
"""
CLIENT_URL = 'https://www.google.com/accounts/ClientLogin'
def __init__(self, username, password):
super(ClientAuthMethod, self).__init__()
self.username = username
self.password = password
self.auth_token = self._getAuth()
self.token = self._getToken()
def postParameters(self, post=None):
post.update({'T': self.token})
return super(ClientAuthMethod, self).postParameters(post)
def get(self, url, parameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
getString = self.getParameters(parameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(url + "?" + getString, headers=headers)
return req.text
def post(self, url, postParameters=None, urlParameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if urlParameters:
url = url + "?" + self.getParameters(urlParameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
'Content-Type': 'application/x-www-form-urlencoded'
}
postString = self.postParameters(postParameters)
req = requests.post(url, data=postString, headers=headers)
return req.text
def _getToken(self):
"""
Second step in authorizing with Reader.
Sends authorized request to Reader token URL and returns a token value.
Returns token or raises IOError on error.
"""
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(ReaderUrl.API_URL + 'token', headers=headers)
if req.status_code != 200:
raise IOError("Error getting the Reader token.")
return req.content
|
askedrelic/libgreader | libgreader/auth.py | ClientAuthMethod._getToken | python | def _getToken(self):
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(ReaderUrl.API_URL + 'token', headers=headers)
if req.status_code != 200:
raise IOError("Error getting the Reader token.")
return req.content | Second step in authorizing with Reader.
Sends authorized request to Reader token URL and returns a token value.
Returns token or raises IOError on error. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L117-L128 | null | class ClientAuthMethod(AuthenticationMethod):
"""
Auth type which requires a valid Google Reader username and password
"""
CLIENT_URL = 'https://www.google.com/accounts/ClientLogin'
def __init__(self, username, password):
super(ClientAuthMethod, self).__init__()
self.username = username
self.password = password
self.auth_token = self._getAuth()
self.token = self._getToken()
def postParameters(self, post=None):
post.update({'T': self.token})
return super(ClientAuthMethod, self).postParameters(post)
def get(self, url, parameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
getString = self.getParameters(parameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token}
req = requests.get(url + "?" + getString, headers=headers)
return req.text
def post(self, url, postParameters=None, urlParameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if urlParameters:
url = url + "?" + self.getParameters(urlParameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
'Content-Type': 'application/x-www-form-urlencoded'
}
postString = self.postParameters(postParameters)
req = requests.post(url, data=postString, headers=headers)
return req.text
def _getAuth(self):
"""
Main step in authorizing with Reader.
Sends request to Google ClientAuthMethod URL which returns an Auth token.
Returns Auth token or raises IOError on error.
"""
parameters = {
'service' : 'reader',
'Email' : self.username,
'Passwd' : self.password,
'accountType' : 'GOOGLE'}
req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)
if req.status_code != 200:
raise IOError("Error getting the Auth token, have you entered a"
"correct username and password?")
data = req.text
#Strip newline and non token text.
token_dict = dict(x.split('=') for x in data.split('\n') if x)
return token_dict["Auth"]
|
askedrelic/libgreader | libgreader/auth.py | OAuthMethod.post | python | def post(self, url, postParameters=None, urlParameters=None):
if self.authorized_client:
if urlParameters:
getString = self.getParameters(urlParameters)
req = urllib2.Request(url + "?" + getString)
else:
req = urllib2.Request(url)
postString = self.postParameters(postParameters)
resp,content = self.authorized_client.request(req, method="POST", body=postString)
return toUnicode(content)
else:
raise IOError("No authorized client available.") | Convenience method for requesting to google with proper cookies/params. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L218-L232 | [
"def toUnicode(obj, encoding='utf-8'):\n return obj\n",
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n",
"def postParameters(self, post=None):\n return post\n"... | class OAuthMethod(AuthenticationMethod):
"""
Loose wrapper around OAuth2 lib. Kinda awkward.
"""
GOOGLE_URL = 'https://www.google.com/accounts/'
REQUEST_TOKEN_URL = (GOOGLE_URL + 'OAuthGetRequestToken?scope=%s' %
ReaderUrl.READER_BASE_URL)
AUTHORIZE_URL = GOOGLE_URL + 'OAuthAuthorizeToken'
ACCESS_TOKEN_URL = GOOGLE_URL + 'OAuthGetAccessToken'
def __init__(self, consumer_key, consumer_secret):
if not has_oauth:
raise ImportError("No module named oauth2")
super(OAuthMethod, self).__init__()
self.oauth_key = consumer_key
self.oauth_secret = consumer_secret
self.consumer = oauth.Consumer(self.oauth_key, self.oauth_secret)
self.authorized_client = None
self.token_key = None
self.token_secret = None
self.callback = None
self.username = "OAuth"
def setCallback(self, callback_url):
self.callback = '&oauth_callback=%s' % callback_url
def setRequestToken(self):
# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.
client = oauth.Client(self.consumer)
if not self.callback:
resp, content = client.request(OAuthMethod.REQUEST_TOKEN_URL)
else:
resp, content = client.request(OAuthMethod.REQUEST_TOKEN_URL + self.callback)
if int(resp['status']) != 200:
raise IOError("Error setting Request Token")
token_dict = dict(urlparse.parse_qsl(content))
self.token_key = token_dict['oauth_token']
self.token_secret = token_dict['oauth_token_secret']
def setAndGetRequestToken(self):
self.setRequestToken()
return (self.token_key, self.token_secret)
def buildAuthUrl(self, token_key=None):
if not token_key:
token_key = self.token_key
#return auth url for user to click or redirect to
return "%s?oauth_token=%s" % (OAuthMethod.AUTHORIZE_URL, token_key)
def setAccessToken(self):
self.setAccessTokenFromCallback(self.token_key, self.token_secret, None)
def setAccessTokenFromCallback(self, token_key, token_secret, verifier):
token = oauth.Token(token_key, token_secret)
#step 2 depends on callback
if verifier:
token.set_verifier(verifier)
client = oauth.Client(self.consumer, token)
resp, content = client.request(OAuthMethod.ACCESS_TOKEN_URL, "POST")
if int(resp['status']) != 200:
raise IOError("Error setting Access Token")
access_token = dict(urlparse.parse_qsl(content))
#created Authorized client using access tokens
self.authFromAccessToken(access_token['oauth_token'],
access_token['oauth_token_secret'])
def authFromAccessToken(self, oauth_token, oauth_token_secret):
self.token_key = oauth_token
self.token_secret = oauth_token_secret
token = oauth.Token(oauth_token,oauth_token_secret)
self.authorized_client = oauth.Client(self.consumer, token)
def getAccessToken(self):
return (self.token_key, self.token_secret)
def get(self, url, parameters=None):
if self.authorized_client:
getString = self.getParameters(parameters)
#can't pass in urllib2 Request object here?
resp, content = self.authorized_client.request(url + "?" + getString)
return toUnicode(content)
else:
raise IOError("No authorized client available.")
|
askedrelic/libgreader | libgreader/auth.py | OAuth2Method.get | python | def get(self, url, parameters=None):
if not self.access_token:
raise IOError("No authorized client available.")
if parameters is None:
parameters = {}
parameters.update({'access_token': self.access_token, 'alt': 'json'})
request = requests.get(url + '?' + self.getParameters(parameters))
if request.status_code != 200:
return None
else:
return toUnicode(request.text) | Convenience method for requesting to google with proper cookies/params. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L303-L316 | [
"def toUnicode(obj, encoding='utf-8'):\n return obj\n",
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n"
] | class OAuth2Method(AuthenticationMethod):
'''
Google OAuth2 base method.
'''
GOOGLE_URL = 'https://accounts.google.com'
AUTHORIZATION_URL = GOOGLE_URL + '/o/oauth2/auth'
ACCESS_TOKEN_URL = GOOGLE_URL + '/o/oauth2/token'
SCOPE = [
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.google.com/reader/api/',
]
def __init__(self, client_id, client_secret):
super(OAuth2Method, self).__init__()
self.client_id = client_id
self.client_secret = client_secret
self.authorized_client = None
self.code = None
self.access_token = None
self.action_token = None
self.redirect_uri = None
self.username = "OAuth2"
def setRedirectUri(self, redirect_uri):
self.redirect_uri = redirect_uri
def buildAuthUrl(self):
args = {
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': ' '.join(self.SCOPE),
'response_type': 'code',
}
return self.AUTHORIZATION_URL + '?' + urlencode(args)
def setActionToken(self):
'''
Get action to prevent XSRF attacks
http://code.google.com/p/google-reader-api/wiki/ActionToken
TODO: mask token expiring? handle regenerating?
'''
self.action_token = self.get(ReaderUrl.ACTION_TOKEN_URL)
def setAccessToken(self):
params = {
'grant_type': 'authorization_code', # request auth code
'code': self.code, # server response code
'client_id': self.client_id,
'client_secret': self.client_secret,
'redirect_uri': self.redirect_uri
}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
request = requests.post(self.ACCESS_TOKEN_URL, data=params,
headers=headers)
if request.status_code != 200:
raise IOError('Error getting Access Token')
response = request.json()
if 'access_token' not in response:
raise IOError('Error getting Access Token')
else:
self.authFromAccessToken(response['access_token'])
def authFromAccessToken(self, access_token):
self.access_token = access_token
def post(self, url, postParameters=None, urlParameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if not self.access_token:
raise IOError("No authorized client available.")
if not self.action_token:
raise IOError("Need to generate action token.")
if urlParameters is None:
urlParameters = {}
headers = {'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/x-www-form-urlencoded'}
postParameters.update({'T':self.action_token})
request = requests.post(url + '?' + self.getParameters(urlParameters),
data=postParameters, headers=headers)
if request.status_code != 200:
return None
else:
return toUnicode(request.text)
|
askedrelic/libgreader | libgreader/auth.py | OAuth2Method.post | python | def post(self, url, postParameters=None, urlParameters=None):
if not self.access_token:
raise IOError("No authorized client available.")
if not self.action_token:
raise IOError("Need to generate action token.")
if urlParameters is None:
urlParameters = {}
headers = {'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/x-www-form-urlencoded'}
postParameters.update({'T':self.action_token})
request = requests.post(url + '?' + self.getParameters(urlParameters),
data=postParameters, headers=headers)
if request.status_code != 200:
return None
else:
return toUnicode(request.text) | Convenience method for requesting to google with proper cookies/params. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L318-L336 | [
"def toUnicode(obj, encoding='utf-8'):\n return obj\n",
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n"
] | class OAuth2Method(AuthenticationMethod):
'''
Google OAuth2 base method.
'''
GOOGLE_URL = 'https://accounts.google.com'
AUTHORIZATION_URL = GOOGLE_URL + '/o/oauth2/auth'
ACCESS_TOKEN_URL = GOOGLE_URL + '/o/oauth2/token'
SCOPE = [
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.google.com/reader/api/',
]
def __init__(self, client_id, client_secret):
super(OAuth2Method, self).__init__()
self.client_id = client_id
self.client_secret = client_secret
self.authorized_client = None
self.code = None
self.access_token = None
self.action_token = None
self.redirect_uri = None
self.username = "OAuth2"
def setRedirectUri(self, redirect_uri):
self.redirect_uri = redirect_uri
def buildAuthUrl(self):
args = {
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': ' '.join(self.SCOPE),
'response_type': 'code',
}
return self.AUTHORIZATION_URL + '?' + urlencode(args)
def setActionToken(self):
'''
Get action to prevent XSRF attacks
http://code.google.com/p/google-reader-api/wiki/ActionToken
TODO: mask token expiring? handle regenerating?
'''
self.action_token = self.get(ReaderUrl.ACTION_TOKEN_URL)
def setAccessToken(self):
params = {
'grant_type': 'authorization_code', # request auth code
'code': self.code, # server response code
'client_id': self.client_id,
'client_secret': self.client_secret,
'redirect_uri': self.redirect_uri
}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
request = requests.post(self.ACCESS_TOKEN_URL, data=params,
headers=headers)
if request.status_code != 200:
raise IOError('Error getting Access Token')
response = request.json()
if 'access_token' not in response:
raise IOError('Error getting Access Token')
else:
self.authFromAccessToken(response['access_token'])
def authFromAccessToken(self, access_token):
self.access_token = access_token
def get(self, url, parameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if not self.access_token:
raise IOError("No authorized client available.")
if parameters is None:
parameters = {}
parameters.update({'access_token': self.access_token, 'alt': 'json'})
request = requests.get(url + '?' + self.getParameters(parameters))
if request.status_code != 200:
return None
else:
return toUnicode(request.text)
|
askedrelic/libgreader | libgreader/auth.py | GAPDecoratorAuthMethod._setupHttp | python | def _setupHttp(self):
if self._http == None:
http = httplib2.Http()
self._http = self._credentials.authorize(http) | Setup an HTTP session authorized by OAuth2. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L356-L362 | null | class GAPDecoratorAuthMethod(AuthenticationMethod):
"""
An adapter to work with Google API for Python OAuth2 wrapper.
Especially useful when deploying to Google AppEngine.
"""
def __init__(self, credentials):
"""
Initialize auth method with existing credentials.
Args:
credentials: OAuth2 credentials obtained via GAP OAuth2 library.
"""
if not has_httplib2:
raise ImportError("No module named httplib2")
super(GAPDecoratorAuthMethod, self).__init__()
self._http = None
self._credentials = credentials
self._action_token = None
def get(self, url, parameters=None):
"""
Implement libgreader's interface for authenticated GET request
"""
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(parameters)
response, content = self._http.request(uri, "GET")
return content
def post(self, url, postParameters=None, urlParameters=None):
"""
Implement libgreader's interface for authenticated POST request
"""
if self._action_token == None:
self._action_token = self.get(ReaderUrl.ACTION_TOKEN_URL)
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(urlParameters)
postParameters.update({'T':self._action_token})
body = self.postParameters(postParameters)
response, content = self._http.request(uri, "POST", body=body)
return content
|
askedrelic/libgreader | libgreader/auth.py | GAPDecoratorAuthMethod.get | python | def get(self, url, parameters=None):
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(parameters)
response, content = self._http.request(uri, "GET")
return content | Implement libgreader's interface for authenticated GET request | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L364-L372 | [
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n",
"def _setupHttp(self):\n \"\"\"\n Setup an HTTP session authorized by OAuth2.\n \"\"\"\n if self._http == ... | class GAPDecoratorAuthMethod(AuthenticationMethod):
"""
An adapter to work with Google API for Python OAuth2 wrapper.
Especially useful when deploying to Google AppEngine.
"""
def __init__(self, credentials):
"""
Initialize auth method with existing credentials.
Args:
credentials: OAuth2 credentials obtained via GAP OAuth2 library.
"""
if not has_httplib2:
raise ImportError("No module named httplib2")
super(GAPDecoratorAuthMethod, self).__init__()
self._http = None
self._credentials = credentials
self._action_token = None
def _setupHttp(self):
"""
Setup an HTTP session authorized by OAuth2.
"""
if self._http == None:
http = httplib2.Http()
self._http = self._credentials.authorize(http)
def post(self, url, postParameters=None, urlParameters=None):
"""
Implement libgreader's interface for authenticated POST request
"""
if self._action_token == None:
self._action_token = self.get(ReaderUrl.ACTION_TOKEN_URL)
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(urlParameters)
postParameters.update({'T':self._action_token})
body = self.postParameters(postParameters)
response, content = self._http.request(uri, "POST", body=body)
return content
|
askedrelic/libgreader | libgreader/auth.py | GAPDecoratorAuthMethod.post | python | def post(self, url, postParameters=None, urlParameters=None):
if self._action_token == None:
self._action_token = self.get(ReaderUrl.ACTION_TOKEN_URL)
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(urlParameters)
postParameters.update({'T':self._action_token})
body = self.postParameters(postParameters)
response, content = self._http.request(uri, "POST", body=body)
return content | Implement libgreader's interface for authenticated POST request | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L374-L387 | [
"def getParameters(self, extraargs=None):\n parameters = {'ck':time.time(), 'client':self.client}\n if extraargs:\n parameters.update(extraargs)\n return urlencode(parameters)\n",
"def postParameters(self, post=None):\n return post\n",
"def _setupHttp(self):\n \"\"\"\n Setup an HTTP ses... | class GAPDecoratorAuthMethod(AuthenticationMethod):
"""
An adapter to work with Google API for Python OAuth2 wrapper.
Especially useful when deploying to Google AppEngine.
"""
def __init__(self, credentials):
"""
Initialize auth method with existing credentials.
Args:
credentials: OAuth2 credentials obtained via GAP OAuth2 library.
"""
if not has_httplib2:
raise ImportError("No module named httplib2")
super(GAPDecoratorAuthMethod, self).__init__()
self._http = None
self._credentials = credentials
self._action_token = None
def _setupHttp(self):
"""
Setup an HTTP session authorized by OAuth2.
"""
if self._http == None:
http = httplib2.Http()
self._http = self._credentials.authorize(http)
def get(self, url, parameters=None):
"""
Implement libgreader's interface for authenticated GET request
"""
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(parameters)
response, content = self._http.request(uri, "GET")
return content
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.buildSubscriptionList | python | def buildSubscriptionList(self):
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True | Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L74-L134 | [
"def getUserInfo(self):\n \"\"\"\n Returns a dictionary of user info that google stores.\n \"\"\"\n userJson = self.httpGet(ReaderUrl.USER_INFO_URL)\n result = json.loads(userJson, strict=False)\n self.userId = result['userId']\n return result\n",
"def httpGet(self, url, parameters=None):\n ... | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader._getFeedContent | python | def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False) | A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner) | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L136-L162 | [
"def httpGet(self, url, parameters=None):\n \"\"\"\n Wrapper around AuthenticationMethod get()\n \"\"\"\n return self.auth.get(url, parameters)\n"
] | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.getFeedContent | python | def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until) | Return items for a particular feed | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L170-L174 | [
"def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\n \"\"\"\n A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)\n\n Returns a dict with\n :param id: (str, feed's id)\n :param continuation: (str, to be used to... | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.getCategoryContent | python | def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until) | Return items for a particular category | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L176-L180 | [
"def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\n \"\"\"\n A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)\n\n Returns a dict with\n :param id: (str, feed's id)\n :param continuation: (str, to be used to... | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader._modifyItemTag | python | def _modifyItemTag(self, item_id, action, tag):
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'}) | wrapper around actual HTTP POST string for modify tags | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L182-L185 | [
"def httpPost(self, url, post_parameters=None):\n \"\"\"\n Wrapper around AuthenticationMethod post()\n \"\"\"\n return self.auth.post(url, post_parameters)\n"
] | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.addItemTag | python | def addItemTag(self, item, tag):
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag) | Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]" | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L201-L214 | [
"def _modifyItemTag(self, item_id, action, tag):\n \"\"\" wrapper around actual HTTP POST string for modify tags \"\"\"\n return self.httpPost(ReaderUrl.EDIT_TAG_URL,\n {'i': item_id, action: tag, 'ac': 'edit-tags'})\n"
] | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.subscribe | python | def subscribe(self, feedUrl):
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False | Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L235-L251 | [
"def httpPost(self, url, post_parameters=None):\n \"\"\"\n Wrapper around AuthenticationMethod post()\n \"\"\"\n return self.auth.post(url, post_parameters)\n"
] | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.getUserInfo | python | def getUserInfo(self):
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result | Returns a dictionary of user info that google stores. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L271-L278 | [
"def httpGet(self, url, parameters=None):\n \"\"\"\n Wrapper around AuthenticationMethod get()\n \"\"\"\n return self.auth.get(url, parameters)\n"
] | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader.getUserSignupDate | python | def getUserSignupDate(self):
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp)) | Returns the human readable date of when the user signed up for google reader. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L280-L286 | [
"def getUserInfo(self):\n \"\"\"\n Returns a dictionary of user info that google stores.\n \"\"\"\n userJson = self.httpGet(ReaderUrl.USER_INFO_URL)\n result = json.loads(userJson, strict=False)\n self.userId = result['userId']\n return result\n"
] | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
|
askedrelic/libgreader | libgreader/googlereader.py | GoogleReader._clearLists | python | def _clearLists(self):
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = [] | Clear all list before sync : feeds and categories | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L316-L324 | null | class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
def __repr__(self):
return "<Google Reader object: %s>" % self.auth.username
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<Google Reader object: %s>" % self.auth.username
def __init__(self, auth):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = None
self.addTagBacklog = {}
self.inItemTagTransaction = False
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getFeeds(self):
"""
@Deprecated, see getSubscriptionList
"""
return self.feeds
def getSubscriptionList(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def makeSpecialFeeds(self):
for type in ReaderUrl.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
parameters['n'] = loadLimit
if since:
parameters['ot'] = since
if until:
parameters['nt'] = until
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def getCategoryContent(self, category, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Return items for a particular category
"""
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, loadLimit, since, until)
def _modifyItemTag(self, item_id, action, tag):
""" wrapper around actual HTTP POST string for modify tags """
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'})
def removeItemTag(self, item, tag):
"""
Remove a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
return self._modifyItemTag(item.id, 'r', tag)
def beginAddItemTagTransaction(self):
if self.inItemTagTransaction:
raise Exception("Already in addItemTag transaction")
self.addTagBacklog = {}
self.inItemTagTransaction = True
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag)
def commitAddItemTagTransaction(self):
if self.inItemTagTransaction:
for tag in self.addTagBacklog:
itemIds = [item['i'] for item in self.addTagBacklog[tag]]
feedIds = [item['s'] for item in self.addTagBacklog[tag]]
self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': itemIds, 'a': tag, 'ac': 'edit-tags', 's': feedIds})
self.addTagBacklog = {}
self.inItemTagTransaction = False
return True
else:
raise Exception("Not in addItemTag transaction")
def markFeedAsRead(self, feed):
return self.httpPost(
ReaderUrl.MARK_ALL_READ_URL,
{'s': feed.id, })
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
Ubscribing seems idempotent, you can subscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def unsubscribe(self, feedUrl):
"""
Removes a feed url from the top-level subscription list
Unsubscribing seems idempotent, you can unsubscribe multiple times
without error
returns True or throws HTTPError
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'unsubscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
|
askedrelic/libgreader | libgreader/items.py | ItemsContainer.loadItems | python | def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
self.clearItems()
self.loadtLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until)) | Load items and call itemsLoadedDone to transform data in objects | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/items.py#L27-L34 | [
"def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\n \"\"\"\n Get content from google reader with specified parameters.\n Must be overladed in inherited clases\n \"\"\"\n return None\n",
"def _itemsLoadedDone(self, data):\n \"\"\"\n Called when... | class ItemsContainer(object):
"""
A base class used for all classes aimed to have items (Categories and Feeds)
"""
def __init__(self):
self.items = []
self.itemsById = {}
self.lastLoadOk = False
self.lastLoadLength = 0
self.lastUpdated = None
self.unread = 0
self.continuation = None
def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Get content from google reader with specified parameters.
Must be overladed in inherited clases
"""
return None
def loadMoreItems(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Load more items using the continuation parameters of previously loaded items.
"""
self.lastLoadOk = False
self.lastLoadLength = 0
if not continuation and not self.continuation:
return
self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation, loadLimit, since, until))
def _itemsLoadedDone(self, data):
"""
Called when all items are loaded
"""
if data is None:
return
self.continuation = data.get('continuation', None)
self.lastUpdated = data.get('updated', None)
self.lastLoadLength = len(data.get('items', []))
self.googleReader.itemsToObjects(self, data.get('items', []))
self.lastLoadOk = True
def _addItem(self, item):
self.items.append(item)
self.itemsById[item.id] = item
def getItem(self, id):
return self.itemsById[id]
def clearItems(self):
self.items = []
self.itemsById = {}
self.continuation = None
def getItems(self):
return self.items
def countItems(self, excludeRead=False):
if excludeRead:
sum([1 for item in self.items if item.isUnread()])
else:
return len(self.items)
def markItemRead(self, item, read):
if read and item.isUnread():
self.unread -= 1
elif not read and item.isRead():
self.unread += 1
def markAllRead(self):
self.unread = 0
for item in self.items:
item.read = True
item.canUnread = False
result = self.googleReader.markFeedAsRead(self)
return result.upper() == 'OK'
def countUnread(self):
self.unread = self.countItems(excludeRead=True)
|
askedrelic/libgreader | libgreader/items.py | ItemsContainer.loadMoreItems | python | def loadMoreItems(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
self.lastLoadOk = False
self.lastLoadLength = 0
if not continuation and not self.continuation:
return
self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation, loadLimit, since, until)) | Load more items using the continuation parameters of previously loaded items. | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/items.py#L36-L44 | [
"def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\n \"\"\"\n Get content from google reader with specified parameters.\n Must be overladed in inherited clases\n \"\"\"\n return None\n",
"def _itemsLoadedDone(self, data):\n \"\"\"\n Called when... | class ItemsContainer(object):
"""
A base class used for all classes aimed to have items (Categories and Feeds)
"""
def __init__(self):
self.items = []
self.itemsById = {}
self.lastLoadOk = False
self.lastLoadLength = 0
self.lastUpdated = None
self.unread = 0
self.continuation = None
def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Get content from google reader with specified parameters.
Must be overladed in inherited clases
"""
return None
def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
"""
Load items and call itemsLoadedDone to transform data in objects
"""
self.clearItems()
self.loadtLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until))
def _itemsLoadedDone(self, data):
"""
Called when all items are loaded
"""
if data is None:
return
self.continuation = data.get('continuation', None)
self.lastUpdated = data.get('updated', None)
self.lastLoadLength = len(data.get('items', []))
self.googleReader.itemsToObjects(self, data.get('items', []))
self.lastLoadOk = True
def _addItem(self, item):
self.items.append(item)
self.itemsById[item.id] = item
def getItem(self, id):
return self.itemsById[id]
def clearItems(self):
self.items = []
self.itemsById = {}
self.continuation = None
def getItems(self):
return self.items
def countItems(self, excludeRead=False):
if excludeRead:
sum([1 for item in self.items if item.isUnread()])
else:
return len(self.items)
def markItemRead(self, item, read):
if read and item.isUnread():
self.unread -= 1
elif not read and item.isRead():
self.unread += 1
def markAllRead(self):
self.unread = 0
for item in self.items:
item.read = True
item.canUnread = False
result = self.googleReader.markFeedAsRead(self)
return result.upper() == 'OK'
def countUnread(self):
self.unread = self.countItems(excludeRead=True)
|
askedrelic/libgreader | libgreader/items.py | ItemsContainer._itemsLoadedDone | python | def _itemsLoadedDone(self, data):
if data is None:
return
self.continuation = data.get('continuation', None)
self.lastUpdated = data.get('updated', None)
self.lastLoadLength = len(data.get('items', []))
self.googleReader.itemsToObjects(self, data.get('items', []))
self.lastLoadOk = True | Called when all items are loaded | train | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/items.py#L46-L56 | null | class ItemsContainer(object):
"""
A base class used for all classes aimed to have items (Categories and Feeds)
"""
def __init__(self):
self.items = []
self.itemsById = {}
self.lastLoadOk = False
self.lastLoadLength = 0
self.lastUpdated = None
self.unread = 0
self.continuation = None
def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Get content from google reader with specified parameters.
Must be overladed in inherited clases
"""
return None
def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
"""
Load items and call itemsLoadedDone to transform data in objects
"""
self.clearItems()
self.loadtLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until))
def loadMoreItems(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Load more items using the continuation parameters of previously loaded items.
"""
self.lastLoadOk = False
self.lastLoadLength = 0
if not continuation and not self.continuation:
return
self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation, loadLimit, since, until))
def _addItem(self, item):
self.items.append(item)
self.itemsById[item.id] = item
def getItem(self, id):
return self.itemsById[id]
def clearItems(self):
self.items = []
self.itemsById = {}
self.continuation = None
def getItems(self):
return self.items
def countItems(self, excludeRead=False):
if excludeRead:
sum([1 for item in self.items if item.isUnread()])
else:
return len(self.items)
def markItemRead(self, item, read):
if read and item.isUnread():
self.unread -= 1
elif not read and item.isRead():
self.unread += 1
def markAllRead(self):
self.unread = 0
for item in self.items:
item.read = True
item.canUnread = False
result = self.googleReader.markFeedAsRead(self)
return result.upper() == 'OK'
def countUnread(self):
self.unread = self.countItems(excludeRead=True)
|
Carbonara-Project/Guanciale | guanciale/idblib.py | idaunpack | python | def idaunpack(buf):
"""
Special data packing format, used in struct definitions, and .id2 files
sdk functions: pack_dd etc.
"""
buf = bytearray(buf)
def nextval(o):
val = buf[o] ; o += 1
if val == 0xff: # 32 bit value
val, = struct.unpack_from(">L", buf, o)
o += 4
return val, o
if val < 0x80: # 7 bit value
return val, o
val <<= 8
val |= buf[o] ; o += 1
if val < 0xc000: # 14 bit value
return val & 0x3fff, o
# 29 bit value
val <<= 8
val |= buf[o] ; o += 1
val <<= 8
val |= buf[o] ; o += 1
return val & 0x1fffffff, o
values = []
o = 0
while o < len(buf):
val, o = nextval(o)
values.append(val)
return values | Special data packing format, used in struct definitions, and .id2 files
sdk functions: pack_dd etc. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L161-L194 | [
"def nextval(o):\n val = buf[o] ; o += 1\n if val == 0xff: # 32 bit value\n val, = struct.unpack_from(\">L\", buf, o)\n o += 4\n return val, o\n if val < 0x80: # 7 bit value\n return val, o\n val <<= 8\n val |= buf[o] ; o += 1\n if val < 0xc000: # 14 bit value\n ... | """
idblib - a module for reading hex-rays Interactive DisAssembler databases
Supports database versions starting with IDA v2.0
IDA v1.x is not supported, that was an entirely different file format.
IDA v2.x databases are organised as several files, in a directory
IDA v3.x databases are bundled into .idb files
IDA v4 .. v6 various improvements, like databases larger than 4Gig, and 64 bit support.
Copyright (c) 2016 Willem Hengeveld <itsme@xs4all.nl>
An IDB file can contain up to 6 sections:
id0 the main database
id1 contains flags for each byte - what is returned by idc.GetFlags(ea)
nam contains a list of addresses of named items
seg .. only in older databases
til type info
id2 ?
The id0 database is a simple key/value database, much like leveldb
types of records:
Some bookkeeping:
"$ MAX NODE" -> the highest numbered node value in use.
A list of names:
"N" + name -> the node id for that name.
names are both user/disassembler symbols assigned to addresses
in the disassembled code, and IDA internals, like lists of items,
For example: '$ structs', or 'Root Node'.
The main part:
"." + nodeid + tag + index
This maps directly onto the idasdk netnode interface.
The size of the nodeid and index is 32bits for .idb files and 64 bits for .i64 files.
The nodeid and index are encoded as bigendian numbers in the key, and as little endian
numbers in (most of) the values.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import struct
import binascii
import re
import os
#############################################################################
# some code to make this library run with both python2 and python3
#############################################################################
import sys
if sys.version_info[0] == 3:
long = int
else:
bytes = bytearray
try:
cmp(1, 2)
except:
# python3 does not have cmp
def cmp(a, b): return (a > b) - (a < b)
def makeStringIO(data):
if sys.version_info[0] == 2:
from StringIO import StringIO
return StringIO(data)
else:
from io import BytesIO
return BytesIO(data)
#############################################################################
# some utility functions
#############################################################################
def nonefmt(fmt, item):
# helper for outputting None without raising an error
if item is None:
return "-"
return fmt % item
def hexdump(data):
if data is None:
return
return binascii.b2a_hex(data).decode('utf-8')
#############################################################################
class FileSection(object):
"""
Presents a file like object which is a section of a larger file.
`fh` is expected to have a seek and read method.
This class is used to access a section (e.g. the .id0 file) of a larger file (e.g. the .idb file)
and make read/seek behave as if it were a seperate file.
"""
def __init__(self, fh, start, end):
self.fh = fh
self.start = start
self.end = end
self.curpos = 0
self.fh.seek(self.start)
def read(self, size=None):
want = self.end - self.start - self.curpos
if size is not None and want > size:
want = size
if want <= 0:
return b""
# make sure filepointer is at correct position since we are sharing the fh object with others.
self.fh.seek(self.curpos + self.start)
data = self.fh.read(want)
self.curpos += len(data)
return data
def seek(self, offset, *args):
def isvalidpos(offset):
return 0 <= offset <= self.end - self.start
if len(args) == 0:
whence = 0
else:
whence = args[0]
if whence == 0:
if not isvalidpos(offset):
print("invalid seek: from %x to SET:%x" % (self.curpos, offset))
raise Exception("illegal offset")
self.curpos = offset
elif whence == 1:
if not isvalidpos(self.curpos + offset):
raise Exception("illegal offset")
self.curpos += offset
elif whence == 2:
if not isvalidpos(self.end - self.start + offset):
raise Exception("illegal offset")
self.curpos = self.end - self.start + offset
self.fh.seek(self.curpos + self.start)
def tell(self):
return self.curpos
def idaunpack(buf):
"""
Special data packing format, used in struct definitions, and .id2 files
sdk functions: pack_dd etc.
"""
buf = bytearray(buf)
def nextval(o):
val = buf[o] ; o += 1
if val == 0xff: # 32 bit value
val, = struct.unpack_from(">L", buf, o)
o += 4
return val, o
if val < 0x80: # 7 bit value
return val, o
val <<= 8
val |= buf[o] ; o += 1
if val < 0xc000: # 14 bit value
return val & 0x3fff, o
# 29 bit value
val <<= 8
val |= buf[o] ; o += 1
val <<= 8
val |= buf[o] ; o += 1
return val & 0x1fffffff, o
values = []
o = 0
while o < len(buf):
val, o = nextval(o)
values.append(val)
return values
class IDBFile(object):
"""
Provide access to the various sections in an .idb file.
Usage:
idb = IDBFile(fhandle)
id0 = idb.getsection(ID0File)
ID0File is expected to have a class property 'INDEX'
# v1..v5 id1 and nam files start with 'Va0' .. 'Va4'
# v6 id1 and nam files start with 'VA*'
# til files start with 'IDATIL'
# id2 files start with 'IDAS\x1d\xa5\x55\x55'
"""
def __init__(self, fh):
""" constructor takes a filehandle """
self.fh = fh
self.fh.seek(0)
hdrdata = self.fh.read(0x100)
self.magic = hdrdata[0:4].decode('utf-8', 'ignore')
if self.magic not in ('IDA0', 'IDA1', 'IDA2'):
raise Exception("invalid file magic")
values = struct.unpack_from("<6LH6L", hdrdata, 6)
if values[5] != 0xaabbccdd:
fileversion = 0
offsets = list(values[0:5])
offsets.append(0)
checksums = [0 for _ in range(6)]
else:
fileversion = values[6]
if fileversion < 5:
offsets = list(values[0:5])
checksums = list(values[8:13])
idsofs, idscheck = struct.unpack_from("<LH" if fileversion == 1 else "<LL", hdrdata, 56)
offsets.append(idsofs)
checksums.append(idscheck)
# note: filever 4 has '0x5c', zeros, md5, more zeroes
else:
values = struct.unpack_from("<QQLLHQQQ5LQL", hdrdata, 6)
offsets = [values[_] for _ in (0, 1, 5, 6, 7, 13)]
checksums = [values[_] for _ in (8, 9, 10, 11, 12, 14)]
# offsets now has offsets to the various idb parts
# id0, id1, nam, seg, til, id2 ( = sparse file )
self.offsets = offsets
self.checksums = checksums
self.fileversion = fileversion
def getsectioninfo(self, i):
"""
Returns a tuple with section parameters by index.
The parameteres are:
* compression flag
* data offset
* data size
* data checksum
Sections are stored in a fixed order: id0, id1, nam, seg, til, id2
"""
if not 0 <= i < len(self.offsets):
return 0, 0, 0, 0
if self.offsets[i] == 0:
return 0, 0, 0, 0
self.fh.seek(self.offsets[i])
if self.fileversion < 5:
comp, size = struct.unpack("<BL", self.fh.read(5))
ofs = self.offsets[i] + 5
else:
comp, size = struct.unpack("<BQ", self.fh.read(9))
ofs = self.offsets[i] + 9
return comp, ofs, size, self.checksums[i]
def getpart(self, ix):
"""
Returns a fileobject for the specified section.
This method optionally decompresses the data found in the .idb file,
and returns a file-like object, with seek, read, tell.
"""
if self.offsets[ix] == 0:
return
comp, ofs, size, checksum = self.getsectioninfo(ix)
fh = FileSection(self.fh, ofs, ofs + size)
if comp == 2:
import zlib
# very old databases used a different compression scheme:
wbits = -15 if self.magic == 'IDA0' else 15
fh = makeStringIO(zlib.decompress(fh.read(size), wbits))
elif comp == 0:
pass
else:
raise Exception("unsupported section encoding: %02x" % comp)
return fh
def getsection(self, cls):
"""
Constructs an object for the specified section.
"""
return cls(self, self.getpart(cls.INDEX))
class RecoverIDBFile:
"""
RecoverIDBFile has the same interface as IDBFile, but expects the database to be split over several files.
This is useful for opening IDAv2.x databases, or for recovering data from unclosed databases.
"""
id2ext = ['.id0', '.id1', '.nam', '.seg', '.til', '.id2']
def __init__(self, args, basepath, dbfiles):
if args.i64:
self.magic = 'IDA2'
else:
self.magic = 'IDA1'
self.basepath = basepath
self.dbfiles = dbfiles
self.fileversion = 0
def getsectioninfo(self, i):
if not 0 <= i < len(self.id2ext):
return 0, 0, 0, 0
ext = self.id2ext[i]
if ext not in self.dbfiles:
return 0, 0, 0, 0
return 0, 0, os.path.getsize(self.dbfiles[ext]), 0
def getpart(self, ix):
if not 0 <= ix < len(self.id2ext):
return None
ext = self.id2ext[ix]
if ext not in self.dbfiles:
print("can't find %s" % ext)
return None
return open(self.dbfiles[ext], "rb")
def getsection(self, cls):
part = self.getpart(cls.INDEX)
if part:
return cls(self, part)
def binary_search(a, k):
"""
Do a binary search in an array of objects ordered by '.key'
returns the largest index for which: a[i].key <= k
like c++: a.upperbound(k)--
"""
first, last = 0, len(a)
while first < last:
mid = (first + last) >> 1
if k < a[mid].key:
last = mid
else:
first = mid + 1
return first - 1
"""
################################################################################
I would have liked to make these classes a nested class of BTree, but
the problem is than there is no way for a nested-nested class
of BTree to refer back to a toplevel nested class of BTree.
So moving these outside of BTree so i can use them as baseclasses
in the various page implementations
class BTree:
class BaseEntry(object): pass
class BasePage(object): pass
class Page15(BasePage):
class Entry(BTree.BaseEntry):
pass
>>> NameError: name 'BTree' is not defined
"""
class BaseIndexEntry(object):
"""
Baseclass for Index Entries.
Index entries have a key + value, and a page containing keys larger than that key
in this index entry.
"""
def __init__(self, data):
ofs = self.recofs
if self.recofs < 6:
# reading an invalid page...
self.val = self.key = None
return
keylen, = struct.unpack_from("<H", data, ofs) ; ofs += 2
self.key = data[ofs:ofs + keylen] ; ofs += keylen
vallen, = struct.unpack_from("<H", data, ofs) ; ofs += 2
self.val = data[ofs:ofs + vallen] ; ofs += vallen
def __repr__(self):
return "%06x: %s = %s" % (self.page, hexdump(self.key), hexdump(self.val))
class BaseLeafEntry(BaseIndexEntry):
"""
Baseclass for Leaf Entries
Leaf entries have a key + value, and an `indent`
The `indent` is there to save space in the index, since subsequent keys
usually are very similar.
The indent specifies the offset where this key is different from the previous key
"""
def __init__(self, key, data):
""" leaf entries get the previous key a an argument. """
super(BaseLeafEntry, self).__init__(data)
self.key = key[:self.indent] + self.key
def __repr__(self):
return " %02x:%02x: %s = %s" % (self.unknown1, self.unknown, hexdump(self.key), hexdump(self.val))
class BTree(object):
    """
    BTree is the IDA main database engine.
    It allows the user to do a binary search for records with
    a specified key relation ( >, <, ==, >=, <= )
    """
    class BasePage(object):
        """
        Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
        there are subclasses which specify the exact layout of the page header,
        and index / leaf entries.
        Leaf pages don't have a 'preceeding' page pointer.
        """
        def __init__(self, data, entsize, entfmt):
            # `entsize` is the size of one entry record, `entfmt` the struct
            # format for the (pageptr, count) pairs in the page header/footer.
            self.preceeding, self.count = struct.unpack_from(entfmt, data)
            # a nonzero 'preceeding' pointer marks an index page,
            # zero marks a leaf page.
            if self.preceeding:
                entrytype = self.IndexEntry
            else:
                entrytype = self.LeafEntry
            self.index = []
            key = b""
            for i in range(self.count):
                # every entry gets the previous key, so leaf entries can
                # undo the key-prefix compression (`indent`).
                ent = entrytype(key, data, entsize * (1 + i))
                self.index.append(ent)
                key = ent.key
            self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
        def find(self, key):
            """
            Searches pages for key, returns relation to key:
            recurse -> found a next level index page to search for key.
            also returns the next level page nr
            gt -> found a value with a key greater than the one searched for.
            lt -> found a value with a key less than the one searched for.
            eq -> found a value with a key equal to the one searched for.
            gt, lt and eq return the index for the key found.
            # for an index entry: the key is 'less' than anything in the page pointed to.
            """
            i = binary_search(self.index, key)
            if i < 0:
                # all keys in this page are greater than `key`
                if self.isindex():
                    return ('recurse', -1)
                return ('gt', 0)
            if self.index[i].key == key:
                return ('eq', i)
            if self.isindex():
                return ('recurse', i)
            return ('lt', i)
        def getpage(self, ix):
            """ For Indexpages, returns the page ptr for the specified entry """
            # ix == -1 means 'before the first entry' -> the preceeding page
            return self.preceeding if ix < 0 else self.index[ix].page
        def getkey(self, ix):
            """ For all page types, returns the key for the specified entry """
            return self.index[ix].key
        def getval(self, ix):
            """ For all page types, returns the value for the specified entry """
            return self.index[ix].val
        def isleaf(self):
            """ True when this is a Leaf Page """
            return self.preceeding == 0
        def isindex(self):
            """ True when this is an Index Page """
            return self.preceeding != 0
        def __repr__(self):
            return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
    ######################################################
    # Page objects for the various versions of the database
    ######################################################
    class Page15(BasePage):
        """ v1.5 b-tree page """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                # 16 bit page pointers and record offsets
                self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
                self.unknown1 = 0
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            super(self.__class__, self).__init__(data, 4, "<HH")
    class Page16(BasePage):
        """ v1.6 b-tree page """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                # page pointers grew to 32 bit in v1.6
                self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            super(self.__class__, self).__init__(data, 6, "<LH")
    class Page20(BasePage):
        """ v2.0 b-tree page """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
                # unused zero byte is no longer there in v2.0 b-tree
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
                self.unknown1 = 0
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            super(self.__class__, self).__init__(data, 6, "<LH")
    class Cursor:
        """
        A Cursor object represents a position in the b-tree.
        It has methods for moving to the next or previous item.
        And methods for retrieving the key and value of the current position
        The position is represented as a list of (page, index) tuples
        """
        def __init__(self, db, stack):
            # `db` is the owning BTree (used to read pages);
            # `stack` is the path from the root down to the current entry.
            self.db = db
            self.stack = stack
        def next(self):
            """ move cursor to next entry """
            page, ix = self.stack.pop()
            if page.isleaf():
                # from leaf move towards root
                ix += 1
                while self.stack and ix == len(page.index):
                    page, ix = self.stack.pop()
                    ix += 1
                if ix < len(page.index):
                    self.stack.append((page, ix))
                # otherwise the stack empties out: cursor is now at eof
            else:
                # from node move towards leaf
                self.stack.append((page, ix))
                page = self.db.readpage(page.getpage(ix))
                while page.isindex():
                    ix = -1
                    self.stack.append((page, ix))
                    page = self.db.readpage(page.getpage(ix))
                ix = 0
                self.stack.append((page, ix))
        def prev(self):
            """ move cursor to the previous entry """
            page, ix = self.stack.pop()
            ix -= 1
            if page.isleaf():
                # move towards root, until non 'prec' item found
                while self.stack and ix < 0:
                    page, ix = self.stack.pop()
                if ix >= 0:
                    self.stack.append((page, ix))
            else:
                # move towards leaf
                self.stack.append((page, ix))
                while page.isindex():
                    page = self.db.readpage(page.getpage(ix))
                    ix = len(page.index) - 1
                    self.stack.append((page, ix))
        def eof(self):
            # an empty stack means the cursor moved past either end
            return len(self.stack) == 0
        def getkey(self):
            """ return the key value pointed to by the cursor """
            page, ix = self.stack[-1]
            return page.getkey(ix)
        def getval(self):
            """ return the data value pointed to by the cursor """
            page, ix = self.stack[-1]
            return page.getval(ix)
        def __repr__(self):
            return "cursor:" + repr(self.stack)
    def __init__(self, fh):
        """ BTree constructor - takes a filehandle """
        self.fh = fh
        self.fh.seek(0)
        data = self.fh.read(64)
        # the version string sits at a different offset per header layout
        if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
            self.parseheader15(data)
            self.page = self.Page15
            self.version = 15
        elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
            self.parseheader16(data)
            self.page = self.Page16
            self.version = 16
        elif data[19:].startswith(b"B-tree v2"):
            # v2.0 reuses the v1.6 header layout, only the page format differs
            self.parseheader16(data)
            self.page = self.Page20
            self.version = 20
        else:
            print("unknown btree: %s" % hexdump(data))
            raise Exception("unknown b-tree")
    def parseheader15(self, data):
        self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
    def parseheader16(self, data):
        # v16 and v20 both have the same header format
        self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
    def readpage(self, nr):
        # pages are addressed by page number; page 0 is the file header
        self.fh.seek(nr * self.pagesize)
        return self.page(self.fh.read(self.pagesize))
    def find(self, rel, key):
        """
        Searches for a record with the specified relation to the key
        A cursor object is returned, the user can call getkey, getval on the cursor
        to retrieve the actual value.
        or call cursor.next() / cursor.prev() to enumerate values.
        'eq' -> record equal to the key, None when not found
        'le' -> last record with key <= to key
        'ge' -> first record with key >= to key
        'lt' -> last record with key < to key
        'gt' -> first record with key > to key
        """
        # descend tree to leaf nearest to the `key`
        page = self.readpage(self.firstindex)
        stack = []
        while len(stack) < 256:
            act, ix = page.find(key)
            stack.append((page, ix))
            if act != 'recurse':
                break
            page = self.readpage(page.getpage(ix))
        if len(stack) == 256:
            # a healthy tree is far shallower; a path this deep means a cycle
            raise Exception("b-tree corrupted")
        cursor = BTree.Cursor(self, stack)
        # now correct for what was actually asked.
        # `act` is the relation of the found entry to `key` ('eq'/'lt'/'gt'),
        # step the cursor to satisfy the requested relation `rel`.
        if act == rel:
            pass
        elif rel == 'eq' and act != 'eq':
            return None
        elif rel in ('ge', 'le') and act == 'eq':
            pass
        elif rel in ('gt', 'ge') and act == 'lt':
            cursor.next()
        elif rel == 'gt' and act == 'eq':
            cursor.next()
        elif rel in ('lt', 'le') and act == 'gt':
            cursor.prev()
        elif rel == 'lt' and act == 'eq':
            cursor.prev()
        return cursor
    def dump(self):
        """ raw dump of all records in the b-tree """
        print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
        self.dumpfree()
        self.dumptree(self.firstindex)
    def dumpfree(self):
        """ list all free pages """
        # free pages are kept in a linked list of pages, each holding
        # a count + next pointer followed by `count` free page numbers.
        fmt = "L" if self.version > 15 else "H"
        hdrsize = 8 if self.version > 15 else 4
        pn = self.firstfree
        if pn == 0:
            print("no free pages")
            return
        while pn:
            self.fh.seek(pn * self.pagesize)
            data = self.fh.read(self.pagesize)
            if len(data) == 0:
                print("could not read FREE data at page %06x" % pn)
                break
            count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
            freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
            freepages.insert(0, pn)
            for pn in freepages:
                self.fh.seek(pn * self.pagesize)
                data = self.fh.read(self.pagesize)
                print("%06x: free: %s" % (pn, hexdump(data[:64])))
            pn = nextfree
    def dumpindented(self, pn, indent=0):
        """
        Dump all nodes of the current page with keys indented, showing how the `indent`
        feature works
        """
        page = self.readpage(pn)
        print(" " * indent, page)
        if page.isindex():
            print(" " * indent, end="")
            self.dumpindented(page.preceeding, indent + 1)
            for p in range(len(page.index)):
                print(" " * indent, end="")
                self.dumpindented(page.getpage(p), indent + 1)
    def dumptree(self, pn):
        """
        Walks entire tree, dumping all records on each page
        in sequential order
        """
        page = self.readpage(pn)
        print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
        for ent in page.index:
            print(" %s" % ent)
        if page.preceeding:
            # only index pages have subpages to recurse into
            self.dumptree(page.preceeding)
            for ent in page.index:
                self.dumptree(ent.page)
    def pagedump(self):
        """
        dump the contents of all pages, ignoring links between pages,
        this will enable you to view contents of pages which have become
        lost due to datacorruption.
        """
        self.fh.seek(self.pagesize)
        pn = 1
        while True:
            try:
                pagedata = self.fh.read(self.pagesize)
                if len(pagedata) == 0:
                    break
                elif len(pagedata) != self.pagesize:
                    print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
                    break
                elif pagedata == b'\x00' * self.pagesize:
                    print("%06x: empty" % (pn))
                else:
                    page = self.page(pagedata)
                    print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
                    for ent in page.index:
                        print(" %s" % ent)
            except Exception as e:
                # keep going: one corrupt page should not stop the dump
                print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
            pn += 1
class ID0File(object):
    """
    Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
    This is basically the low level netnode interface from the idasdk.
    There are two major groups of nodes in the database:
    key = "N"+name -> value = littleendian(nodeid)
    key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
    key = "."+bigendian(nodeid)+char(tag)+string
    key = "."+bigendian(nodeid)+char(tag)
    and some special nodes for bookkeeping:
    "$ MAX LINK"
    "$ MAX NODE"
    "$ NET DESC"
    Very old databases also have name entries with a lowercase 'n',
    and corresponding '-'+value nodes.
    I am not sure what those are for.
    several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
    nodeByName(name) returns the nodeid for a name
    bytes(nodeid, tag, val) returns the value for a specific node.
    """
    # section index within the .idb container (see IDBFile.getsection)
    INDEX = 0
    def __init__(self, idb, fh):
        self.btree = BTree(fh)
        self.wordsize = None
        if idb.magic == 'IDA2':
            # .i64 files use 64 bit values for some things.
            self.wordsize = 8
        elif idb.magic in ('IDA0', 'IDA1'):
            self.wordsize = 4
        else:
            # determine wordsize from value of '$ MAX NODE'
            c = self.btree.find('eq', b'$ MAX NODE')
            if c and not c.eof():
                self.wordsize = len(c.getval())
            if self.wordsize not in (4, 8):
                print("Can not determine wordsize for database - assuming 32 bit")
                self.wordsize = 4
        if self.wordsize == 4:
            self.nodebase = 0xFF000000
            self.fmt = "L"
        else:
            self.nodebase = 0xFF00000000000000
            self.fmt = "Q"
        # set the keyformat for this database
        # full key layout: '.'(s) + nodeid(fmt, bigendian) + tag(s) + index(fmt)
        self.keyfmt = ">s" + self.fmt + "s" + self.fmt
    def prettykey(self, key):
        """
        returns the key in a readable format.
        """
        f = list(self.decodekey(key))
        f[0] = f[0].decode('utf-8')
        if len(f) > 2 and type(f[2]) == bytes:
            f[2] = f[2].decode('utf-8')
        if f[0] == '.':
            if len(f) == 2:
                return "%s%16x" % tuple(f)
            elif len(f) == 3:
                return "%s%16x %s" % tuple(f)
            elif len(f) == 4:
                # 'H' tagged entries hold a name string as index
                if f[2] == 'H' and type(f[3]) in (str, bytes):
                    f[3] = f[3].decode('utf-8')
                    return "%s%16x %s '%s'" % tuple(f)
                elif type(f[3]) in (int, long):
                    return "%s%16x %s %x" % tuple(f)
                else:
                    f[3] = hexdump(f[3])
                    return "%s%16x %s %s" % tuple(f)
        elif f[0] in ('N', 'n', '$'):
            if type(f[1]) in (int, long):
                return "%s %x %16x" % tuple(f)
            else:
                return "%s'%s'" % tuple(f)
        elif f[0] == '-':
            return "%s %x" % tuple(f)
        return hexdump(key)
    def prettyval(self, val):
        """
        returns the value in a readable format.
        """
        # word sized values ending in 00 or ff are most likely numbers
        if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
            return "%x" % struct.unpack("<" + self.fmt, val)
        # word sized values containing control characters: also a number
        if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
            return "%x" % struct.unpack("<" + self.fmt, val)
        # anything not looking like printable text: hexdump it
        if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
            return hexdump(val)
        val = val.replace(b"\n", b"\\n")
        return "'%s'" % val.decode('utf-8', 'ignore')
    def nodeByName(self, name):
        """ Return a nodeid by name """
        # note: really long names are encoded differently:
        # 'N'+'\x00'+pack('Q', nameid) => ofs
        # and (ofs, 'N') -> nameid
        # at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
        cur = self.btree.find('eq', self.namekey(name))
        if cur:
            return struct.unpack('<' + self.fmt, cur.getval())[0]
    def namekey(self, name):
        # build the 'N'-prefixed lookup key; an int `name` is a long-name id
        if type(name) in (int, long):
            return struct.pack("<sB" + self.fmt, b'N', 0, name)
        return b'N' + name.encode('utf-8')
    def makekey(self, *args):
        """ return a binary key for the nodeid, tag and optional value """
        if len(args) > 1:
            # encode the tag character
            args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
        if len(args) == 3 and type(args[-1]) == str:
            # node.tag.string type keys
            return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
        elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
            # negative values -> need lowercase fmt char
            return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
        else:
            # node.tag.value type keys
            return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
    def decodekey(self, key):
        """
        splits a key in a tuple, one of:
        ( [ 'N', 'n', '$' ], 0, bignameid )
        ( [ 'N', 'n', '$' ], name )
        ( '-', id )
        ( '.', id )
        ( '.', id, tag )
        ( '.', id, tag, value )
        ( '.', id, 'H', name )
        """
        if key[:1] in (b'n', b'N', b'$'):
            if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
                # long-name reference: 'N' + NUL + nameid
                return struct.unpack(">sB" + self.fmt, key)
            else:
                return key[:1], key[1:].decode('utf-8', 'ignore')
        if key[:1] == b'-':
            return struct.unpack(">s" + self.fmt, key)
        # remaining formats are '.' keys, distinguished by length
        if len(key) == 1 + self.wordsize:
            return struct.unpack(self.keyfmt[:3], key)
        if len(key) == 1 + self.wordsize + 1:
            return struct.unpack(self.keyfmt[:4], key)
        if len(key) == 1 + 2 * self.wordsize + 1:
            return struct.unpack(self.keyfmt[:5], key)
        if len(key) > 1 + self.wordsize + 1:
            # '.'+id+tag followed by a string index
            f = struct.unpack_from(self.keyfmt[:4], key)
            return f + (key[2 + self.wordsize:], )
        raise Exception("unknown key format")
    def bytes(self, *args):
        """ return a raw value for the given arguments """
        # accepts either an existing cursor, or (nodeid, tag[, index]) args
        if len(args) == 1 and isinstance(args[0], BTree.Cursor):
            cur = args[0]
        else:
            cur = self.btree.find('eq', self.makekey(*args))
        if cur:
            return cur.getval()
    def int(self, *args):
        """
        Return the integer stored in the specified node.
        Any type of integer will be decoded: byte, short, long, long long
        """
        data = self.bytes(*args)
        if data is not None:
            if len(data) == 1:
                return struct.unpack("<B", data)[0]
            if len(data) == 2:
                return struct.unpack("<H", data)[0]
            if len(data) == 4:
                return struct.unpack("<L", data)[0]
            if len(data) == 8:
                return struct.unpack("<Q", data)[0]
            # odd sized value: not an integer; returns None
            print("can't get int from %s" % hexdump(data))
    def string(self, *args):
        """ return string stored in node """
        data = self.bytes(*args)
        if data is not None:
            return data.rstrip(b"\x00").decode('utf-8')
    def name(self, id):
        """
        resolves a name, both short and long names.
        """
        data = self.bytes(id, 'N')
        if not data:
            print("%x has no name" % id)
            return
        if data[:1] == b'\x00':
            # long name: value is a nameid referring to a blob of name data
            nameid, = struct.unpack_from(">" + self.fmt, data, 1)
            nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
            return nameblob.rstrip(b"\x00").decode('utf-8')
        return data.rstrip(b"\x00").decode('utf-8')
    def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
        """
        Blobs are stored in sequential nodes
        with increasing index values.
        most blobs, like scripts start at index
        0, long names start at a specified
        offset.
        """
        startkey = self.makekey(nodeid, tag, start)
        endkey = self.makekey(nodeid, tag, end)
        cur = self.btree.find('ge', startkey)
        data = b''
        # concatenate consecutive chunks until past the end key
        while cur.getkey() <= endkey:
            data += cur.getval()
            cur.next()
        return data
class ID1File(object):
    """
    Reads .id1 or 1.IDA files, containing byte flags
    This is basically the information for the .idc GetFlags(ea),
    FirstSeg(), NextSeg(ea), SegStart(ea), SegEnd(ea) functions
    """
    # section index within the .idb container (see IDBFile.getsection)
    INDEX = 1
    class SegInfo:
        # one segment: address range plus the file offset of its flag data
        def __init__(self, startea, endea, offset):
            self.startea = startea
            self.endea = endea
            self.offset = offset
    def __init__(self, idb, fh):
        if idb.magic == 'IDA2':
            wordsize, fmt = 8, "Q"
        else:
            wordsize, fmt = 4, "L"
        # todo: verify wordsize using the following heuristic:
        # L -> starting at: seglistofs + nsegs*seginfosize are all zero
        # L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
        self.fh = fh
        fh.seek(0)
        hdrdata = fh.read(32)
        magic = hdrdata[:4]
        if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
            nsegments, npages = struct.unpack_from("<HH", hdrdata, 4)
            # filesize / npages == 0x2000 for all cases
            seglistofs = 8
            seginfosize = 3
        elif magic == b'VA*\x00':
            always3, nsegments, always2k, npages = struct.unpack_from("<LLLL", hdrdata, 4)
            if always3 != 3:
                print("ID1: first dword != 3: %08x" % always3)
            if always2k != 0x800:
                print("ID1: third dword != 2k: %08x" % always2k)
            seglistofs = 20
            seginfosize = 2
        else:
            raise Exception("unknown id1 magic: %s" % hexdump(magic))
        self.seglist = []
        # Va0 - ida v3.0.5
        # Va3 - ida v3.6
        fh.seek(seglistofs)
        if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
            # old format stores (startea, endea, fileoffset) per segment
            segdata = fh.read(nsegments * 3 * wordsize)
            for o in range(nsegments):
                startea, endea, id1ofs = struct.unpack_from("<" + fmt + fmt + fmt, segdata, o * seginfosize * wordsize)
                self.seglist.append(self.SegInfo(startea, endea, id1ofs))
        elif magic == b'VA*\x00':
            # new format stores only (startea, endea); flag data follows
            # sequentially starting at offset 0x2000, 4 bytes per address
            segdata = fh.read(nsegments * 2 * wordsize)
            id1ofs = 0x2000
            for o in range(nsegments):
                startea, endea = struct.unpack_from("<" + fmt + fmt, segdata, o * seginfosize * wordsize)
                self.seglist.append(self.SegInfo(startea, endea, id1ofs))
                id1ofs += 4 * (endea - startea)
    def is32bit_heuristic(self, fh, seglistofs):
        # unimplemented: only seeks, always returns None
        fh.seek(seglistofs)
        # todo: verify wordsize using the following heuristic:
        # L -> starting at: seglistofs + nsegs*seginfosize are all zero
        # L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
    def dump(self):
        """ print first and last bits for each segment """
        for seg in self.seglist:
            print("==== %08x-%08x" % (seg.startea, seg.endea))
            if seg.endea - seg.startea < 30:
                for ea in range(seg.startea, seg.endea):
                    print(" %08x: %08x" % (ea, self.getFlags(ea)))
            else:
                # large segment: only show the first and last 10 entries
                for ea in range(seg.startea, seg.startea + 10):
                    print(" %08x: %08x" % (ea, self.getFlags(ea)))
                print("...")
                for ea in range(seg.endea - 10, seg.endea):
                    print(" %08x: %08x" % (ea, self.getFlags(ea)))
    def find_segment(self, ea):
        """ do a linear search for the given address in the segment list """
        for seg in self.seglist:
            if seg.startea <= ea < seg.endea:
                return seg
    def getFlags(self, ea):
        # 32 bit flag word per address
        # note: raises AttributeError when `ea` is in no known segment
        seg = self.find_segment(ea)
        self.fh.seek(seg.offset + 4 * (ea - seg.startea))
        return struct.unpack("<L", self.fh.read(4))[0]
    def firstSeg(self):
        # start address of the first segment
        return self.seglist[0].startea
    def nextSeg(self, ea):
        # start of the segment after the one containing `ea`,
        # None when `ea` is in the last segment (or in none at all)
        for i, seg in enumerate(self.seglist):
            if seg.startea <= ea < seg.endea:
                if i + 1 < len(self.seglist):
                    return self.seglist[i + 1].startea
                else:
                    return
    def segStart(self, ea):
        seg = self.find_segment(ea)
        return seg.startea
    def segEnd(self, ea):
        seg = self.find_segment(ea)
        return seg.endea
class NAMFile(object):
    """
    Reads .nam or NAMES.IDA files, containing ptrs to named items.

    The header records how many names exist (`nnames`), how many pages
    they occupy (`npages`), and the page size; `allnames()` yields the
    address of every named item.
    """
    # section index within the .idb container (see IDBFile.getsection)
    INDEX = 2
    def __init__(self, idb, fh):
        # word size depends on whether this is a 64 bit (IDA2 / .i64) database
        if idb.magic == 'IDA2':
            wordsize, fmt = 8, "Q"
        else:
            wordsize, fmt = 4, "L"
        self.fh = fh
        fh.seek(0)
        hdrdata = fh.read(64)
        magic = hdrdata[:4]
        # Va0 - ida v3.0.5
        # Va1 - ida v3.6
        if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
            always1, npages, always0, nnames, pagesize = struct.unpack_from("<HH" + fmt + fmt + "L", hdrdata, 4)
            if always1 != 1: print("nam: first hw = %d" % always1)
            if always0 != 0: print("nam: third dw = %d" % always0)
        elif magic == b'VA*\x00':
            always3, always1, always2k, npages, always0, nnames = struct.unpack_from("<LLLL" + fmt + "L", hdrdata, 4)
            if always3 != 3: print("nam: 3 hw = %d" % always3)
            if always1 != 1: print("nam: 1 hw = %d" % always1)
            if always0 != 0: print("nam: 0 dw = %d" % always0)
            if always2k != 0x800: print("nam: 2k dw = %d" % always2k)
            pagesize = 0x2000
        else:
            raise Exception("unknown nam magic: %s" % hexdump(magic))
        if idb.magic == 'IDA2':
            # 64 bit databases store a word count here, not a name count
            nnames >>= 1
        self.wordsize = wordsize
        self.wordfmt = fmt
        self.nnames = nnames
        # bugfix: dump() reads self.npages, but it was never stored before
        self.npages = npages
        self.pagesize = pagesize
    def dump(self):
        """ print a summary of the name list header """
        print("nam: nnames=%d, npages=%d, pagesize=%08x" % (self.nnames, self.npages, self.pagesize))
    def allnames(self):
        """ generator yielding the address of each named item """
        # name pointers start at the second page, `nnames` words in total
        self.fh.seek(self.pagesize)
        n = 0
        while n < self.nnames:
            data = self.fh.read(self.pagesize)
            want = min(self.nnames - n, int(self.pagesize / self.wordsize))
            ofslist = struct.unpack_from("<%d%s" % (want, self.wordfmt), data, 0)
            for ea in ofslist:
                yield ea
            n += want
class SEGFile(object):
    """ reads .seg or $SEGS.IDA files. """
    # section index within the .idb container (see IDBFile.getsection)
    INDEX = 3
    def __init__(self, idb, fh):
        # segment parsing is not implemented; the class exists so
        # IDBFile.getsection(SEGFile) returns a placeholder object
        pass
class TILFile(object):
    """ reads .til files """
    # section index within the .idb container (see IDBFile.getsection)
    INDEX = 4
    def __init__(self, idb, fh):
        # type library parsing is not implemented yet
        pass
# note: v3 databases had a .reg instead of .til
class ID2File(object):
    """
    Reads .id2 files
    ID2 sections contain packed data (see idaunpack), resulting in triples
    of unknown use.
    """
    # section index within the .idb container (see IDBFile.getsection)
    INDEX = 5
    def __init__(self, idb, fh):
        # decoding of the packed data is not implemented yet
        pass
|
Carbonara-Project/Guanciale | guanciale/idblib.py | binary_search | python | def binary_search(a, k):
"""
Do a binary search in an array of objects ordered by '.key'
returns the largest index for which: a[i].key <= k
like c++: a.upperbound(k)--
"""
first, last = 0, len(a)
while first < last:
mid = (first + last) >> 1
if k < a[mid].key:
last = mid
else:
first = mid + 1
return first - 1 | Do a binary search in an array of objects ordered by '.key'
returns the largest index for which: a[i].key <= k
like c++: a.upperbound(k)-- | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L351-L366 | null | """
idblib - a module for reading hex-rays Interactive DisAssembler databases
Supports database versions starting with IDA v2.0
IDA v1.x is not supported, that was an entirely different file format.
IDA v2.x databases are organised as several files, in a directory
IDA v3.x databases are bundled into .idb files
IDA v4 .. v6 various improvements, like databases larger than 4Gig, and 64 bit support.
Copyright (c) 2016 Willem Hengeveld <itsme@xs4all.nl>
An IDB file can contain up to 6 sections:
id0 the main database
id1 contains flags for each byte - what is returned by idc.GetFlags(ea)
nam contains a list of addresses of named items
seg .. only in older databases
til type info
id2 ?
The id0 database is a simple key/value database, much like leveldb
types of records:
Some bookkeeping:
"$ MAX NODE" -> the highest numbered node value in use.
A list of names:
"N" + name -> the node id for that name.
names are both user/disassembler symbols assigned to addresses
in the disassembled code, and IDA internals, like lists of items,
For example: '$ structs', or 'Root Node'.
The main part:
"." + nodeid + tag + index
This maps directly onto the idasdk netnode interface.
The size of the nodeid and index is 32bits for .idb files and 64 bits for .i64 files.
The nodeid and index are encoded as bigendian numbers in the key, and as little endian
numbers in (most of) the values.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import struct
import binascii
import re
import os
#############################################################################
# some code to make this library run with both python2 and python3
#############################################################################
# `long` and `bytes` differ between python2 and python3; alias them here so
# the rest of the module can use a single spelling of each.
import sys
if sys.version_info[0] == 3:
    long = int
else:
    bytes = bytearray
try:
    cmp(1, 2)
except:
    # python3 does not have cmp
    def cmp(a, b): return (a > b) - (a < b)
def makeStringIO(data):
    """
    Wrap `data` in an in-memory file object, portable between
    python2 (StringIO) and python3 (BytesIO).
    """
    if sys.version_info[0] != 2:
        from io import BytesIO
        return BytesIO(data)
    from StringIO import StringIO
    return StringIO(data)
#############################################################################
# some utility functions
#############################################################################
def nonefmt(fmt, item):
    """Format `item` with `fmt`, yielding "-" instead of failing on None."""
    return "-" if item is None else fmt % item
def hexdump(data):
    """Return `data` as a lowercase hex string, or None for None input."""
    if data is None:
        return None
    return binascii.hexlify(data).decode('utf-8')
#############################################################################
class FileSection(object):
    """
    Presents a file like object which is a section of a larger file.
    `fh` is expected to have a seek and read method.
    This class is used to access a section (e.g. the .id0 file) of a larger file (e.g. the .idb file)
    and make read/seek behave as if it were a seperate file.
    """
    def __init__(self, fh, start, end):
        # `start` and `end` are absolute offsets in `fh`;
        # `curpos` is always kept relative to `start`.
        self.fh = fh
        self.start = start
        self.end = end
        self.curpos = 0
        self.fh.seek(self.start)
    def read(self, size=None):
        # clip the read so it never extends past the end of the section
        want = self.end - self.start - self.curpos
        if size is not None and want > size:
            want = size
        if want <= 0:
            return b""
        # make sure filepointer is at correct position since we are sharing the fh object with others.
        self.fh.seek(self.curpos + self.start)
        data = self.fh.read(want)
        self.curpos += len(data)
        return data
    def seek(self, offset, *args):
        # mimics io-style seek: optional second argument is `whence`
        # (0 = absolute, 1 = relative, 2 = from end of section), except that
        # seeking outside the section raises instead of clamping.
        def isvalidpos(offset):
            return 0 <= offset <= self.end - self.start
        if len(args) == 0:
            whence = 0
        else:
            whence = args[0]
        if whence == 0:
            if not isvalidpos(offset):
                print("invalid seek: from %x to SET:%x" % (self.curpos, offset))
                raise Exception("illegal offset")
            self.curpos = offset
        elif whence == 1:
            if not isvalidpos(self.curpos + offset):
                raise Exception("illegal offset")
            self.curpos += offset
        elif whence == 2:
            if not isvalidpos(self.end - self.start + offset):
                raise Exception("illegal offset")
            self.curpos = self.end - self.start + offset
        self.fh.seek(self.curpos + self.start)
    def tell(self):
        # position relative to the start of the section
        return self.curpos
def idaunpack(buf):
    """
    Decode IDA's variable length integer packing (the sdk's pack_dd format),
    as used in struct definitions and .id2 files.

    Lead byte encoding:
        0x00-0x7f            -> 7 bit value in one byte
        0x80-0xbf + 1 byte   -> 14 bit value
        0xc0-0xfe + 3 bytes  -> 29 bit value
        0xff     + 4 bytes   -> full 32 bit big endian value

    Returns the list of all values decoded from `buf`.
    """
    buf = bytearray(buf)
    values = []
    pos = 0
    total = len(buf)
    while pos < total:
        lead = buf[pos]
        pos += 1
        if lead == 0xff:
            # escape marker: a plain 32 bit big endian value follows
            val, = struct.unpack_from(">L", buf, pos)
            pos += 4
        elif lead < 0x80:
            val = lead
        elif lead < 0xc0:
            val = ((lead << 8) | buf[pos]) & 0x3fff
            pos += 1
        else:
            val = ((lead << 24) | (buf[pos] << 16) | (buf[pos + 1] << 8) | buf[pos + 2]) & 0x1fffffff
            pos += 3
        values.append(val)
    return values
class IDBFile(object):
    """
    Provide access to the various sections in an .idb file.
    Usage:
    idb = IDBFile(fhandle)
    id0 = idb.getsection(ID0File)
    ID0File is expected to have a class property 'INDEX'
    # v1..v5 id1 and nam files start with 'Va0' .. 'Va4'
    # v6 id1 and nam files start with 'VA*'
    # til files start with 'IDATIL'
    # id2 files start with 'IDAS\x1d\xa5\x55\x55'
    """
    def __init__(self, fh):
        """ constructor takes a filehandle """
        self.fh = fh
        self.fh.seek(0)
        hdrdata = self.fh.read(0x100)
        self.magic = hdrdata[0:4].decode('utf-8', 'ignore')
        if self.magic not in ('IDA0', 'IDA1', 'IDA2'):
            raise Exception("invalid file magic")
        values = struct.unpack_from("<6LH6L", hdrdata, 6)
        # the 0xaabbccdd marker distinguishes versioned headers from the
        # very old (version 0) header layout.
        if values[5] != 0xaabbccdd:
            fileversion = 0
            offsets = list(values[0:5])
            offsets.append(0)
            checksums = [0 for _ in range(6)]
        else:
            fileversion = values[6]
            if fileversion < 5:
                offsets = list(values[0:5])
                checksums = list(values[8:13])
                # the id2 section offset+checksum live at a fixed position
                idsofs, idscheck = struct.unpack_from("<LH" if fileversion == 1 else "<LL", hdrdata, 56)
                offsets.append(idsofs)
                checksums.append(idscheck)
                # note: filever 4 has '0x5c', zeros, md5, more zeroes
            else:
                # fileversion >= 5: 64 bit section offsets
                values = struct.unpack_from("<QQLLHQQQ5LQL", hdrdata, 6)
                offsets = [values[_] for _ in (0, 1, 5, 6, 7, 13)]
                checksums = [values[_] for _ in (8, 9, 10, 11, 12, 14)]
        # offsets now has offsets to the various idb parts
        # id0, id1, nam, seg, til, id2 ( = sparse file )
        self.offsets = offsets
        self.checksums = checksums
        self.fileversion = fileversion
    def getsectioninfo(self, i):
        """
        Returns a tuple with section parameters by index.
        The parameters are:
        * compression flag
        * data offset
        * data size
        * data checksum
        Sections are stored in a fixed order: id0, id1, nam, seg, til, id2
        """
        if not 0 <= i < len(self.offsets):
            return 0, 0, 0, 0
        if self.offsets[i] == 0:
            return 0, 0, 0, 0
        # each section starts with a small (comp, size) header
        self.fh.seek(self.offsets[i])
        if self.fileversion < 5:
            comp, size = struct.unpack("<BL", self.fh.read(5))
            ofs = self.offsets[i] + 5
        else:
            comp, size = struct.unpack("<BQ", self.fh.read(9))
            ofs = self.offsets[i] + 9
        return comp, ofs, size, self.checksums[i]
    def getpart(self, ix):
        """
        Returns a fileobject for the specified section.
        This method optionally decompresses the data found in the .idb file,
        and returns a file-like object, with seek, read, tell.
        """
        if self.offsets[ix] == 0:
            return
        comp, ofs, size, checksum = self.getsectioninfo(ix)
        fh = FileSection(self.fh, ofs, ofs + size)
        if comp == 2:
            import zlib
            # very old databases used a different compression scheme:
            wbits = -15 if self.magic == 'IDA0' else 15
            fh = makeStringIO(zlib.decompress(fh.read(size), wbits))
        elif comp == 0:
            pass
        else:
            raise Exception("unsupported section encoding: %02x" % comp)
        return fh
    def getsection(self, cls):
        """
        Constructs an object for the specified section.
        """
        return cls(self, self.getpart(cls.INDEX))
class RecoverIDBFile:
    """
    RecoverIDBFile has the same interface as IDBFile, but expects the database to be split over several files.
    This is useful for opening IDAv2.x databases, or for recovering data from unclosed databases.
    """
    # section index -> filename extension, same ordering as IDBFile sections
    id2ext = ['.id0', '.id1', '.nam', '.seg', '.til', '.id2']
    def __init__(self, args, basepath, dbfiles):
        # `args.i64` selects 64 bit (IDA2) mode; `dbfiles` maps an extension
        # such as '.id0' to the path of the corresponding on-disk file.
        if args.i64:
            self.magic = 'IDA2'
        else:
            self.magic = 'IDA1'
        self.basepath = basepath
        self.dbfiles = dbfiles
        self.fileversion = 0
    def getsectioninfo(self, i):
        # mirrors IDBFile.getsectioninfo: (compression, offset, size, checksum)
        if not 0 <= i < len(self.id2ext):
            return 0, 0, 0, 0
        ext = self.id2ext[i]
        if ext not in self.dbfiles:
            return 0, 0, 0, 0
        return 0, 0, os.path.getsize(self.dbfiles[ext]), 0
    def getpart(self, ix):
        # returns an open filehandle for the section, or None when missing
        if not 0 <= ix < len(self.id2ext):
            return None
        ext = self.id2ext[ix]
        if ext not in self.dbfiles:
            print("can't find %s" % ext)
            return None
        return open(self.dbfiles[ext], "rb")
    def getsection(self, cls):
        # construct the section object; cls.INDEX selects which file to open
        part = self.getpart(cls.INDEX)
        if part:
            return cls(self, part)
def binary_search(a, k):
    """
    Binary search in an array of objects ordered by their '.key' attribute.

    Returns the largest index i for which a[i].key <= k, or -1 when every
    key is greater than k (like c++: a.upperbound(k)--).
    """
    lo, hi = 0, len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if a[mid].key > k:
            hi = mid
        else:
            lo = mid + 1
    return lo - 1
"""
################################################################################
I would have liked to make these classes a nested class of BTree, but
the problem is than there is no way for a nested-nested class
of BTree to refer back to a toplevel nested class of BTree.
So moving these outside of BTree so i can use them as baseclasses
in the various page implementations
class BTree:
class BaseEntry(object): pass
class BasePage(object): pass
class Page15(BasePage):
class Entry(BTree.BaseEntry):
pass
>>> NameError: name 'BTree' is not defined
"""
class BaseIndexEntry(object):
    """
    Base class for index entries.

    An index entry holds a key/value pair plus (in the page subclasses) the
    number of the page containing keys larger than this entry's key.
    Subclasses must set `self.recofs` -- the offset of the length-prefixed
    key/value record inside the page -- before invoking this constructor.
    """
    def __init__(self, data):
        if self.recofs < 6:
            # record offset points inside the page header: invalid page
            self.key = None
            self.val = None
            return

        def take(pos):
            # read one 16-bit-length-prefixed chunk starting at `pos`,
            # return the chunk and the offset just past it.
            size, = struct.unpack_from("<H", data, pos)
            end = pos + 2 + size
            return data[pos + 2:end], end

        self.key, pos = take(self.recofs)
        self.val, pos = take(pos)

    def __repr__(self):
        return "%06x: %s = %s" % (self.page, hexdump(self.key), hexdump(self.val))
class BaseLeafEntry(BaseIndexEntry):
    """
    Base class for leaf entries.
    A leaf entry is a key/value pair plus an `indent`: to save space,
    consecutive keys share their common prefix and only the tail starting
    at offset `indent` is stored.  The full key is reconstructed from the
    previous entry's (fully expanded) key.
    """
    def __init__(self, key, data):
        """`key` is the fully expanded key of the previous entry."""
        prefix = key[:self.indent]
        super(BaseLeafEntry, self).__init__(data)
        # glue the shared prefix back onto the stored key tail
        self.key = prefix + self.key
    def __repr__(self):
        return "    %02x:%02x: %s = %s" % (self.unknown1, self.unknown, hexdump(self.key), hexdump(self.val))
class BTree(object):
    """
    BTree is the IDA main database engine.
    It allows the user to do a binary search for records with
    a specified key relation ( >, <, ==, >=, <= )
    """
    class BasePage(object):
        """
        Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
        there are subclasses which specify the exact layout of the page header,
        and index / leaf entries.
        Leaf pages don't have a 'preceeding' page pointer.
        """
        def __init__(self, data, entsize, entfmt):
            # `entsize` is the size of one entry descriptor,
            # `entfmt` the struct format of the two header words.
            self.preceeding, self.count = struct.unpack_from(entfmt, data)
            # a non-zero 'preceeding' page pointer marks an index page
            if self.preceeding:
                entrytype = self.IndexEntry
            else:
                entrytype = self.LeafEntry
            self.index = []
            key = b""
            # keys are prefix-compressed: each entry needs the previous full key
            for i in range(self.count):
                ent = entrytype(key, data, entsize * (1 + i))
                self.index.append(ent)
                key = ent.key
            self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
        def find(self, key):
            """
            Searches pages for key, returns relation to key:
            recurse -> found a next level index page to search for key.
                       also returns the next level page nr
            gt -> found a value with a key greater than the one searched for.
            lt -> found a value with a key less than the one searched for.
            eq -> found a value with a key equal to the one searched for.
            gt, lt and eq return the index for the key found.
            # for an index entry: the key is 'less' than anything in the page pointed to.
            """
            i = binary_search(self.index, key)
            if i < 0:
                # all keys on this page are greater than `key`
                if self.isindex():
                    return ('recurse', -1)
                return ('gt', 0)
            if self.index[i].key == key:
                return ('eq', i)
            if self.isindex():
                return ('recurse', i)
            return ('lt', i)
        def getpage(self, ix):
            """ For Indexpages, returns the page ptr for the specified entry """
            # ix == -1 addresses the page's own 'preceeding' pointer
            return self.preceeding if ix < 0 else self.index[ix].page
        def getkey(self, ix):
            """ For all page types, returns the key for the specified entry """
            return self.index[ix].key
        def getval(self, ix):
            """ For all page types, returns the value for the specified entry """
            return self.index[ix].val
        def isleaf(self):
            """ True when this is a Leaf Page """
            return self.preceeding == 0
        def isindex(self):
            """ True when this is an Index Page """
            return self.preceeding != 0
        def __repr__(self):
            return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
    ######################################################
    # Page objects for the various versions of the database
    ######################################################
    class Page15(BasePage):
        """ v1.5 b-tree page """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                # 16 bit page numbers and record offsets
                self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
                self.unknown1 = 0
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            # NOTE: super(self.__class__, ...) would recurse under further subclassing
            super(self.__class__, self).__init__(data, 4, "<HH")
    class Page16(BasePage):
        """ v1.6 b-tree page """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                # 32 bit page numbers, 16 bit record offsets
                self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            super(self.__class__, self).__init__(data, 6, "<LH")
    class Page20(BasePage):
        """ v2.0 b-tree page """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
                # unused zero byte is no longer there in v2.0 b-tree
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                # 16 bit indent in v2.0 ( v1.x used 8 bit )
                self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
                self.unknown1 = 0
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            super(self.__class__, self).__init__(data, 6, "<LH")
    class Cursor:
        """
        A Cursor object represents a position in the b-tree.
        It has methods for moving to the next or previous item.
        And methods for retrieving the key and value of the current position
        The position is represented as a list of (page, index) tuples
        """
        def __init__(self, db, stack):
            self.db = db
            self.stack = stack
        def next(self):
            """ move cursor to next entry """
            page, ix = self.stack.pop()
            if page.isleaf():
                # from leaf move towards root
                ix += 1
                while self.stack and ix == len(page.index):
                    # past the end of this page -> pop up one level
                    page, ix = self.stack.pop()
                    ix += 1
                if ix < len(page.index):
                    self.stack.append((page, ix))
            else:
                # from node move towards leaf
                self.stack.append((page, ix))
                page = self.db.readpage(page.getpage(ix))
                while page.isindex():
                    # keep following the leftmost ('preceeding') branch
                    ix = -1
                    self.stack.append((page, ix))
                    page = self.db.readpage(page.getpage(ix))
                ix = 0
                self.stack.append((page, ix))
        def prev(self):
            """ move cursor to the previous entry """
            page, ix = self.stack.pop()
            ix -= 1
            if page.isleaf():
                # move towards root, until non 'prec' item found
                while self.stack and ix < 0:
                    page, ix = self.stack.pop()
                if ix >= 0:
                    self.stack.append((page, ix))
            else:
                # move towards leaf, descending along the rightmost entries
                self.stack.append((page, ix))
                while page.isindex():
                    page = self.db.readpage(page.getpage(ix))
                    ix = len(page.index) - 1
                    self.stack.append((page, ix))
        def eof(self):
            # an empty stack means the cursor moved past either end of the tree
            return len(self.stack) == 0
        def getkey(self):
            """ return the key value pointed to by the cursor """
            page, ix = self.stack[-1]
            return page.getkey(ix)
        def getval(self):
            """ return the data value pointed to by the cursor """
            page, ix = self.stack[-1]
            return page.getval(ix)
        def __repr__(self):
            return "cursor:" + repr(self.stack)
    def __init__(self, fh):
        """ BTree constructor - takes a filehandle """
        self.fh = fh
        self.fh.seek(0)
        data = self.fh.read(64)
        # the signature string in the header selects the page layout to use
        if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
            self.parseheader15(data)
            self.page = self.Page15
            self.version = 15
        elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
            self.parseheader16(data)
            self.page = self.Page16
            self.version = 16
        elif data[19:].startswith(b"B-tree v2"):
            self.parseheader16(data)
            self.page = self.Page20
            self.version = 20
        else:
            print("unknown btree: %s" % hexdump(data))
            raise Exception("unknown b-tree")
    def parseheader15(self, data):
        # v1.5 header uses 16 bit page numbers
        self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
    def parseheader16(self, data):
        # v16 and v20 both have the same header format
        self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
    def readpage(self, nr):
        # read and decode the page with number `nr` (pages are `pagesize` aligned)
        self.fh.seek(nr * self.pagesize)
        return self.page(self.fh.read(self.pagesize))
    def find(self, rel, key):
        """
        Searches for a record with the specified relation to the key
        A cursor object is returned, the user can call getkey, getval on the cursor
        to retrieve the actual value.
        or call cursor.next() / cursor.prev() to enumerate values.
        'eq'  -> record equal to the key, None when not found
        'le'  -> last record with key <= to key
        'ge'  -> first record with key >= to key
        'lt'  -> last record with key < to key
        'gt'  -> first record with key > to key
        """
        # descend tree to leaf nearest to the `key`
        page = self.readpage(self.firstindex)
        stack = []
        while len(stack) < 256:
            act, ix = page.find(key)
            stack.append((page, ix))
            if act != 'recurse':
                break
            page = self.readpage(page.getpage(ix))
        if len(stack) == 256:
            # a depth of 256 levels cannot occur in a sane database
            raise Exception("b-tree corrupted")
        cursor = BTree.Cursor(self, stack)
        # now correct for what was actually asked.
        # `act` holds the relation the descent actually ended on.
        if act == rel:
            pass
        elif rel == 'eq' and act != 'eq':
            return None
        elif rel in ('ge', 'le') and act == 'eq':
            pass
        elif rel in ('gt', 'ge') and act == 'lt':
            cursor.next()
        elif rel == 'gt' and act == 'eq':
            cursor.next()
        elif rel in ('lt', 'le') and act == 'gt':
            cursor.prev()
        elif rel == 'lt' and act == 'eq':
            cursor.prev()
        return cursor
    def dump(self):
        """ raw dump of all records in the b-tree """
        print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
        self.dumpfree()
        self.dumptree(self.firstindex)
    def dumpfree(self):
        """ list all free pages """
        # free list pages start with (count, nextfree), followed by `count` page numbers
        fmt = "L" if self.version > 15 else "H"
        hdrsize = 8 if self.version > 15 else 4
        pn = self.firstfree
        if pn == 0:
            print("no free pages")
            return
        while pn:
            self.fh.seek(pn * self.pagesize)
            data = self.fh.read(self.pagesize)
            if len(data) == 0:
                print("could not read FREE data at page %06x" % pn)
                break
            count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
            freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
            freepages.insert(0, pn)
            for pn in freepages:
                self.fh.seek(pn * self.pagesize)
                data = self.fh.read(self.pagesize)
                print("%06x: free: %s" % (pn, hexdump(data[:64])))
            pn = nextfree
    def dumpindented(self, pn, indent=0):
        """
        Dump all nodes of the current page with keys indented, showing how the `indent`
        feature works
        """
        page = self.readpage(pn)
        print(" " * indent, page)
        if page.isindex():
            print(" " * indent, end="")
            self.dumpindented(page.preceeding, indent + 1)
            for p in range(len(page.index)):
                print(" " * indent, end="")
                self.dumpindented(page.getpage(p), indent + 1)
    def dumptree(self, pn):
        """
        Walks entire tree, dumping all records on each page
        in sequential order
        """
        page = self.readpage(pn)
        print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
        for ent in page.index:
            print("  %s" % ent)
        if page.preceeding:
            # index page: recurse into the leftmost child, then each entry's child
            self.dumptree(page.preceeding)
            for ent in page.index:
                self.dumptree(ent.page)
    def pagedump(self):
        """
        dump the contents of all pages, ignoring links between pages,
        this will enable you to view contents of pages which have become
        lost due to datacorruption.
        """
        self.fh.seek(self.pagesize)
        pn = 1
        while True:
            try:
                pagedata = self.fh.read(self.pagesize)
                if len(pagedata) == 0:
                    # end of file
                    break
                elif len(pagedata) != self.pagesize:
                    print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
                    break
                elif pagedata == b'\x00' * self.pagesize:
                    print("%06x: empty" % (pn))
                else:
                    page = self.page(pagedata)
                    print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
                    for ent in page.index:
                        print("  %s" % ent)
            except Exception as e:
                # keep going: a corrupt page should not stop the recovery dump
                print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
            pn += 1
class ID0File(object):
    """
    Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
    This is basically the low level netnode interface from the idasdk.
    There are two major groups of nodes in the database:
    key = "N"+name -> value = littleendian(nodeid)
    key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
    key = "."+bigendian(nodeid)+char(tag)+string
    key = "."+bigendian(nodeid)+char(tag)
    and some special nodes for bookkeeping:
    "$ MAX LINK"
    "$ MAX NODE"
    "$ NET DESC"
    Very old databases also have name entries with a lowercase 'n',
    and corresponding '-'+value nodes.
    I am not sure what those are for.
    several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
    nodeByName(name) returns the nodeid for a name
    bytes(nodeid, tag, val) returns the value for a specific node.
    """
    INDEX = 0
    def __init__(self, idb, fh):
        # `idb` supplies the file magic, `fh` the raw id0 section data
        self.btree = BTree(fh)
        self.wordsize = None
        if idb.magic == 'IDA2':
            # .i64 files use 64 bit values for some things.
            self.wordsize = 8
        elif idb.magic in ('IDA0', 'IDA1'):
            self.wordsize = 4
        else:
            # determine wordsize from value of '$ MAX NODE'
            c = self.btree.find('eq', b'$ MAX NODE')
            if c and not c.eof():
                self.wordsize = len(c.getval())
        if self.wordsize not in (4, 8):
            print("Can not determine wordsize for database - assuming 32 bit")
            self.wordsize = 4
        if self.wordsize == 4:
            # nodeids live at the top of the address space
            self.nodebase = 0xFF000000
            self.fmt = "L"
        else:
            self.nodebase = 0xFF00000000000000
            self.fmt = "Q"
        # set the keyformat for this database: '.' + nodeid + tag + value, big-endian
        self.keyfmt = ">s" + self.fmt + "s" + self.fmt
    def prettykey(self, key):
        """
        returns the key in a readable format.
        """
        f = list(self.decodekey(key))
        f[0] = f[0].decode('utf-8')
        if len(f) > 2 and type(f[2]) == bytes:
            f[2] = f[2].decode('utf-8')
        if f[0] == '.':
            if len(f) == 2:
                return "%s%16x" % tuple(f)
            elif len(f) == 3:
                return "%s%16x %s" % tuple(f)
            elif len(f) == 4:
                if f[2] == 'H' and type(f[3]) in (str, bytes):
                    # NOTE(review): .decode assumes f[3] is bytes; a str here would raise - confirm
                    f[3] = f[3].decode('utf-8')
                    return "%s%16x %s '%s'" % tuple(f)
                elif type(f[3]) in (int, long):
                    return "%s%16x %s %x" % tuple(f)
                else:
                    f[3] = hexdump(f[3])
                    return "%s%16x %s %s" % tuple(f)
        elif f[0] in ('N', 'n', '$'):
            if type(f[1]) in (int, long):
                return "%s %x %16x" % tuple(f)
            else:
                return "%s'%s'" % tuple(f)
        elif f[0] == '-':
            return "%s %x" % tuple(f)
        # fall back to a hexdump for keys we could not decode
        return hexdump(key)
    def prettyval(self, val):
        """
        returns the value in a readable format.
        """
        # word-sized values that look binary are printed as a number
        if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
            return "%x" % struct.unpack("<" + self.fmt, val)
        if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
            return "%x" % struct.unpack("<" + self.fmt, val)
        # anything not mostly-printable is hexdumped
        if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
            return hexdump(val)
        val = val.replace(b"\n", b"\\n")
        return "'%s'" % val.decode('utf-8', 'ignore')
    def nodeByName(self, name):
        """ Return a nodeid by name """
        # note: really long names are encoded differently:
        # 'N'+'\x00'+pack('Q', nameid) => ofs
        # and (ofs, 'N') -> nameid
        # at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
        cur = self.btree.find('eq', self.namekey(name))
        if cur:
            return struct.unpack('<' + self.fmt, cur.getval())[0]
    def namekey(self, name):
        # an int argument means a 'long name' id, encoded as 'N' + NUL + word
        if type(name) in (int, long):
            return struct.pack("<sB" + self.fmt, b'N', 0, name)
        return b'N' + name.encode('utf-8')
    def makekey(self, *args):
        """ return a binary key for the nodeid, tag and optional value """
        if len(args) > 1:
            # encode the tag character
            args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
        if len(args) == 3 and type(args[-1]) == str:
            # node.tag.string type keys
            return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
        elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
            # negative values -> need lowercase fmt char
            return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
        else:
            # node.tag.value type keys
            return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
    def decodekey(self, key):
        """
        splits a key in a tuple, one of:
        ( [ 'N', 'n', '$' ], 0, bignameid )
        ( [ 'N', 'n', '$' ], name )
        ( '-', id )
        ( '.', id )
        ( '.', id, tag )
        ( '.', id, tag, value )
        ( '.', id, 'H', name )
        """
        if key[:1] in (b'n', b'N', b'$'):
            if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
                # 'long name' id entry
                return struct.unpack(">sB" + self.fmt, key)
            else:
                return key[:1], key[1:].decode('utf-8', 'ignore')
        if key[:1] == b'-':
            return struct.unpack(">s" + self.fmt, key)
        # '.' keys: distinguish by total length
        if len(key) == 1 + self.wordsize:
            return struct.unpack(self.keyfmt[:3], key)
        if len(key) == 1 + self.wordsize + 1:
            return struct.unpack(self.keyfmt[:4], key)
        if len(key) == 1 + 2 * self.wordsize + 1:
            return struct.unpack(self.keyfmt[:5], key)
        if len(key) > 1 + self.wordsize + 1:
            # trailing bytes form a string value
            f = struct.unpack_from(self.keyfmt[:4], key)
            return f + (key[2 + self.wordsize:], )
        raise Exception("unknown key format")
    def bytes(self, *args):
        """ return a raw value for the given arguments """
        # either pass a cursor directly, or (nodeid, tag[, value]) to look up
        if len(args) == 1 and isinstance(args[0], BTree.Cursor):
            cur = args[0]
        else:
            cur = self.btree.find('eq', self.makekey(*args))
        if cur:
            return cur.getval()
    def int(self, *args):
        """
        Return the integer stored in the specified node.
        Any type of integer will be decoded: byte, short, long, long long
        """
        data = self.bytes(*args)
        if data is not None:
            if len(data) == 1:
                return struct.unpack("<B", data)[0]
            if len(data) == 2:
                return struct.unpack("<H", data)[0]
            if len(data) == 4:
                return struct.unpack("<L", data)[0]
            if len(data) == 8:
                return struct.unpack("<Q", data)[0]
            print("can't get int from %s" % hexdump(data))
    def string(self, *args):
        """ return string stored in node """
        data = self.bytes(*args)
        if data is not None:
            return data.rstrip(b"\x00").decode('utf-8')
    def name(self, id):
        """
        resolves a name, both short and long names.
        """
        data = self.bytes(id, 'N')
        if not data:
            print("%x has no name" % id)
            return
        if data[:1] == b'\x00':
            # long name: node stores a big-endian nameid, the text lives in a
            # blob at ( nodebase, 'S', 0x100 * nameid )
            nameid, = struct.unpack_from(">" + self.fmt, data, 1)
            nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
            return nameblob.rstrip(b"\x00").decode('utf-8')
        return data.rstrip(b"\x00").decode('utf-8')
    def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
        """
        Blobs are stored in sequential nodes
        with increasing index values.
        most blobs, like scripts start at index
        0, long names start at a specified
        offset.
        """
        startkey = self.makekey(nodeid, tag, start)
        endkey = self.makekey(nodeid, tag, end)
        cur = self.btree.find('ge', startkey)
        data = b''
        # concatenate consecutive chunk nodes until past `end`
        while cur.getkey() <= endkey:
            data += cur.getval()
            cur.next()
        return data
class ID1File(object):
    """
    Reads .id1 or 1.IDA files, containing byte flags
    This is basically the information for the .idc GetFlags(ea),
    FirstSeg(), NextSeg(ea), SegStart(ea), SegEnd(ea) functions
    """
    INDEX = 1
    class SegInfo:
        # one (startea, endea) address range plus the file offset of its flags
        def __init__(self, startea, endea, offset):
            self.startea = startea
            self.endea = endea
            self.offset = offset
    def __init__(self, idb, fh):
        if idb.magic == 'IDA2':
            wordsize, fmt = 8, "Q"
        else:
            wordsize, fmt = 4, "L"
        # todo: verify wordsize using the following heuristic:
        # L -> starting at: seglistofs + nsegs*seginfosize are all zero
        # L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
        self.fh = fh
        fh.seek(0)
        hdrdata = fh.read(32)
        magic = hdrdata[:4]
        if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
            # v1..v5 header: 16 bit segment and page counts
            nsegments, npages = struct.unpack_from("<HH", hdrdata, 4)
            # filesize / npages == 0x2000 for all cases
            seglistofs = 8
            seginfosize = 3
        elif magic == b'VA*\x00':
            always3, nsegments, always2k, npages = struct.unpack_from("<LLLL", hdrdata, 4)
            if always3 != 3:
                print("ID1: first dword != 3: %08x" % always3)
            if always2k != 0x800:
                print("ID1: third dword != 2k: %08x" % always2k)
            seglistofs = 20
            seginfosize = 2
        else:
            raise Exception("unknown id1 magic: %s" % hexdump(magic))
        self.seglist = []
        # Va0 - ida v3.0.5
        # Va3 - ida v3.6
        fh.seek(seglistofs)
        if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
            # old format: each segment record stores its own flags offset
            segdata = fh.read(nsegments * 3 * wordsize)
            for o in range(nsegments):
                startea, endea, id1ofs = struct.unpack_from("<" + fmt + fmt + fmt, segdata, o * seginfosize * wordsize)
                self.seglist.append(self.SegInfo(startea, endea, id1ofs))
        elif magic == b'VA*\x00':
            # new format: flags are stored back to back, 4 bytes per address,
            # starting right after the header page
            segdata = fh.read(nsegments * 2 * wordsize)
            id1ofs = 0x2000
            for o in range(nsegments):
                startea, endea = struct.unpack_from("<" + fmt + fmt, segdata, o * seginfosize * wordsize)
                self.seglist.append(self.SegInfo(startea, endea, id1ofs))
                id1ofs += 4 * (endea - startea)
    def is32bit_heuristic(self, fh, seglistofs):
        # unfinished: intended to verify the wordsize guess; currently a no-op
        fh.seek(seglistofs)
        # todo: verify wordsize using the following heuristic:
        # L -> starting at: seglistofs + nsegs*seginfosize are all zero
        # L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
    def dump(self):
        """ print first and last bits for each segment """
        for seg in self.seglist:
            print("==== %08x-%08x" % (seg.startea, seg.endea))
            if seg.endea - seg.startea < 30:
                for ea in range(seg.startea, seg.endea):
                    print("    %08x: %08x" % (ea, self.getFlags(ea)))
            else:
                for ea in range(seg.startea, seg.startea + 10):
                    print("    %08x: %08x" % (ea, self.getFlags(ea)))
                print("...")
                for ea in range(seg.endea - 10, seg.endea):
                    print("    %08x: %08x" % (ea, self.getFlags(ea)))
    def find_segment(self, ea):
        """ do a linear search for the given address in the segment list """
        for seg in self.seglist:
            if seg.startea <= ea < seg.endea:
                return seg
    def getFlags(self, ea):
        # 4 bytes of flags per address, regardless of database wordsize
        # NOTE(review): assumes `ea` is inside a known segment; an unmapped
        # address makes find_segment return None and this raises - confirm
        seg = self.find_segment(ea)
        self.fh.seek(seg.offset + 4 * (ea - seg.startea))
        return struct.unpack("<L", self.fh.read(4))[0]
    def firstSeg(self):
        # start address of the first segment
        return self.seglist[0].startea
    def nextSeg(self, ea):
        # start address of the segment following the one containing `ea`
        for i, seg in enumerate(self.seglist):
            if seg.startea <= ea < seg.endea:
                if i + 1 < len(self.seglist):
                    return self.seglist[i + 1].startea
                else:
                    return
    def segStart(self, ea):
        seg = self.find_segment(ea)
        return seg.startea
    def segEnd(self, ea):
        seg = self.find_segment(ea)
        return seg.endea
class NAMFile(object):
    """
    Reads .nam or NAMES.IDA files, containing ptrs to named items.

    The section consists of one header page followed by pages densely
    packed with word-sized addresses of named items.
    """
    INDEX = 2
    def __init__(self, idb, fh):
        # .i64 databases store 64 bit words, .idb databases 32 bit
        if idb.magic == 'IDA2':
            wordsize, fmt = 8, "Q"
        else:
            wordsize, fmt = 4, "L"
        self.fh = fh
        fh.seek(0)
        hdrdata = fh.read(64)
        magic = hdrdata[:4]
        # Va0 - ida v3.0.5
        # Va1 - ida v3.6
        if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
            always1, npages, always0, nnames, pagesize = struct.unpack_from("<HH" + fmt + fmt + "L", hdrdata, 4)
            if always1 != 1: print("nam: first hw = %d" % always1)
            if always0 != 0: print("nam: third dw = %d" % always0)
        elif magic == b'VA*\x00':
            always3, always1, always2k, npages, always0, nnames = struct.unpack_from("<LLLL" + fmt + "L", hdrdata, 4)
            if always3 != 3: print("nam: 3 hw = %d" % always3)
            if always1 != 1: print("nam: 1 hw = %d" % always1)
            if always0 != 0: print("nam: 0 dw = %d" % always0)
            if always2k != 0x800: print("nam: 2k dw = %d" % always2k)
            pagesize = 0x2000
        else:
            raise Exception("unknown nam magic: %s" % hexdump(magic))
        if idb.magic == 'IDA2':
            # 64 bit databases count in 32 bit units -> halve to get name count
            nnames >>= 1
        self.wordsize = wordsize
        self.wordfmt = fmt
        self.nnames = nnames
        # bugfix: `npages` was parsed but never stored, which made dump()
        # fail with an AttributeError on self.npages
        self.npages = npages
        self.pagesize = pagesize
    def dump(self):
        """Print a summary of the name-list header."""
        print("nam: nnames=%d, npages=%d, pagesize=%08x" % (self.nnames, self.npages, self.pagesize))
    def allnames(self):
        """Yield the address of every named item, in file order."""
        # name pages start right after the header page
        self.fh.seek(self.pagesize)
        n = 0
        while n < self.nnames:
            data = self.fh.read(self.pagesize)
            # last page may be only partially filled
            want = min(self.nnames - n, self.pagesize // self.wordsize)
            ofslist = struct.unpack_from("<%d%s" % (want, self.wordfmt), data, 0)
            for ea in ofslist:
                yield ea
            n += want
class SEGFile(object):
    """ reads .seg or $SEGS.IDA files. """
    # placeholder: segment section parsing is not implemented yet
    INDEX = 3
    def __init__(self, idb, fh):
        pass
class TILFile(object):
    """ reads .til files """
    # placeholder: til parsing is not implemented yet
    INDEX = 4
    def __init__(self, idb, fh):
        pass
# note: v3 databases had a .reg instead of .til
class ID2File(object):
    """
    Reads .id2 files
    ID2 sections contain packed data, resulting in triples
    of unknown use.
    """
    # placeholder: the id2 section format is not decoded yet
    INDEX = 5
    def __init__(self, idb, fh):
        pass
|
Carbonara-Project/Guanciale | guanciale/idblib.py | IDBFile.getsectioninfo | python | def getsectioninfo(self, i):
"""
Returns a tuple with section parameters by index.
The parameteres are:
* compression flag
* data offset
* data size
* data checksum
Sections are stored in a fixed order: id0, id1, nam, seg, til, id2
"""
if not 0 <= i < len(self.offsets):
return 0, 0, 0, 0
if self.offsets[i] == 0:
return 0, 0, 0, 0
self.fh.seek(self.offsets[i])
if self.fileversion < 5:
comp, size = struct.unpack("<BL", self.fh.read(5))
ofs = self.offsets[i] + 5
else:
comp, size = struct.unpack("<BQ", self.fh.read(9))
ofs = self.offsets[i] + 9
return comp, ofs, size, self.checksums[i] | Returns a tuple with section parameters by index.
The parameteres are:
* compression flag
* data offset
* data size
* data checksum
Sections are stored in a fixed order: id0, id1, nam, seg, til, id2 | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L252-L277 | null | class IDBFile(object):
"""
Provide access to the various sections in an .idb file.
Usage:
idb = IDBFile(fhandle)
id0 = idb.getsection(ID0File)
ID0File is expected to have a class property 'INDEX'
# v1..v5 id1 and nam files start with 'Va0' .. 'Va4'
# v6 id1 and nam files start with 'VA*'
# til files start with 'IDATIL'
# id2 files start with 'IDAS\x1d\xa5\x55\x55'
"""
def __init__(self, fh):
""" constructor takes a filehandle """
self.fh = fh
self.fh.seek(0)
hdrdata = self.fh.read(0x100)
self.magic = hdrdata[0:4].decode('utf-8', 'ignore')
if self.magic not in ('IDA0', 'IDA1', 'IDA2'):
raise Exception("invalid file magic")
values = struct.unpack_from("<6LH6L", hdrdata, 6)
if values[5] != 0xaabbccdd:
fileversion = 0
offsets = list(values[0:5])
offsets.append(0)
checksums = [0 for _ in range(6)]
else:
fileversion = values[6]
if fileversion < 5:
offsets = list(values[0:5])
checksums = list(values[8:13])
idsofs, idscheck = struct.unpack_from("<LH" if fileversion == 1 else "<LL", hdrdata, 56)
offsets.append(idsofs)
checksums.append(idscheck)
# note: filever 4 has '0x5c', zeros, md5, more zeroes
else:
values = struct.unpack_from("<QQLLHQQQ5LQL", hdrdata, 6)
offsets = [values[_] for _ in (0, 1, 5, 6, 7, 13)]
checksums = [values[_] for _ in (8, 9, 10, 11, 12, 14)]
# offsets now has offsets to the various idb parts
# id0, id1, nam, seg, til, id2 ( = sparse file )
self.offsets = offsets
self.checksums = checksums
self.fileversion = fileversion
def getsectioninfo(self, i):
"""
Returns a tuple with section parameters by index.
The parameteres are:
* compression flag
* data offset
* data size
* data checksum
Sections are stored in a fixed order: id0, id1, nam, seg, til, id2
"""
if not 0 <= i < len(self.offsets):
return 0, 0, 0, 0
if self.offsets[i] == 0:
return 0, 0, 0, 0
self.fh.seek(self.offsets[i])
if self.fileversion < 5:
comp, size = struct.unpack("<BL", self.fh.read(5))
ofs = self.offsets[i] + 5
else:
comp, size = struct.unpack("<BQ", self.fh.read(9))
ofs = self.offsets[i] + 9
return comp, ofs, size, self.checksums[i]
def getpart(self, ix):
"""
Returns a fileobject for the specified section.
This method optionally decompresses the data found in the .idb file,
and returns a file-like object, with seek, read, tell.
"""
if self.offsets[ix] == 0:
return
comp, ofs, size, checksum = self.getsectioninfo(ix)
fh = FileSection(self.fh, ofs, ofs + size)
if comp == 2:
import zlib
# very old databases used a different compression scheme:
wbits = -15 if self.magic == 'IDA0' else 15
fh = makeStringIO(zlib.decompress(fh.read(size), wbits))
elif comp == 0:
pass
else:
raise Exception("unsupported section encoding: %02x" % comp)
return fh
def getsection(self, cls):
"""
Constructs an object for the specified section.
"""
return cls(self, self.getpart(cls.INDEX))
|
Carbonara-Project/Guanciale | guanciale/idblib.py | IDBFile.getpart | python | def getpart(self, ix):
"""
Returns a fileobject for the specified section.
This method optionally decompresses the data found in the .idb file,
and returns a file-like object, with seek, read, tell.
"""
if self.offsets[ix] == 0:
return
comp, ofs, size, checksum = self.getsectioninfo(ix)
fh = FileSection(self.fh, ofs, ofs + size)
if comp == 2:
import zlib
# very old databases used a different compression scheme:
wbits = -15 if self.magic == 'IDA0' else 15
fh = makeStringIO(zlib.decompress(fh.read(size), wbits))
elif comp == 0:
pass
else:
raise Exception("unsupported section encoding: %02x" % comp)
return fh | Returns a fileobject for the specified section.
This method optionally decompresses the data found in the .idb file,
and returns a file-like object, with seek, read, tell. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L279-L302 | [
"def makeStringIO(data):\n if sys.version_info[0] == 2:\n from StringIO import StringIO\n return StringIO(data)\n else:\n from io import BytesIO\n return BytesIO(data)\n",
"def read(self, size=None):\n want = self.end - self.start - self.curpos\n if size is not None and wan... | class IDBFile(object):
"""
Provide access to the various sections in an .idb file.
Usage:
idb = IDBFile(fhandle)
id0 = idb.getsection(ID0File)
ID0File is expected to have a class property 'INDEX'
# v1..v5 id1 and nam files start with 'Va0' .. 'Va4'
# v6 id1 and nam files start with 'VA*'
# til files start with 'IDATIL'
# id2 files start with 'IDAS\x1d\xa5\x55\x55'
"""
def __init__(self, fh):
""" constructor takes a filehandle """
self.fh = fh
self.fh.seek(0)
hdrdata = self.fh.read(0x100)
self.magic = hdrdata[0:4].decode('utf-8', 'ignore')
if self.magic not in ('IDA0', 'IDA1', 'IDA2'):
raise Exception("invalid file magic")
values = struct.unpack_from("<6LH6L", hdrdata, 6)
if values[5] != 0xaabbccdd:
fileversion = 0
offsets = list(values[0:5])
offsets.append(0)
checksums = [0 for _ in range(6)]
else:
fileversion = values[6]
if fileversion < 5:
offsets = list(values[0:5])
checksums = list(values[8:13])
idsofs, idscheck = struct.unpack_from("<LH" if fileversion == 1 else "<LL", hdrdata, 56)
offsets.append(idsofs)
checksums.append(idscheck)
# note: filever 4 has '0x5c', zeros, md5, more zeroes
else:
values = struct.unpack_from("<QQLLHQQQ5LQL", hdrdata, 6)
offsets = [values[_] for _ in (0, 1, 5, 6, 7, 13)]
checksums = [values[_] for _ in (8, 9, 10, 11, 12, 14)]
# offsets now has offsets to the various idb parts
# id0, id1, nam, seg, til, id2 ( = sparse file )
self.offsets = offsets
self.checksums = checksums
self.fileversion = fileversion
def getsectioninfo(self, i):
"""
Returns a tuple with section parameters by index.
The parameteres are:
* compression flag
* data offset
* data size
* data checksum
Sections are stored in a fixed order: id0, id1, nam, seg, til, id2
"""
if not 0 <= i < len(self.offsets):
return 0, 0, 0, 0
if self.offsets[i] == 0:
return 0, 0, 0, 0
self.fh.seek(self.offsets[i])
if self.fileversion < 5:
comp, size = struct.unpack("<BL", self.fh.read(5))
ofs = self.offsets[i] + 5
else:
comp, size = struct.unpack("<BQ", self.fh.read(9))
ofs = self.offsets[i] + 9
return comp, ofs, size, self.checksums[i]
def getpart(self, ix):
"""
Returns a fileobject for the specified section.
This method optionally decompresses the data found in the .idb file,
and returns a file-like object, with seek, read, tell.
"""
if self.offsets[ix] == 0:
return
comp, ofs, size, checksum = self.getsectioninfo(ix)
fh = FileSection(self.fh, ofs, ofs + size)
if comp == 2:
import zlib
# very old databases used a different compression scheme:
wbits = -15 if self.magic == 'IDA0' else 15
fh = makeStringIO(zlib.decompress(fh.read(size), wbits))
elif comp == 0:
pass
else:
raise Exception("unsupported section encoding: %02x" % comp)
return fh
def getsection(self, cls):
"""
Constructs an object for the specified section.
"""
return cls(self, self.getpart(cls.INDEX))
|
Carbonara-Project/Guanciale | guanciale/idblib.py | BTree.find | python | def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
"""
# descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
act, ix = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break
page = self.readpage(page.getpage(ix))
if len(stack) == 256:
raise Exception("b-tree corrupted")
cursor = BTree.Cursor(self, stack)
# now correct for what was actually asked.
if act == rel:
pass
elif rel == 'eq' and act != 'eq':
return None
elif rel in ('ge', 'le') and act == 'eq':
pass
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next()
elif rel == 'gt' and act == 'eq':
cursor.next()
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev()
elif rel == 'lt' and act == 'eq':
cursor.prev()
return cursor | Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L668-L713 | [
"def readpage(self, nr):\n self.fh.seek(nr * self.pagesize)\n return self.page(self.fh.read(self.pagesize))\n",
"def next(self):\n \"\"\" move cursor to next entry \"\"\"\n page, ix = self.stack.pop()\n if page.isleaf():\n # from leaf move towards root\n ix += 1\n while self.st... | class BTree(object):
"""
BTree is the IDA main database engine.
It allows the user to do a binary search for records with
a specified key relation ( >, <, ==, >=, <= )
"""
class BasePage(object):
"""
Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
there are subclasses which specify the exact layout of the page header,
and index / leaf entries.
Leaf pages don't have a 'preceeding' page pointer.
"""
def __init__(self, data, entsize, entfmt):
self.preceeding, self.count = struct.unpack_from(entfmt, data)
if self.preceeding:
entrytype = self.IndexEntry
else:
entrytype = self.LeafEntry
self.index = []
key = b""
for i in range(self.count):
ent = entrytype(key, data, entsize * (1 + i))
self.index.append(ent)
key = ent.key
self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
def find(self, key):
"""
Searches pages for key, returns relation to key:
recurse -> found a next level index page to search for key.
also returns the next level page nr
gt -> found a value with a key greater than the one searched for.
lt -> found a value with a key less than the one searched for.
eq -> found a value with a key equal to the one searched for.
gt, lt and eq return the index for the key found.
# for an index entry: the key is 'less' than anything in the page pointed to.
"""
i = binary_search(self.index, key)
if i < 0:
if self.isindex():
return ('recurse', -1)
return ('gt', 0)
if self.index[i].key == key:
return ('eq', i)
if self.isindex():
return ('recurse', i)
return ('lt', i)
def getpage(self, ix):
""" For Indexpages, returns the page ptr for the specified entry """
return self.preceeding if ix < 0 else self.index[ix].page
def getkey(self, ix):
""" For all page types, returns the key for the specified entry """
return self.index[ix].key
def getval(self, ix):
""" For all page types, returns the value for the specified entry """
return self.index[ix].val
def isleaf(self):
""" True when this is a Leaf Page """
return self.preceeding == 0
def isindex(self):
""" True when this is an Index Page """
return self.preceeding != 0
def __repr__(self):
return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
######################################################
# Page objects for the various versions of the database
######################################################
class Page15(BasePage):
""" v1.5 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
self.unknown1 = 0
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 4, "<HH")
class Page16(BasePage):
""" v1.6 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Page20(BasePage):
""" v2.0 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
# unused zero byte is no longer there in v2.0 b-tree
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
self.unknown1 = 0
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Cursor:
"""
A Cursor object represents a position in the b-tree.
It has methods for moving to the next or previous item.
And methods for retrieving the key and value of the current position
The position is represented as a list of (page, index) tuples
"""
def __init__(self, db, stack):
self.db = db
self.stack = stack
def next(self):
""" move cursor to next entry """
page, ix = self.stack.pop()
if page.isleaf():
# from leaf move towards root
ix += 1
while self.stack and ix == len(page.index):
page, ix = self.stack.pop()
ix += 1
if ix < len(page.index):
self.stack.append((page, ix))
else:
# from node move towards leaf
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
while page.isindex():
ix = -1
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
ix = 0
self.stack.append((page, ix))
def prev(self):
""" move cursor to the previous entry """
page, ix = self.stack.pop()
ix -= 1
if page.isleaf():
# move towards root, until non 'prec' item found
while self.stack and ix < 0:
page, ix = self.stack.pop()
if ix >= 0:
self.stack.append((page, ix))
else:
# move towards leaf
self.stack.append((page, ix))
while page.isindex():
page = self.db.readpage(page.getpage(ix))
ix = len(page.index) - 1
self.stack.append((page, ix))
def eof(self):
return len(self.stack) == 0
def getkey(self):
""" return the key value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getkey(ix)
def getval(self):
""" return the data value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getval(ix)
def __repr__(self):
return "cursor:" + repr(self.stack)
def __init__(self, fh):
""" BTree constructor - takes a filehandle """
self.fh = fh
self.fh.seek(0)
data = self.fh.read(64)
if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
self.parseheader15(data)
self.page = self.Page15
self.version = 15
elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
self.parseheader16(data)
self.page = self.Page16
self.version = 16
elif data[19:].startswith(b"B-tree v2"):
self.parseheader16(data)
self.page = self.Page20
self.version = 20
else:
print("unknown btree: %s" % hexdump(data))
raise Exception("unknown b-tree")
def parseheader15(self, data):
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
def parseheader16(self, data):
# v16 and v20 both have the same header format
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
def readpage(self, nr):
self.fh.seek(nr * self.pagesize)
return self.page(self.fh.read(self.pagesize))
def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
"""
# descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
act, ix = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break
page = self.readpage(page.getpage(ix))
if len(stack) == 256:
raise Exception("b-tree corrupted")
cursor = BTree.Cursor(self, stack)
# now correct for what was actually asked.
if act == rel:
pass
elif rel == 'eq' and act != 'eq':
return None
elif rel in ('ge', 'le') and act == 'eq':
pass
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next()
elif rel == 'gt' and act == 'eq':
cursor.next()
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev()
elif rel == 'lt' and act == 'eq':
cursor.prev()
return cursor
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex)
def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree
def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1)
def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page)
def pagedump(self):
"""
dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption.
"""
self.fh.seek(self.pagesize)
pn = 1
while True:
try:
pagedata = self.fh.read(self.pagesize)
if len(pagedata) == 0:
break
elif len(pagedata) != self.pagesize:
print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
break
elif pagedata == b'\x00' * self.pagesize:
print("%06x: empty" % (pn))
else:
page = self.page(pagedata)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
except Exception as e:
print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
pn += 1
|
Carbonara-Project/Guanciale | guanciale/idblib.py | BTree.dump | python | def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex) | raw dump of all records in the b-tree | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L715-L719 | [
"def dumpfree(self):\n \"\"\" list all free pages \"\"\"\n fmt = \"L\" if self.version > 15 else \"H\"\n hdrsize = 8 if self.version > 15 else 4\n pn = self.firstfree\n if pn == 0:\n print(\"no free pages\")\n return\n while pn:\n self.fh.seek(pn * self.pagesize)\n data... | class BTree(object):
"""
BTree is the IDA main database engine.
It allows the user to do a binary search for records with
a specified key relation ( >, <, ==, >=, <= )
"""
    class BasePage(object):
        """
        Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
        there are subclasses which specify the exact layout of the page header,
        and index / leaf entries.
        Leaf pages don't have a 'preceeding' page pointer.
        """
        def __init__(self, data, entsize, entfmt):
            """Decode one raw page: a header followed by `count` fixed-size
            entry slots.  `entsize` and `entfmt` are supplied by the
            version-specific subclass (Page15 / Page16 / Page20)."""
            self.preceeding, self.count = struct.unpack_from(entfmt, data)
            # a non-zero 'preceeding' page pointer marks an interior (index) page
            if self.preceeding:
                entrytype = self.IndexEntry
            else:
                entrytype = self.LeafEntry
            self.index = []
            # the previous entry's key is handed to each entry constructor;
            # together with the per-entry 'indent' field this looks like key
            # prefix-compression -- NOTE(review): confirm against BaseLeafEntry
            key = b""
            for i in range(self.count):
                ent = entrytype(key, data, entsize * (1 + i))
                self.index.append(ent)
                key = ent.key
            # one extra slot after the last entry holds an unknown word plus
            # the free-space pointer for this page
            self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
        def find(self, key):
            """
            Searches pages for key, returns relation to key:
              recurse -> found a next level index page to search for key.
                         also returns the next level page nr
              gt      -> found a value with a key greater than the one searched for.
              lt      -> found a value with a key less than the one searched for.
              eq      -> found a value with a key equal to the one searched for.
            gt, lt and eq return the index for the key found.
            For an index entry: the key is 'less' than anything in the page
            pointed to.
            """
            i = binary_search(self.index, key)
            if i < 0:
                # key sorts before every entry on this page
                if self.isindex():
                    # descend via the 'preceeding' pointer (getpage(-1))
                    return ('recurse', -1)
                return ('gt', 0)
            if self.index[i].key == key:
                return ('eq', i)
            if self.isindex():
                return ('recurse', i)
            return ('lt', i)
        def getpage(self, ix):
            """ For Indexpages, returns the page ptr for the specified entry """
            return self.preceeding if ix < 0 else self.index[ix].page
        def getkey(self, ix):
            """ For all page types, returns the key for the specified entry """
            return self.index[ix].key
        def getval(self, ix):
            """ For all page types, returns the value for the specified entry """
            return self.index[ix].val
        def isleaf(self):
            """ True when this is a Leaf Page """
            return self.preceeding == 0
        def isindex(self):
            """ True when this is an Index Page """
            return self.preceeding != 0
        def __repr__(self):
            return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
######################################################
# Page objects for the various versions of the database
######################################################
    class Page15(BasePage):
        """ v1.5 b-tree page: 16-bit page numbers, 4-byte entry slots """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                # 16-bit child page number + 16-bit record offset
                self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                # NOTE(review): super(self.__class__, ...) recurses forever if
                # this class is ever subclassed; safe only while it stays a leaf
                # class in the hierarchy
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
                # v1.5 entries have no second unknown field; keep attribute
                # layout uniform with the v1.6 entry
                self.unknown1 = 0
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            # 4-byte entry slots, header is 16-bit preceeding ptr + 16-bit count
            super(self.__class__, self).__init__(data, 4, "<HH")
    class Page16(BasePage):
        """ v1.6 b-tree page: 32-bit page numbers, 6-byte entry slots """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                # 32-bit child page number + 16-bit record offset
                self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
                self.recofs += 1 # skip unused zero byte in each key/value record
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            # 6-byte entry slots, header is 32-bit preceeding ptr + 16-bit count
            super(self.__class__, self).__init__(data, 6, "<LH")
    class Page20(BasePage):
        """ v2.0 b-tree page: like v1.6 but records no longer have the
        leading zero byte, so recofs is used as-is """
        class IndexEntry(BaseIndexEntry):
            def __init__(self, key, data, ofs):
                self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
                # unused zero byte is no longer there in v2.0 b-tree
                super(self.__class__, self).__init__(data)
        class LeafEntry(BaseLeafEntry):
            def __init__(self, key, data, ofs):
                # indent widened to 16 bits compared with v1.5/v1.6
                self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
                self.unknown1 = 0
                super(self.__class__, self).__init__(key, data)
        def __init__(self, data):
            # same slot size / header format as v1.6
            super(self.__class__, self).__init__(data, 6, "<LH")
    class Cursor:
        """
        A Cursor object represents a position in the b-tree.
        It has methods for moving to the next or previous item.
        And methods for retrieving the key and value of the current position.
        The position is represented as a stack of (page, index) tuples,
        root first, current page last.
        """
        def __init__(self, db, stack):
            # db: the owning BTree (used to read child pages)
            # stack: list of (page, index) pairs from root to current position
            self.db = db
            self.stack = stack
        def next(self):
            """ move cursor to next entry """
            page, ix = self.stack.pop()
            if page.isleaf():
                # from leaf move towards root
                ix += 1
                # pop exhausted pages until an ancestor still has entries left;
                # an empty stack afterwards means we ran off the end (eof)
                while self.stack and ix == len(page.index):
                    page, ix = self.stack.pop()
                    ix += 1
                if ix < len(page.index):
                    self.stack.append((page, ix))
            else:
                # from node move towards leaf: the successor of an index entry
                # is the leftmost entry of the subtree it points to
                self.stack.append((page, ix))
                page = self.db.readpage(page.getpage(ix))
                while page.isindex():
                    # ix == -1 selects the 'preceeding' (leftmost) child
                    ix = -1
                    self.stack.append((page, ix))
                    page = self.db.readpage(page.getpage(ix))
                ix = 0
                self.stack.append((page, ix))
        def prev(self):
            """ move cursor to the previous entry """
            page, ix = self.stack.pop()
            ix -= 1
            if page.isleaf():
                # move towards root, until non 'prec' item found;
                # an empty stack afterwards means we ran off the front (eof)
                while self.stack and ix < 0:
                    page, ix = self.stack.pop()
                if ix >= 0:
                    self.stack.append((page, ix))
            else:
                # move towards leaf: the predecessor of an index entry is the
                # rightmost entry of the subtree to its left
                self.stack.append((page, ix))
                while page.isindex():
                    page = self.db.readpage(page.getpage(ix))
                    ix = len(page.index) - 1
                    self.stack.append((page, ix))
        def eof(self):
            # True once next()/prev() walked past either end of the tree
            return len(self.stack) == 0
        def getkey(self):
            """ return the key value pointed to by the cursor """
            page, ix = self.stack[-1]
            return page.getkey(ix)
        def getval(self):
            """ return the data value pointed to by the cursor """
            page, ix = self.stack[-1]
            return page.getval(ix)
        def __repr__(self):
            return "cursor:" + repr(self.stack)
    def __init__(self, fh):
        """ BTree constructor - takes a filehandle positioned anywhere in a
        b-tree section; sniffs the version banner and parses the header.

        Raises Exception when no known banner is found.
        """
        self.fh = fh
        self.fh.seek(0)
        data = self.fh.read(64)
        # the banner string sits after the binary header fields: at offset 13
        # for the v1.5 header, at offset 19 for the larger v1.6/v2.0 header
        if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
            self.parseheader15(data)
            self.page = self.Page15
            self.version = 15
        elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
            self.parseheader16(data)
            self.page = self.Page16
            self.version = 16
        elif data[19:].startswith(b"B-tree v2"):
            # v2.0 shares the v1.6 header layout but uses its own page format
            self.parseheader16(data)
            self.page = self.Page20
            self.version = 20
        else:
            print("unknown btree: %s" % hexdump(data))
            raise Exception("unknown b-tree")
def parseheader15(self, data):
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
def parseheader16(self, data):
# v16 and v20 both have the same header format
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
def readpage(self, nr):
self.fh.seek(nr * self.pagesize)
return self.page(self.fh.read(self.pagesize))
    def find(self, rel, key):
        """
        Searches for a record with the specified relation to the key.
        A cursor object is returned, the user can call getkey, getval on the
        cursor to retrieve the actual value,
        or call cursor.next() / cursor.prev() to enumerate values.
          'eq' -> record equal to the key, None when not found
          'le' -> last record with key <= to key
          'ge' -> first record with key >= to key
          'lt' -> last record with key < to key
          'gt' -> first record with key > to key
        """
        # descend tree to leaf nearest to the `key`
        page = self.readpage(self.firstindex)
        stack = []
        # the depth bound protects against page-pointer cycles in a
        # corrupted database
        while len(stack) < 256:
            act, ix = page.find(key)
            stack.append((page, ix))
            if act != 'recurse':
                break
            page = self.readpage(page.getpage(ix))
        if len(stack) == 256:
            raise Exception("b-tree corrupted")
        cursor = BTree.Cursor(self, stack)
        # now correct for what was actually asked: the leaf search ended in
        # state `act` ('eq', 'lt' or 'gt'); nudge the cursor to satisfy `rel`
        if act == rel:
            pass
        elif rel == 'eq' and act != 'eq':
            # exact match required but the leaf only bracketed the key
            return None
        elif rel in ('ge', 'le') and act == 'eq':
            # an exact hit satisfies both >= and <=
            pass
        elif rel in ('gt', 'ge') and act == 'lt':
            cursor.next()
        elif rel == 'gt' and act == 'eq':
            cursor.next()
        elif rel in ('lt', 'le') and act == 'gt':
            cursor.prev()
        elif rel == 'lt' and act == 'eq':
            cursor.prev()
        return cursor
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex)
def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree
    def dumpindented(self, pn, indent=0):
        """
        Dump all nodes of the current page with keys indented, showing how the
        `indent` feature works.
        """
        page = self.readpage(pn)
        print(" " * indent, page)
        # only index pages have child pages to descend into; leaf entries
        # carry no page number (getpage would fail on them)
        if page.isindex():
            print(" " * indent, end="")
            self.dumpindented(page.preceeding, indent + 1)
            for p in range(len(page.index)):
                print(" " * indent, end="")
                self.dumpindented(page.getpage(p), indent + 1)
def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page)
    def pagedump(self):
        """
        dump the contents of all pages, ignoring links between pages,
        this will enable you to view contents of pages which have become
        lost due to datacorruption.
        """
        # skip page 0 (the file header) and scan sequentially from page 1
        self.fh.seek(self.pagesize)
        pn = 1
        while True:
            try:
                pagedata = self.fh.read(self.pagesize)
                if len(pagedata) == 0:
                    # clean end of file
                    break
                elif len(pagedata) != self.pagesize:
                    # truncated trailing page
                    print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
                    break
                elif pagedata == b'\x00' * self.pagesize:
                    print("%06x: empty" % (pn))
                else:
                    page = self.page(pagedata)
                    print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
                    for ent in page.index:
                        print(" %s" % ent)
            except Exception as e:
                # a corrupt page must not stop the scan -- report and continue
                print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
            pn += 1
|
Carbonara-Project/Guanciale | guanciale/idblib.py | BTree.dumpfree | python | def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree | list all free pages | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L721-L742 | [
"def hexdump(data):\n if data is None:\n return\n return binascii.b2a_hex(data).decode('utf-8')\n"
] | class BTree(object):
"""
BTree is the IDA main database engine.
It allows the user to do a binary search for records with
a specified key relation ( >, <, ==, >=, <= )
"""
class BasePage(object):
"""
Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
there are subclasses which specify the exact layout of the page header,
and index / leaf entries.
Leaf pages don't have a 'preceeding' page pointer.
"""
def __init__(self, data, entsize, entfmt):
self.preceeding, self.count = struct.unpack_from(entfmt, data)
if self.preceeding:
entrytype = self.IndexEntry
else:
entrytype = self.LeafEntry
self.index = []
key = b""
for i in range(self.count):
ent = entrytype(key, data, entsize * (1 + i))
self.index.append(ent)
key = ent.key
self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
def find(self, key):
"""
Searches pages for key, returns relation to key:
recurse -> found a next level index page to search for key.
also returns the next level page nr
gt -> found a value with a key greater than the one searched for.
lt -> found a value with a key less than the one searched for.
eq -> found a value with a key equal to the one searched for.
gt, lt and eq return the index for the key found.
# for an index entry: the key is 'less' than anything in the page pointed to.
"""
i = binary_search(self.index, key)
if i < 0:
if self.isindex():
return ('recurse', -1)
return ('gt', 0)
if self.index[i].key == key:
return ('eq', i)
if self.isindex():
return ('recurse', i)
return ('lt', i)
def getpage(self, ix):
""" For Indexpages, returns the page ptr for the specified entry """
return self.preceeding if ix < 0 else self.index[ix].page
def getkey(self, ix):
""" For all page types, returns the key for the specified entry """
return self.index[ix].key
def getval(self, ix):
""" For all page types, returns the value for the specified entry """
return self.index[ix].val
def isleaf(self):
""" True when this is a Leaf Page """
return self.preceeding == 0
def isindex(self):
""" True when this is an Index Page """
return self.preceeding != 0
def __repr__(self):
return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
######################################################
# Page objects for the various versions of the database
######################################################
class Page15(BasePage):
""" v1.5 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
self.unknown1 = 0
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 4, "<HH")
class Page16(BasePage):
""" v1.6 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Page20(BasePage):
""" v2.0 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
# unused zero byte is no longer there in v2.0 b-tree
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
self.unknown1 = 0
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Cursor:
"""
A Cursor object represents a position in the b-tree.
It has methods for moving to the next or previous item.
And methods for retrieving the key and value of the current position
The position is represented as a list of (page, index) tuples
"""
def __init__(self, db, stack):
self.db = db
self.stack = stack
def next(self):
""" move cursor to next entry """
page, ix = self.stack.pop()
if page.isleaf():
# from leaf move towards root
ix += 1
while self.stack and ix == len(page.index):
page, ix = self.stack.pop()
ix += 1
if ix < len(page.index):
self.stack.append((page, ix))
else:
# from node move towards leaf
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
while page.isindex():
ix = -1
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
ix = 0
self.stack.append((page, ix))
def prev(self):
""" move cursor to the previous entry """
page, ix = self.stack.pop()
ix -= 1
if page.isleaf():
# move towards root, until non 'prec' item found
while self.stack and ix < 0:
page, ix = self.stack.pop()
if ix >= 0:
self.stack.append((page, ix))
else:
# move towards leaf
self.stack.append((page, ix))
while page.isindex():
page = self.db.readpage(page.getpage(ix))
ix = len(page.index) - 1
self.stack.append((page, ix))
def eof(self):
return len(self.stack) == 0
def getkey(self):
""" return the key value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getkey(ix)
def getval(self):
""" return the data value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getval(ix)
def __repr__(self):
return "cursor:" + repr(self.stack)
def __init__(self, fh):
""" BTree constructor - takes a filehandle """
self.fh = fh
self.fh.seek(0)
data = self.fh.read(64)
if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
self.parseheader15(data)
self.page = self.Page15
self.version = 15
elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
self.parseheader16(data)
self.page = self.Page16
self.version = 16
elif data[19:].startswith(b"B-tree v2"):
self.parseheader16(data)
self.page = self.Page20
self.version = 20
else:
print("unknown btree: %s" % hexdump(data))
raise Exception("unknown b-tree")
def parseheader15(self, data):
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
def parseheader16(self, data):
# v16 and v20 both have the same header format
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
def readpage(self, nr):
self.fh.seek(nr * self.pagesize)
return self.page(self.fh.read(self.pagesize))
def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
"""
# descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
act, ix = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break
page = self.readpage(page.getpage(ix))
if len(stack) == 256:
raise Exception("b-tree corrupted")
cursor = BTree.Cursor(self, stack)
# now correct for what was actually asked.
if act == rel:
pass
elif rel == 'eq' and act != 'eq':
return None
elif rel in ('ge', 'le') and act == 'eq':
pass
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next()
elif rel == 'gt' and act == 'eq':
cursor.next()
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev()
elif rel == 'lt' and act == 'eq':
cursor.prev()
return cursor
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex)
def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree
def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1)
def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page)
def pagedump(self):
"""
dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption.
"""
self.fh.seek(self.pagesize)
pn = 1
while True:
try:
pagedata = self.fh.read(self.pagesize)
if len(pagedata) == 0:
break
elif len(pagedata) != self.pagesize:
print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
break
elif pagedata == b'\x00' * self.pagesize:
print("%06x: empty" % (pn))
else:
page = self.page(pagedata)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
except Exception as e:
print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
pn += 1
|
Carbonara-Project/Guanciale | guanciale/idblib.py | BTree.dumpindented | python | def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1) | Dump all nodes of the current page with keys indented, showing how the `indent`
feature works | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L744-L756 | [
"def readpage(self, nr):\n self.fh.seek(nr * self.pagesize)\n return self.page(self.fh.read(self.pagesize))\n",
"def dumpindented(self, pn, indent=0):\n \"\"\"\n Dump all nodes of the current page with keys indented, showing how the `indent`\n feature works\n \"\"\"\n page = self.readpage(pn)... | class BTree(object):
"""
BTree is the IDA main database engine.
It allows the user to do a binary search for records with
a specified key relation ( >, <, ==, >=, <= )
"""
class BasePage(object):
"""
Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
there are subclasses which specify the exact layout of the page header,
and index / leaf entries.
Leaf pages don't have a 'preceeding' page pointer.
"""
def __init__(self, data, entsize, entfmt):
self.preceeding, self.count = struct.unpack_from(entfmt, data)
if self.preceeding:
entrytype = self.IndexEntry
else:
entrytype = self.LeafEntry
self.index = []
key = b""
for i in range(self.count):
ent = entrytype(key, data, entsize * (1 + i))
self.index.append(ent)
key = ent.key
self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
def find(self, key):
"""
Searches pages for key, returns relation to key:
recurse -> found a next level index page to search for key.
also returns the next level page nr
gt -> found a value with a key greater than the one searched for.
lt -> found a value with a key less than the one searched for.
eq -> found a value with a key equal to the one searched for.
gt, lt and eq return the index for the key found.
# for an index entry: the key is 'less' than anything in the page pointed to.
"""
i = binary_search(self.index, key)
if i < 0:
if self.isindex():
return ('recurse', -1)
return ('gt', 0)
if self.index[i].key == key:
return ('eq', i)
if self.isindex():
return ('recurse', i)
return ('lt', i)
def getpage(self, ix):
""" For Indexpages, returns the page ptr for the specified entry """
return self.preceeding if ix < 0 else self.index[ix].page
def getkey(self, ix):
""" For all page types, returns the key for the specified entry """
return self.index[ix].key
def getval(self, ix):
""" For all page types, returns the value for the specified entry """
return self.index[ix].val
def isleaf(self):
""" True when this is a Leaf Page """
return self.preceeding == 0
def isindex(self):
""" True when this is an Index Page """
return self.preceeding != 0
def __repr__(self):
return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
######################################################
# Page objects for the various versions of the database
######################################################
class Page15(BasePage):
""" v1.5 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
self.unknown1 = 0
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 4, "<HH")
class Page16(BasePage):
""" v1.6 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Page20(BasePage):
""" v2.0 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
# unused zero byte is no longer there in v2.0 b-tree
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
self.unknown1 = 0
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Cursor:
"""
A Cursor object represents a position in the b-tree.
It has methods for moving to the next or previous item.
And methods for retrieving the key and value of the current position
The position is represented as a list of (page, index) tuples
"""
def __init__(self, db, stack):
self.db = db
self.stack = stack
def next(self):
""" move cursor to next entry """
page, ix = self.stack.pop()
if page.isleaf():
# from leaf move towards root
ix += 1
while self.stack and ix == len(page.index):
page, ix = self.stack.pop()
ix += 1
if ix < len(page.index):
self.stack.append((page, ix))
else:
# from node move towards leaf
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
while page.isindex():
ix = -1
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
ix = 0
self.stack.append((page, ix))
def prev(self):
""" move cursor to the previous entry """
page, ix = self.stack.pop()
ix -= 1
if page.isleaf():
# move towards root, until non 'prec' item found
while self.stack and ix < 0:
page, ix = self.stack.pop()
if ix >= 0:
self.stack.append((page, ix))
else:
# move towards leaf
self.stack.append((page, ix))
while page.isindex():
page = self.db.readpage(page.getpage(ix))
ix = len(page.index) - 1
self.stack.append((page, ix))
def eof(self):
return len(self.stack) == 0
def getkey(self):
""" return the key value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getkey(ix)
def getval(self):
""" return the data value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getval(ix)
def __repr__(self):
return "cursor:" + repr(self.stack)
def __init__(self, fh):
""" BTree constructor - takes a filehandle """
self.fh = fh
self.fh.seek(0)
data = self.fh.read(64)
if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
self.parseheader15(data)
self.page = self.Page15
self.version = 15
elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
self.parseheader16(data)
self.page = self.Page16
self.version = 16
elif data[19:].startswith(b"B-tree v2"):
self.parseheader16(data)
self.page = self.Page20
self.version = 20
else:
print("unknown btree: %s" % hexdump(data))
raise Exception("unknown b-tree")
def parseheader15(self, data):
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
def parseheader16(self, data):
# v16 and v20 both have the same header format
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
def readpage(self, nr):
self.fh.seek(nr * self.pagesize)
return self.page(self.fh.read(self.pagesize))
def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
"""
# descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
act, ix = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break
page = self.readpage(page.getpage(ix))
if len(stack) == 256:
raise Exception("b-tree corrupted")
cursor = BTree.Cursor(self, stack)
# now correct for what was actually asked.
if act == rel:
pass
elif rel == 'eq' and act != 'eq':
return None
elif rel in ('ge', 'le') and act == 'eq':
pass
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next()
elif rel == 'gt' and act == 'eq':
cursor.next()
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev()
elif rel == 'lt' and act == 'eq':
cursor.prev()
return cursor
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex)
def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree
def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1)
def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page)
def pagedump(self):
"""
dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption.
"""
self.fh.seek(self.pagesize)
pn = 1
while True:
try:
pagedata = self.fh.read(self.pagesize)
if len(pagedata) == 0:
break
elif len(pagedata) != self.pagesize:
print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
break
elif pagedata == b'\x00' * self.pagesize:
print("%06x: empty" % (pn))
else:
page = self.page(pagedata)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
except Exception as e:
print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
pn += 1
|
Carbonara-Project/Guanciale | guanciale/idblib.py | BTree.dumptree | python | def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page) | Walks entire tree, dumping all records on each page
in sequential order | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L758-L770 | [
"def readpage(self, nr):\n self.fh.seek(nr * self.pagesize)\n return self.page(self.fh.read(self.pagesize))\n",
"def dumptree(self, pn):\n \"\"\"\n Walks entire tree, dumping all records on each page\n in sequential order\n \"\"\"\n page = self.readpage(pn)\n print(\"%06x: preceeding = %06... | class BTree(object):
"""
BTree is the IDA main database engine.
It allows the user to do a binary search for records with
a specified key relation ( >, <, ==, >=, <= )
"""
class BasePage(object):
"""
Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
there are subclasses which specify the exact layout of the page header,
and index / leaf entries.
Leaf pages don't have a 'preceeding' page pointer.
"""
def __init__(self, data, entsize, entfmt):
self.preceeding, self.count = struct.unpack_from(entfmt, data)
if self.preceeding:
entrytype = self.IndexEntry
else:
entrytype = self.LeafEntry
self.index = []
key = b""
for i in range(self.count):
ent = entrytype(key, data, entsize * (1 + i))
self.index.append(ent)
key = ent.key
self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
def find(self, key):
"""
Searches pages for key, returns relation to key:
recurse -> found a next level index page to search for key.
also returns the next level page nr
gt -> found a value with a key greater than the one searched for.
lt -> found a value with a key less than the one searched for.
eq -> found a value with a key equal to the one searched for.
gt, lt and eq return the index for the key found.
# for an index entry: the key is 'less' than anything in the page pointed to.
"""
i = binary_search(self.index, key)
if i < 0:
if self.isindex():
return ('recurse', -1)
return ('gt', 0)
if self.index[i].key == key:
return ('eq', i)
if self.isindex():
return ('recurse', i)
return ('lt', i)
def getpage(self, ix):
""" For Indexpages, returns the page ptr for the specified entry """
return self.preceeding if ix < 0 else self.index[ix].page
def getkey(self, ix):
""" For all page types, returns the key for the specified entry """
return self.index[ix].key
def getval(self, ix):
""" For all page types, returns the value for the specified entry """
return self.index[ix].val
def isleaf(self):
""" True when this is a Leaf Page """
return self.preceeding == 0
def isindex(self):
""" True when this is an Index Page """
return self.preceeding != 0
def __repr__(self):
return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
######################################################
# Page objects for the various versions of the database
######################################################
class Page15(BasePage):
""" v1.5 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
self.unknown1 = 0
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 4, "<HH")
class Page16(BasePage):
""" v1.6 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Page20(BasePage):
""" v2.0 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
# unused zero byte is no longer there in v2.0 b-tree
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
self.unknown1 = 0
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Cursor:
"""
A Cursor object represents a position in the b-tree.
It has methods for moving to the next or previous item.
And methods for retrieving the key and value of the current position
The position is represented as a list of (page, index) tuples
"""
def __init__(self, db, stack):
self.db = db
self.stack = stack
def next(self):
""" move cursor to next entry """
page, ix = self.stack.pop()
if page.isleaf():
# from leaf move towards root
ix += 1
while self.stack and ix == len(page.index):
page, ix = self.stack.pop()
ix += 1
if ix < len(page.index):
self.stack.append((page, ix))
else:
# from node move towards leaf
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
while page.isindex():
ix = -1
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
ix = 0
self.stack.append((page, ix))
def prev(self):
""" move cursor to the previous entry """
page, ix = self.stack.pop()
ix -= 1
if page.isleaf():
# move towards root, until non 'prec' item found
while self.stack and ix < 0:
page, ix = self.stack.pop()
if ix >= 0:
self.stack.append((page, ix))
else:
# move towards leaf
self.stack.append((page, ix))
while page.isindex():
page = self.db.readpage(page.getpage(ix))
ix = len(page.index) - 1
self.stack.append((page, ix))
def eof(self):
return len(self.stack) == 0
def getkey(self):
""" return the key value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getkey(ix)
def getval(self):
""" return the data value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getval(ix)
def __repr__(self):
return "cursor:" + repr(self.stack)
def __init__(self, fh):
""" BTree constructor - takes a filehandle """
self.fh = fh
self.fh.seek(0)
data = self.fh.read(64)
if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
self.parseheader15(data)
self.page = self.Page15
self.version = 15
elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
self.parseheader16(data)
self.page = self.Page16
self.version = 16
elif data[19:].startswith(b"B-tree v2"):
self.parseheader16(data)
self.page = self.Page20
self.version = 20
else:
print("unknown btree: %s" % hexdump(data))
raise Exception("unknown b-tree")
def parseheader15(self, data):
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
def parseheader16(self, data):
# v16 and v20 both have the same header format
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
def readpage(self, nr):
self.fh.seek(nr * self.pagesize)
return self.page(self.fh.read(self.pagesize))
def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
"""
# descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
act, ix = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break
page = self.readpage(page.getpage(ix))
if len(stack) == 256:
raise Exception("b-tree corrupted")
cursor = BTree.Cursor(self, stack)
# now correct for what was actually asked.
if act == rel:
pass
elif rel == 'eq' and act != 'eq':
return None
elif rel in ('ge', 'le') and act == 'eq':
pass
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next()
elif rel == 'gt' and act == 'eq':
cursor.next()
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev()
elif rel == 'lt' and act == 'eq':
cursor.prev()
return cursor
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex)
def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree
def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1)
def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page)
def pagedump(self):
"""
dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption.
"""
self.fh.seek(self.pagesize)
pn = 1
while True:
try:
pagedata = self.fh.read(self.pagesize)
if len(pagedata) == 0:
break
elif len(pagedata) != self.pagesize:
print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
break
elif pagedata == b'\x00' * self.pagesize:
print("%06x: empty" % (pn))
else:
page = self.page(pagedata)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
except Exception as e:
print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
pn += 1
|
Carbonara-Project/Guanciale | guanciale/idblib.py | BTree.pagedump | python | def pagedump(self):
"""
dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption.
"""
self.fh.seek(self.pagesize)
pn = 1
while True:
try:
pagedata = self.fh.read(self.pagesize)
if len(pagedata) == 0:
break
elif len(pagedata) != self.pagesize:
print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
break
elif pagedata == b'\x00' * self.pagesize:
print("%06x: empty" % (pn))
else:
page = self.page(pagedata)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
except Exception as e:
print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
pn += 1 | dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L772-L798 | null | class BTree(object):
"""
BTree is the IDA main database engine.
It allows the user to do a binary search for records with
a specified key relation ( >, <, ==, >=, <= )
"""
class BasePage(object):
"""
Baseclass for Pages. for the various btree versions ( 1.5, 1.6 and 2.0 )
there are subclasses which specify the exact layout of the page header,
and index / leaf entries.
Leaf pages don't have a 'preceeding' page pointer.
"""
def __init__(self, data, entsize, entfmt):
self.preceeding, self.count = struct.unpack_from(entfmt, data)
if self.preceeding:
entrytype = self.IndexEntry
else:
entrytype = self.LeafEntry
self.index = []
key = b""
for i in range(self.count):
ent = entrytype(key, data, entsize * (1 + i))
self.index.append(ent)
key = ent.key
self.unknown, self.freeptr = struct.unpack_from(entfmt, data, entsize * (1 + self.count))
def find(self, key):
"""
Searches pages for key, returns relation to key:
recurse -> found a next level index page to search for key.
also returns the next level page nr
gt -> found a value with a key greater than the one searched for.
lt -> found a value with a key less than the one searched for.
eq -> found a value with a key equal to the one searched for.
gt, lt and eq return the index for the key found.
# for an index entry: the key is 'less' than anything in the page pointed to.
"""
i = binary_search(self.index, key)
if i < 0:
if self.isindex():
return ('recurse', -1)
return ('gt', 0)
if self.index[i].key == key:
return ('eq', i)
if self.isindex():
return ('recurse', i)
return ('lt', i)
def getpage(self, ix):
""" For Indexpages, returns the page ptr for the specified entry """
return self.preceeding if ix < 0 else self.index[ix].page
def getkey(self, ix):
""" For all page types, returns the key for the specified entry """
return self.index[ix].key
def getval(self, ix):
""" For all page types, returns the value for the specified entry """
return self.index[ix].val
def isleaf(self):
""" True when this is a Leaf Page """
return self.preceeding == 0
def isindex(self):
""" True when this is an Index Page """
return self.preceeding != 0
def __repr__(self):
return ("leaf" if self.isleaf() else ("index<%d>" % self.preceeding)) + repr(self.index)
######################################################
# Page objects for the various versions of the database
######################################################
class Page15(BasePage):
""" v1.5 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<HH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<BBH", data, ofs)
self.unknown1 = 0
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 4, "<HH")
class Page16(BasePage):
""" v1.6 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown1, self.unknown, self.recofs = struct.unpack_from("<BBHH", data, ofs)
self.recofs += 1 # skip unused zero byte in each key/value record
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Page20(BasePage):
""" v2.0 b-tree page """
class IndexEntry(BaseIndexEntry):
def __init__(self, key, data, ofs):
self.page, self.recofs = struct.unpack_from("<LH", data, ofs)
# unused zero byte is no longer there in v2.0 b-tree
super(self.__class__, self).__init__(data)
class LeafEntry(BaseLeafEntry):
def __init__(self, key, data, ofs):
self.indent, self.unknown, self.recofs = struct.unpack_from("<HHH", data, ofs)
self.unknown1 = 0
super(self.__class__, self).__init__(key, data)
def __init__(self, data):
super(self.__class__, self).__init__(data, 6, "<LH")
class Cursor:
"""
A Cursor object represents a position in the b-tree.
It has methods for moving to the next or previous item.
And methods for retrieving the key and value of the current position
The position is represented as a list of (page, index) tuples
"""
def __init__(self, db, stack):
self.db = db
self.stack = stack
def next(self):
""" move cursor to next entry """
page, ix = self.stack.pop()
if page.isleaf():
# from leaf move towards root
ix += 1
while self.stack and ix == len(page.index):
page, ix = self.stack.pop()
ix += 1
if ix < len(page.index):
self.stack.append((page, ix))
else:
# from node move towards leaf
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
while page.isindex():
ix = -1
self.stack.append((page, ix))
page = self.db.readpage(page.getpage(ix))
ix = 0
self.stack.append((page, ix))
def prev(self):
""" move cursor to the previous entry """
page, ix = self.stack.pop()
ix -= 1
if page.isleaf():
# move towards root, until non 'prec' item found
while self.stack and ix < 0:
page, ix = self.stack.pop()
if ix >= 0:
self.stack.append((page, ix))
else:
# move towards leaf
self.stack.append((page, ix))
while page.isindex():
page = self.db.readpage(page.getpage(ix))
ix = len(page.index) - 1
self.stack.append((page, ix))
def eof(self):
return len(self.stack) == 0
def getkey(self):
""" return the key value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getkey(ix)
def getval(self):
""" return the data value pointed to by the cursor """
page, ix = self.stack[-1]
return page.getval(ix)
def __repr__(self):
return "cursor:" + repr(self.stack)
def __init__(self, fh):
""" BTree constructor - takes a filehandle """
self.fh = fh
self.fh.seek(0)
data = self.fh.read(64)
if data[13:].startswith(b"B-tree v 1.5 (C) Pol 1990"):
self.parseheader15(data)
self.page = self.Page15
self.version = 15
elif data[19:].startswith(b"B-tree v 1.6 (C) Pol 1990"):
self.parseheader16(data)
self.page = self.Page16
self.version = 16
elif data[19:].startswith(b"B-tree v2"):
self.parseheader16(data)
self.page = self.Page20
self.version = 20
else:
print("unknown btree: %s" % hexdump(data))
raise Exception("unknown b-tree")
def parseheader15(self, data):
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<HHHLH", data, 0)
def parseheader16(self, data):
# v16 and v20 both have the same header format
self.firstfree, self.pagesize, self.firstindex, self.reccount, self.pagecount = struct.unpack_from("<LHLLL", data, 0)
def readpage(self, nr):
self.fh.seek(nr * self.pagesize)
return self.page(self.fh.read(self.pagesize))
def find(self, rel, key):
"""
Searches for a record with the specified relation to the key
A cursor object is returned, the user can call getkey, getval on the cursor
to retrieve the actual value.
or call cursor.next() / cursor.prev() to enumerate values.
'eq' -> record equal to the key, None when not found
'le' -> last record with key <= to key
'ge' -> first record with key >= to key
'lt' -> last record with key < to key
'gt' -> first record with key > to key
"""
# descend tree to leaf nearest to the `key`
page = self.readpage(self.firstindex)
stack = []
while len(stack) < 256:
act, ix = page.find(key)
stack.append((page, ix))
if act != 'recurse':
break
page = self.readpage(page.getpage(ix))
if len(stack) == 256:
raise Exception("b-tree corrupted")
cursor = BTree.Cursor(self, stack)
# now correct for what was actually asked.
if act == rel:
pass
elif rel == 'eq' and act != 'eq':
return None
elif rel in ('ge', 'le') and act == 'eq':
pass
elif rel in ('gt', 'ge') and act == 'lt':
cursor.next()
elif rel == 'gt' and act == 'eq':
cursor.next()
elif rel in ('lt', 'le') and act == 'gt':
cursor.prev()
elif rel == 'lt' and act == 'eq':
cursor.prev()
return cursor
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex)
def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree
def dumpindented(self, pn, indent=0):
"""
Dump all nodes of the current page with keys indented, showing how the `indent`
feature works
"""
page = self.readpage(pn)
print(" " * indent, page)
if page.isindex():
print(" " * indent, end="")
self.dumpindented(page.preceeding, indent + 1)
for p in range(len(page.index)):
print(" " * indent, end="")
self.dumpindented(page.getpage(p), indent + 1)
def dumptree(self, pn):
"""
Walks entire tree, dumping all records on each page
in sequential order
"""
page = self.readpage(pn)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
if page.preceeding:
self.dumptree(page.preceeding)
for ent in page.index:
self.dumptree(ent.page)
def pagedump(self):
"""
dump the contents of all pages, ignoring links between pages,
this will enable you to view contents of pages which have become
lost due to datacorruption.
"""
self.fh.seek(self.pagesize)
pn = 1
while True:
try:
pagedata = self.fh.read(self.pagesize)
if len(pagedata) == 0:
break
elif len(pagedata) != self.pagesize:
print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
break
elif pagedata == b'\x00' * self.pagesize:
print("%06x: empty" % (pn))
else:
page = self.page(pagedata)
print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
for ent in page.index:
print(" %s" % ent)
except Exception as e:
print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
pn += 1
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.prettykey | python | def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key) | returns the key in a readable format. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L862-L893 | [
"def hexdump(data):\n if data is None:\n return\n return binascii.b2a_hex(data).decode('utf-8')\n",
"def decodekey(self, key):\n \"\"\"\n splits a key in a tuple, one of:\n ( [ 'N', 'n', '$' ], 0, bignameid )\n ( [ 'N', 'n', '$' ], name )\n ( '-', id )\n ( '.', id... | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.prettyval | python | def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore') | returns the value in a readable format. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L895-L906 | [
"def hexdump(data):\n if data is None:\n return\n return binascii.b2a_hex(data).decode('utf-8')\n"
] | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.nodeByName | python | def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0] | Return a nodeid by name | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L908-L917 | [
"def namekey(self, name):\n if type(name) in (int, long):\n return struct.pack(\"<sB\" + self.fmt, b'N', 0, name)\n return b'N' + name.encode('utf-8')\n"
] | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.makekey | python | def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args) | return a binary key for the nodeid, tag and optional value | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L924-L936 | null | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.decodekey | python | def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format") | splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name ) | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L938-L965 | null | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.bytes | python | def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval() | return a raw value for the given arguments | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L967-L975 | [
"def makekey(self, *args):\n \"\"\" return a binary key for the nodeid, tag and optional value \"\"\"\n if len(args) > 1:\n args = args[:1] + (args[1].encode('utf-8'),) + args[2:]\n if len(args) == 3 and type(args[-1]) == str:\n # node.tag.string type keys\n return struct.pack(self.key... | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.int | python | def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data)) | Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L977-L994 | [
"def hexdump(data):\n if data is None:\n return\n return binascii.b2a_hex(data).decode('utf-8')\n",
"def bytes(self, *args):\n \"\"\" return a raw value for the given arguments \"\"\"\n if len(args) == 1 and isinstance(args[0], BTree.Cursor):\n cur = args[0]\n else:\n cur = sel... | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.string | python | def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8') | return string stored in node | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L996-L1000 | [
"def bytes(self, *args):\n \"\"\" return a raw value for the given arguments \"\"\"\n if len(args) == 1 and isinstance(args[0], BTree.Cursor):\n cur = args[0]\n else:\n cur = self.btree.find('eq', self.makekey(*args))\n\n if cur:\n return cur.getval()\n"
] | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
Carbonara-Project/Guanciale | guanciale/idblib.py | ID0File.name | python | def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8') | resolves a name, both short and long names. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L1002-L1014 | [
"def bytes(self, *args):\n \"\"\" return a raw value for the given arguments \"\"\"\n if len(args) == 1 and isinstance(args[0], BTree.Cursor):\n cur = args[0]\n else:\n cur = self.btree.find('eq', self.makekey(*args))\n\n if cur:\n return cur.getval()\n",
"def blob(self, nodeid, t... | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
    """
    Read a blob stored as a run of sequential nodes with increasing
    index values.

    Most blobs (like scripts) start at index 0; long names start at a
    caller-specified offset.  Returns the concatenated bytes of all
    nodes whose key falls within [start, end].
    """
    startkey = self.makekey(nodeid, tag, start)
    endkey = self.makekey(nodeid, tag, end)
    cur = self.btree.find('ge', startkey)
    data = b''
    # Guard against a missing or exhausted cursor: without this,
    # cur.getkey() would raise when no key >= startkey exists, or when
    # iteration runs off the end of the tree.  The `cur and not
    # cur.eof()` validity idiom matches ID0File.__init__'s usage.
    while cur and not cur.eof() and cur.getkey() <= endkey:
        data += cur.getval()
        cur.next()
    return data
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset. | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L1016-L1033 | [
"def makekey(self, *args):\n \"\"\" return a binary key for the nodeid, tag and optional value \"\"\"\n if len(args) > 1:\n args = args[:1] + (args[1].encode('utf-8'),) + args[2:]\n if len(args) == 3 and type(args[-1]) == str:\n # node.tag.string type keys\n return struct.pack(self.key... | class ID0File(object):
"""
Reads .id0 or 0.ida files, containing a v1.5, v1.6 or v2.0 b-tree database.
This is basically the low level netnode interface from the idasdk.
There are two major groups of nodes in the database:
key = "N"+name -> value = littleendian(nodeid)
key = "."+bigendian(nodeid)+char(tag)+bigendian(value)
key = "."+bigendian(nodeid)+char(tag)+string
key = "."+bigendian(nodeid)+char(tag)
and some special nodes for bookkeeping:
"$ MAX LINK"
"$ MAX NODE"
"$ NET DESC"
Very old databases also have name entries with a lowercase 'n',
and corresponding '-'+value nodes.
I am not sure what those are for.
several items have specially named nodes, like "$ structs", "$ enums", "Root Node"
nodeByName(name) returns the nodeid for a name
bytes(nodeid, tag, val) returns the value for a specific node.
"""
INDEX = 0
def __init__(self, idb, fh):
self.btree = BTree(fh)
self.wordsize = None
if idb.magic == 'IDA2':
# .i64 files use 64 bit values for some things.
self.wordsize = 8
elif idb.magic in ('IDA0', 'IDA1'):
self.wordsize = 4
else:
# determine wordsize from value of '$ MAX NODE'
c = self.btree.find('eq', b'$ MAX NODE')
if c and not c.eof():
self.wordsize = len(c.getval())
if self.wordsize not in (4, 8):
print("Can not determine wordsize for database - assuming 32 bit")
self.wordsize = 4
if self.wordsize == 4:
self.nodebase = 0xFF000000
self.fmt = "L"
else:
self.nodebase = 0xFF00000000000000
self.fmt = "Q"
# set the keyformat for this database
self.keyfmt = ">s" + self.fmt + "s" + self.fmt
def prettykey(self, key):
"""
returns the key in a readable format.
"""
f = list(self.decodekey(key))
f[0] = f[0].decode('utf-8')
if len(f) > 2 and type(f[2]) == bytes:
f[2] = f[2].decode('utf-8')
if f[0] == '.':
if len(f) == 2:
return "%s%16x" % tuple(f)
elif len(f) == 3:
return "%s%16x %s" % tuple(f)
elif len(f) == 4:
if f[2] == 'H' and type(f[3]) in (str, bytes):
f[3] = f[3].decode('utf-8')
return "%s%16x %s '%s'" % tuple(f)
elif type(f[3]) in (int, long):
return "%s%16x %s %x" % tuple(f)
else:
f[3] = hexdump(f[3])
return "%s%16x %s %s" % tuple(f)
elif f[0] in ('N', 'n', '$'):
if type(f[1]) in (int, long):
return "%s %x %16x" % tuple(f)
else:
return "%s'%s'" % tuple(f)
elif f[0] == '-':
return "%s %x" % tuple(f)
return hexdump(key)
def prettyval(self, val):
"""
returns the value in a readable format.
"""
if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
return "%x" % struct.unpack("<" + self.fmt, val)
if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
return hexdump(val)
val = val.replace(b"\n", b"\\n")
return "'%s'" % val.decode('utf-8', 'ignore')
def nodeByName(self, name):
""" Return a nodeid by name """
# note: really long names are encoded differently:
# 'N'+'\x00'+pack('Q', nameid) => ofs
# and (ofs, 'N') -> nameid
# at nodebase ( 0xFF000000, 'S', 0x100*nameid ) there is a series of blobs for max 0x80000 sized names.
cur = self.btree.find('eq', self.namekey(name))
if cur:
return struct.unpack('<' + self.fmt, cur.getval())[0]
def namekey(self, name):
if type(name) in (int, long):
return struct.pack("<sB" + self.fmt, b'N', 0, name)
return b'N' + name.encode('utf-8')
def makekey(self, *args):
""" return a binary key for the nodeid, tag and optional value """
if len(args) > 1:
args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
if len(args) == 3 and type(args[-1]) == str:
# node.tag.string type keys
return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
# negative values -> need lowercase fmt char
return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
else:
# node.tag.value type keys
return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
def decodekey(self, key):
"""
splits a key in a tuple, one of:
( [ 'N', 'n', '$' ], 0, bignameid )
( [ 'N', 'n', '$' ], name )
( '-', id )
( '.', id )
( '.', id, tag )
( '.', id, tag, value )
( '.', id, 'H', name )
"""
if key[:1] in (b'n', b'N', b'$'):
if key[1:2] == b"\x00" and len(key) == 2 + self.wordsize:
return struct.unpack(">sB" + self.fmt, key)
else:
return key[:1], key[1:].decode('utf-8', 'ignore')
if key[:1] == b'-':
return struct.unpack(">s" + self.fmt, key)
if len(key) == 1 + self.wordsize:
return struct.unpack(self.keyfmt[:3], key)
if len(key) == 1 + self.wordsize + 1:
return struct.unpack(self.keyfmt[:4], key)
if len(key) == 1 + 2 * self.wordsize + 1:
return struct.unpack(self.keyfmt[:5], key)
if len(key) > 1 + self.wordsize + 1:
f = struct.unpack_from(self.keyfmt[:4], key)
return f + (key[2 + self.wordsize:], )
raise Exception("unknown key format")
def bytes(self, *args):
""" return a raw value for the given arguments """
if len(args) == 1 and isinstance(args[0], BTree.Cursor):
cur = args[0]
else:
cur = self.btree.find('eq', self.makekey(*args))
if cur:
return cur.getval()
def int(self, *args):
"""
Return the integer stored in the specified node.
Any type of integer will be decoded: byte, short, long, long long
"""
data = self.bytes(*args)
if data is not None:
if len(data) == 1:
return struct.unpack("<B", data)[0]
if len(data) == 2:
return struct.unpack("<H", data)[0]
if len(data) == 4:
return struct.unpack("<L", data)[0]
if len(data) == 8:
return struct.unpack("<Q", data)[0]
print("can't get int from %s" % hexdump(data))
def string(self, *args):
""" return string stored in node """
data = self.bytes(*args)
if data is not None:
return data.rstrip(b"\x00").decode('utf-8')
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8')
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data
|
def dump(self):
    """Print the flag words at the first and last bits of each segment.

    Short segments (< 30 addresses) are printed in full; longer ones
    show the first and last 10 addresses with an ellipsis between.
    """
    def show(ea):
        # One line per address: "  <ea>: <flags>".
        print(" %08x: %08x" % (ea, self.getFlags(ea)))

    for seg in self.seglist:
        print("==== %08x-%08x" % (seg.startea, seg.endea))
        if seg.endea - seg.startea < 30:
            for ea in range(seg.startea, seg.endea):
                show(ea)
        else:
            for ea in range(seg.startea, seg.startea + 10):
                show(ea)
            print("...")
            for ea in range(seg.endea - 10, seg.endea):
                show(ea)
"def getFlags(self, ea):\n seg = self.find_segment(ea)\n self.fh.seek(seg.offset + 4 * (ea - seg.startea))\n return struct.unpack(\"<L\", self.fh.read(4))[0]\n"
] | class ID1File(object):
"""
Reads .id1 or 1.IDA files, containing byte flags
This is basically the information for the .idc GetFlags(ea),
FirstSeg(), NextSeg(ea), SegStart(ea), SegEnd(ea) functions
"""
INDEX = 1
class SegInfo:
def __init__(self, startea, endea, offset):
self.startea = startea
self.endea = endea
self.offset = offset
def __init__(self, idb, fh):
if idb.magic == 'IDA2':
wordsize, fmt = 8, "Q"
else:
wordsize, fmt = 4, "L"
# todo: verify wordsize using the following heuristic:
# L -> starting at: seglistofs + nsegs*seginfosize are all zero
# L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
self.fh = fh
fh.seek(0)
hdrdata = fh.read(32)
magic = hdrdata[:4]
if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
nsegments, npages = struct.unpack_from("<HH", hdrdata, 4)
# filesize / npages == 0x2000 for all cases
seglistofs = 8
seginfosize = 3
elif magic == b'VA*\x00':
always3, nsegments, always2k, npages = struct.unpack_from("<LLLL", hdrdata, 4)
if always3 != 3:
print("ID1: first dword != 3: %08x" % always3)
if always2k != 0x800:
print("ID1: third dword != 2k: %08x" % always2k)
seglistofs = 20
seginfosize = 2
else:
raise Exception("unknown id1 magic: %s" % hexdump(magic))
self.seglist = []
# Va0 - ida v3.0.5
# Va3 - ida v3.6
fh.seek(seglistofs)
if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
segdata = fh.read(nsegments * 3 * wordsize)
for o in range(nsegments):
startea, endea, id1ofs = struct.unpack_from("<" + fmt + fmt + fmt, segdata, o * seginfosize * wordsize)
self.seglist.append(self.SegInfo(startea, endea, id1ofs))
elif magic == b'VA*\x00':
segdata = fh.read(nsegments * 2 * wordsize)
id1ofs = 0x2000
for o in range(nsegments):
startea, endea = struct.unpack_from("<" + fmt + fmt, segdata, o * seginfosize * wordsize)
self.seglist.append(self.SegInfo(startea, endea, id1ofs))
id1ofs += 4 * (endea - startea)
def is32bit_heuristic(self, fh, seglistofs):
fh.seek(seglistofs)
# todo: verify wordsize using the following heuristic:
# L -> starting at: seglistofs + nsegs*seginfosize are all zero
# L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
def dump(self):
""" print first and last bits for each segment """
for seg in self.seglist:
print("==== %08x-%08x" % (seg.startea, seg.endea))
if seg.endea - seg.startea < 30:
for ea in range(seg.startea, seg.endea):
print(" %08x: %08x" % (ea, self.getFlags(ea)))
else:
for ea in range(seg.startea, seg.startea + 10):
print(" %08x: %08x" % (ea, self.getFlags(ea)))
print("...")
for ea in range(seg.endea - 10, seg.endea):
print(" %08x: %08x" % (ea, self.getFlags(ea)))
def find_segment(self, ea):
""" do a linear search for the given address in the segment list """
for seg in self.seglist:
if seg.startea <= ea < seg.endea:
return seg
def getFlags(self, ea):
seg = self.find_segment(ea)
self.fh.seek(seg.offset + 4 * (ea - seg.startea))
return struct.unpack("<L", self.fh.read(4))[0]
def firstSeg(self):
return self.seglist[0].startea
def nextSeg(self, ea):
for i, seg in enumerate(self.seglist):
if seg.startea <= ea < seg.endea:
if i + 1 < len(self.seglist):
return self.seglist[i + 1].startea
else:
return
def segStart(self, ea):
seg = self.find_segment(ea)
return seg.startea
def segEnd(self, ea):
seg = self.find_segment(ea)
return seg.endea
|
def find_segment(self, ea):
    """Linear search: return the segment containing *ea*, or None."""
    return next(
        (seg for seg in self.seglist if seg.startea <= ea < seg.endea),
        None,
    )
"""
Reads .id1 or 1.IDA files, containing byte flags
This is basically the information for the .idc GetFlags(ea),
FirstSeg(), NextSeg(ea), SegStart(ea), SegEnd(ea) functions
"""
INDEX = 1
class SegInfo:
def __init__(self, startea, endea, offset):
self.startea = startea
self.endea = endea
self.offset = offset
def __init__(self, idb, fh):
if idb.magic == 'IDA2':
wordsize, fmt = 8, "Q"
else:
wordsize, fmt = 4, "L"
# todo: verify wordsize using the following heuristic:
# L -> starting at: seglistofs + nsegs*seginfosize are all zero
# L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
self.fh = fh
fh.seek(0)
hdrdata = fh.read(32)
magic = hdrdata[:4]
if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
nsegments, npages = struct.unpack_from("<HH", hdrdata, 4)
# filesize / npages == 0x2000 for all cases
seglistofs = 8
seginfosize = 3
elif magic == b'VA*\x00':
always3, nsegments, always2k, npages = struct.unpack_from("<LLLL", hdrdata, 4)
if always3 != 3:
print("ID1: first dword != 3: %08x" % always3)
if always2k != 0x800:
print("ID1: third dword != 2k: %08x" % always2k)
seglistofs = 20
seginfosize = 2
else:
raise Exception("unknown id1 magic: %s" % hexdump(magic))
self.seglist = []
# Va0 - ida v3.0.5
# Va3 - ida v3.6
fh.seek(seglistofs)
if magic in (b'Va4\x00', b'Va3\x00', b'Va2\x00', b'Va1\x00', b'Va0\x00'):
segdata = fh.read(nsegments * 3 * wordsize)
for o in range(nsegments):
startea, endea, id1ofs = struct.unpack_from("<" + fmt + fmt + fmt, segdata, o * seginfosize * wordsize)
self.seglist.append(self.SegInfo(startea, endea, id1ofs))
elif magic == b'VA*\x00':
segdata = fh.read(nsegments * 2 * wordsize)
id1ofs = 0x2000
for o in range(nsegments):
startea, endea = struct.unpack_from("<" + fmt + fmt, segdata, o * seginfosize * wordsize)
self.seglist.append(self.SegInfo(startea, endea, id1ofs))
id1ofs += 4 * (endea - startea)
def is32bit_heuristic(self, fh, seglistofs):
fh.seek(seglistofs)
# todo: verify wordsize using the following heuristic:
# L -> starting at: seglistofs + nsegs*seginfosize are all zero
# L -> starting at seglistofs .. nsegs*seginfosize every even word must be unique
def dump(self):
""" print first and last bits for each segment """
for seg in self.seglist:
print("==== %08x-%08x" % (seg.startea, seg.endea))
if seg.endea - seg.startea < 30:
for ea in range(seg.startea, seg.endea):
print(" %08x: %08x" % (ea, self.getFlags(ea)))
else:
for ea in range(seg.startea, seg.startea + 10):
print(" %08x: %08x" % (ea, self.getFlags(ea)))
print("...")
for ea in range(seg.endea - 10, seg.endea):
print(" %08x: %08x" % (ea, self.getFlags(ea)))
def find_segment(self, ea):
""" do a linear search for the given address in the segment list """
for seg in self.seglist:
if seg.startea <= ea < seg.endea:
return seg
def getFlags(self, ea):
seg = self.find_segment(ea)
self.fh.seek(seg.offset + 4 * (ea - seg.startea))
return struct.unpack("<L", self.fh.read(4))[0]
def firstSeg(self):
return self.seglist[0].startea
def nextSeg(self, ea):
for i, seg in enumerate(self.seglist):
if seg.startea <= ea < seg.endea:
if i + 1 < len(self.seglist):
return self.seglist[i + 1].startea
else:
return
def segStart(self, ea):
seg = self.find_segment(ea)
return seg.startea
def segEnd(self, ea):
seg = self.find_segment(ea)
return seg.endea
|
def handleFlow(self):
    """
    Build the control-flow MinHash (self.flowhash) for the procedure.

    Walks the basic-block instructions twice: the first pass collects
    jump endpoints and out-of-procedure call targets; the second feeds
    an abstracted token per transfer ("API:<name>", "OUT:<ordinal>" or
    "JMP:<relative-offset>") into a MinHash.  Also populates
    self.targets (addr -> token) and self.api (list of API call names).
    """
    self.targets = {}
    self.api = []

    # First pass: collect internal jump endpoints and out-of-procedure
    # call/jump targets.
    addrs = []
    internals = []
    for insn in self.bb_insns:
        if isinstance(insn, CallInsn):
            if insn.is_api:
                self.targets[insn.addr] = "API:" + insn.fcn_name
                self.api.append({"name": insn.fcn_name})
            else:
                internals.append(insn.addr)
        elif insn.jumpout:
            internals.append(insn.addr)
        else:
            addrs.append(insn.addr)
            addrs.append(insn.offset)

    # Abstract concrete addresses into ordinal positions so the hash is
    # layout-relative rather than address-dependent (resolves the old
    # TODO: manual sort-and-index loops replaced by sorted()).
    addrs_dict = {addr: i for i, addr in enumerate(sorted(addrs))}
    calleds_dict = {addr: str(i) for i, addr in enumerate(sorted(internals))}

    flowhash = datasketch.MinHash(num_perm=32)
    for insn in self.bb_insns:
        if isinstance(insn, CallInsn) and insn.is_api:
            flowhash.update("API:" + insn.fcn_name)
        elif isinstance(insn, CallInsn) or insn.jumpout:
            token = "OUT:" + calleds_dict[insn.addr]
            flowhash.update(token)
            self.targets[insn.addr] = token
        else:
            delta = addrs_dict[insn.addr] - addrs_dict[insn.offset]
            token = "JMP:" + str(delta)
            flowhash.update(token)
            self.targets[insn.addr] = token

    lean_flowhash = datasketch.LeanMinHash(flowhash)
    flowhash_buf = bytearray(lean_flowhash.bytesize())
    lean_flowhash.serialize(flowhash_buf)
    # NOTE(review): str(bytearray) yields raw bytes on py2 but the repr
    # on py3 — preserved as-is to keep hash output compatible.
    self.flowhash = str(flowhash_buf)
print f
for pp in self.bb_insns:
print pp | train | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/matching.py#L258-L328 | null | class ProcedureHandler(object):
def __init__(self, insns_list, bb_insns, arch):
self.insns_list = insns_list
self.bb_insns = bb_insns
self.arch = arch
def work(self):
self.handleFlow()
self.handleInsns()
def handleInsns(self):
consts = {}
ips = []
#set dafaukt value for PC, SP, BP
pc_offset = self.arch.ip_offset
regs = {
pc_offset: 0,
self.arch.sp_offset: 1,
self.arch.bp_offset: 2
}
consts = {}
irsbs = []
for instr_c in range(len(self.insns_list)):
off = self.insns_list[instr_c][0]
instr = self.insns_list[instr_c][1]
#manage instruction not recognized by libVEX
if self.arch.name == "X86" or self.arch.name == "AMD64":
if instr == "\xf4": #hlt x86 instruction
irsbs.append("HALT")
continue
elif instr.startswith("\xf0"): #lock x86 prefix
irsbs.append("LOCK")
if len(instr) == 1:
continue
instr = instr[1:]
try:
irsb = pyvex.IRSB(instr, off, self.arch, opt_level=0)
except pyvex.errors.PyVEXError as err:
print("[Please report to the developer] Error with instruction " + instr.encode("hex"))
raise err
irsbs.append(irsb)
stmts = irsb.statements
n_addr = 0
for i in range(len(stmts)):
#TODO PutI GetI
if isinstance(stmts[i], pyvex.stmt.IMark):
n_addr = stmts[i].addr + stmts[i].len
elif isinstance(stmts[i], pyvex.stmt.Put):
if stmts[i].offset == pc_offset and len(stmts[i].constants) == 1:
c = stmts[i].constants[0]
if c.value in self.targets:
stmts[i].data = StrConst(self.targets[c.value])
stmts[i].offset = 0
continue
elif c.value == n_addr:
stmts[i].data = StrConst("_NEXT_")
stmts[i].offset = 0
continue
else:
ips.append(c.value)
stmts[i].reg_name = 0xABADCAFE
stmts[i].offset = 0
else:
# constants replace
for j in range(len(stmts[i].constants)):
c = stmts[i].constants[j]
if c.value in self.targets:
stmts[i].constants[j] = StrConst(self.targets[c.value])
elif c.value == n_addr:
stmts[i].constants[j] = StrConst("_NEXT_")
else:
# constants abstraction
consts[c.value] = consts.get(c.value, len(consts))
c.value = consts[c.value]
# registers abstraction
regs[stmts[i].offset] = regs.get(stmts[i].offset, len(regs))
stmts[i].offset = regs[stmts[i].offset]
elif isinstance(stmts[i], pyvex.stmt.Exit):
c = stmts[i].dst
if c.value in self.targets:
stmts[i] = "if (%s) { PUT(offset=0) = %s; %s }" % (stmts[i].guard, self.targets[c.value], stmts[i].jumpkind)
continue
else:
ips.append(c.value)
stmts[i].reg_name = 0xDEADBEEF
else:
# constants replace
for j in range(len(stmts[i].constants)):
c = stmts[i].constants[j]
if c.value in self.targets:
stmts[i].constants[j] = StrConst(self.targets[c.value])
elif c.value == n_addr:
stmts[i].constants[j] = StrConst("_NEXT_")
else:
# constants abstraction
consts[c.value] = consts.get(c.value, len(consts))
c.value = consts[c.value]
for expr in stmts[i].expressions:
if isinstance(expr, pyvex.expr.Get):
# registers abstraction
regs[expr.offset] = regs.get(expr.offset, len(regs))
expr.offset = regs[expr.offset]
#order addresses
addrs = {}
ips.sort()
for i in range(len(ips)):
addrs[ips[i]] = i
#self.vex_code = ""
#self.shingled_code = ""
vexhash = datasketch.MinHash(num_perm=64)
shingled = {}
last = ""
for c in range(len(irsbs)):
irsb = irsbs[c]
if type(irsb) == type(""):
ngram = last + irsb
#self.vex_code += "+++ Instr #%d +++\n%s\n" % (c, irsb)
shingled[ngram] = shingled.get(ngram, 0) +1
last = irsb
continue
stmts = irsb.statements
ins = ""
for i in range(len(stmts)):
if isinstance(stmts[i], pyvex.stmt.IMark) or isinstance(stmts[i], pyvex.stmt.AbiHint):
continue
if hasattr(stmts[i], "reg_name"):
if stmts[i].reg_name == 0xABADCAFE:
stmts[i].constants[0].value = addrs[stmts[i].constants[0].value]
elif stmts[i].reg_name == 0xDEADBEEF:
stmts[i].dst.value = addrs[stmts[i].dst.value]
v = str(stmts[i]) + "\n"
ins += v
ngram = last + v
shingled[ngram] = shingled.get(ngram, 0) +1
last = v
#self.vex_code += "+++ Instr #%d +++\n%s\n" % (c, ins)
for ngram in shingled:
for c in range(shingled[ngram]):
vexhash.update("[%d]\n%s" % (c, ngram))
#self.shingled_code += "[%d]\n%s" % (c, ngram)
lean_vexhash = datasketch.LeanMinHash(vexhash)
vexhash_buf = bytearray(lean_vexhash.bytesize())
lean_vexhash.serialize(vexhash_buf)
self.vexhash = str(vexhash_buf)
def handleFlow(self):
#TODO replace sorting loops with sorted function
self.targets = {}
self.api = []
#flow = []
addrs = []
internals = []
for instr in self.bb_insns:
if isinstance(instr, CallInsn):
if instr.is_api:
self.targets[instr.addr] = "API:" + instr.fcn_name
self.api.append({"name": instr.fcn_name})
else:
internals.append(instr.addr)
else:
if instr.jumpout:
internals.append(instr.addr)
else:
addrs.append(instr.addr)
addrs.append(instr.offset)
addrs.sort()
addrs_dict = {}
for i in range(len(addrs)):
addrs_dict[addrs[i]] = i
internals_sorted = internals[:]
internals_sorted.sort()
calleds_dict = {}
for i in range(len(internals_sorted)):
calleds_dict[internals_sorted[i]] = str(i)
flowhash = datasketch.MinHash(num_perm=32)
for instr in self.bb_insns:
if isinstance(instr, CallInsn):
if instr.is_api:
#flow.append(hex(instr.offset)+" API:" + instr.fcn_name)
flowhash.update("API:" + instr.fcn_name)
else:
#flow.append(hex(instr.offset)+" OUT:" + calleds_dict[instr.addr])
flowhash.update("OUT:" + calleds_dict[instr.addr])
self.targets[instr.addr] = "OUT:" + calleds_dict[instr.addr]
else:
if instr.jumpout:
#flow.append(hex(instr.offset)+" OUT:" + calleds_dict[instr.addr])
flowhash.update("OUT:" + calleds_dict[instr.addr])
self.targets[instr.addr] = "OUT:" + calleds_dict[instr.addr]
else:
off = addrs_dict[instr.offset]
tgt = addrs_dict[instr.addr]
#flow.append("%x (%d) JMP:%s - %x (%d)" % (instr.offset, off, str(tgt - off), instr.addr, tgt))
flowhash.update("JMP:" + str(tgt - off))
self.targets[instr.addr] = "JMP:" + str(tgt - off)
lean_flowhash = datasketch.LeanMinHash(flowhash)
flowhash_buf = bytearray(lean_flowhash.bytesize())
lean_flowhash.serialize(flowhash_buf)
self.flowhash = str(flowhash_buf)
'''
for f in flow:
print f
for pp in self.bb_insns:
print pp
'''
|
def read_csv(directory):
    '''
    Scrape a twitter archive csv, yielding each row as a dict.

    Args:
        directory (str): CSV file, or a directory containing tweets.csv.

    Returns:
        generator of dicts, one per csv row, with 'text' decoded to
        unicode.
    '''
    if path.isdir(directory):
        csvfile = path.join(directory, 'tweets.csv')
    else:
        csvfile = directory

    # newline='' is required by the csv module so that quoted fields
    # containing embedded newlines are parsed correctly.
    with open(csvfile, 'r', newline='') as f:
        for tweet in csv.DictReader(f):
            try:
                # Python 2 leftover: bytes need decoding to unicode.
                tweet['text'] = tweet['text'].decode('utf-8')
            except AttributeError:
                # Python 3: value is already str.
                pass
            yield tweet
Args:
directory (str): CSV file or (directory containing tweets.csv).
field (str): Field with the tweet's text (default: text).
fieldnames (list): The column names for a csv with no header. Must contain <field>.
Leave as None to read CSV header (default: None).
Returns:
generator | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/archive.py#L22-L47 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os import path
import json
import csv
from glob import iglob
def read_json(directory, data_files='data/js/tweets/*.js'):
    '''
    Scrape the monthly JS files in a twitter archive, yielding tweets.

    Inspiration from https://github.com/mshea/Parse-Twitter-Archive
    '''
    pattern = path.join(directory, data_files)
    for fname in iglob(pattern):
        with open(fname, 'r') as f:
            # Twitter's first line is a JS variable assignment, not
            # JSON, so it is skipped before parsing.
            raw = "".join(f.readlines()[1:])
        for tweet in json.loads(raw):
            yield tweet
def read_text(data_file):
    '''Yield each line of *data_file*, stripped, decoded to unicode when needed.'''
    with open(data_file, 'r') as fh:
        lines = fh.readlines()
    for line in lines:
        stripped = line.rstrip()
        try:
            # Python 2 byte strings need decoding; Python 3 str has no .decode.
            yield stripped.decode('utf-8')
        except AttributeError:
            yield stripped
|
def read_json(directory, data_files='data/js/tweets/*.js'):
    '''
    Scrape a twitter archive's JSON files, yielding tweets one at a time.

    Inspiration from https://github.com/mshea/Parse-Twitter-Archive

    Args:
        directory (str): root of the unpacked twitter archive.
        data_files (str): glob pattern, relative to *directory*, of the
            archive's per-month javascript files.
    '''
    pattern = path.join(directory, data_files)
    for filename in iglob(pattern):
        with open(filename, 'r') as fh:
            # Twitter's JSON first line is bogus
            contents = fh.readlines()[1:]
        for tweet in json.loads("".join(contents)):
            yield tweet
Inspiration from https://github.com/mshea/Parse-Twitter-Archive | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/archive.py#L50-L65 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os import path
import json
import csv
from glob import iglob
def read_csv(directory):
'''
Scrape a twitter archive csv, yielding tweet text.
Args:
directory (str): CSV file or (directory containing tweets.csv).
field (str): Field with the tweet's text (default: text).
fieldnames (list): The column names for a csv with no header. Must contain <field>.
Leave as None to read CSV header (default: None).
Returns:
generator
'''
if path.isdir(directory):
csvfile = path.join(directory, 'tweets.csv')
else:
csvfile = directory
with open(csvfile, 'r') as f:
for tweet in csv.DictReader(f):
try:
tweet['text'] = tweet['text'].decode('utf-8')
except AttributeError:
pass
yield tweet
def read_text(data_file):
with open(data_file, 'r') as f:
data = f.readlines()
for tweet in data:
try:
yield tweet.rstrip().decode('utf-8')
except AttributeError:
yield tweet.rstrip()
|
def _autofollow(api, action, dry_run):
    '''
    Follow back or unfollow the friends/followers of the user authenticated in 'api'.

    Args:
        api (twitter_bot_utils.api.API): authenticated API wrapper with a ``logger``.
        action (str): either "follow" or "unfollow".
        dry_run (bool): don't actually (un)follow, just report.
    '''
    try:
        # get the last 5000 followers
        followers = api.followers_ids()
        # Get the last 5000 people user has followed
        friends = api.friends_ids()
    except TweepError as e:
        # Bug fix: message previously said "followers/followers".
        api.logger.error('%s: error getting friends/followers', action)
        api.logger.error("%s", e)
        return

    if action == "unfollow":
        method = api.destroy_friendship
        independent, dependent = followers, friends
    elif action == "follow":
        method = api.create_friendship
        independent, dependent = friends, followers
    else:
        # NOTE(review): IndexError is an odd choice (ValueError would fit),
        # but kept so existing callers catching it keep working.
        raise IndexError("Unknown action: {}".format(action))

    api.logger.info('%sing: found %s friends, %s followers', action, len(friends), len(followers))

    # auto-following:  for all my followers, if I don't already follow them: create friendship.
    # auto-unfollowing: for all my friends, if they don't follow me: destroy friendship.
    # A set makes each membership test O(1) instead of O(n).
    independent = set(independent)
    targets = [x for x in dependent if x not in independent]

    for uid in targets:
        try:
            api.logger.info('%sing %s', action, uid)
            if not dry_run:
                method(id=uid)
        except RateLimitError:
            api.logger.warning("reached Twitter's rate limit, sleeping for %d minutes", RATE_LIMIT_RESET_MINUTES)
            sleep(RATE_LIMIT_RESET_MINUTES * 60)
            method(id=uid)
        except TweepError as e:
            api.logger.error('error %sing on %s', action, uid)
            api.logger.error("code %s: %s", e.api_code, e)
:api twitter_bot_utils.api.API
:dry_run bool don't actually (un)follow, just report | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/tools.py#L30-L83 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from time import sleep
from tweepy.error import RateLimitError, TweepError
RATE_LIMIT_RESET_MINUTES = 15
def follow_back(api, dry_run=None):
_autofollow(api, 'follow', dry_run)
def unfollow(api, dry_run=None):
_autofollow(api, 'unfollow', dry_run)
def fave_mentions(api, dry_run=None):
'''
Fave (aka like) recent mentions from user authenicated in 'api'.
:api twitter_bot_utils.api.API
:dry_run bool don't actually favorite, just report
'''
f = api.favorites(include_entities=False, count=150)
favs = [m.id_str for m in f]
try:
mentions = api.mentions_timeline(trim_user=True, include_entities=False, count=75)
except Exception as e:
raise e
for mention in mentions:
# only try to fav if not in recent favs
if mention.id_str not in favs:
try:
api.logger.info('liking %s: %s', mention.id_str, mention.text)
if not dry_run:
api.create_favorite(mention.id_str, include_entities=False)
except RateLimitError:
api.logger.warning("reached Twitter's rate limit, sleeping for %d minutes", RATE_LIMIT_RESET_MINUTES)
sleep(RATE_LIMIT_RESET_MINUTES * 60)
api.create_favorite(mention.id_str, include_entities=False)
except TweepError as e:
api.logger.error('error liking %s', mention.id_str)
api.logger.error("code %s: %s", e.api_code, e)
|
def fave_mentions(api, dry_run=None):
    '''
    Fave (aka like) recent mentions of the user authenticated in 'api'.

    Args:
        api (twitter_bot_utils.api.API): authenticated API wrapper with a ``logger``.
        dry_run (bool): don't actually favorite, just report.
    '''
    f = api.favorites(include_entities=False, count=150)
    # Set of already-liked status ids for O(1) membership tests below.
    favs = {m.id_str for m in f}

    # Bug fix: this call was wrapped in ``try: ... except Exception as e: raise e``,
    # which re-raised unchanged and only added noise; call it directly.
    mentions = api.mentions_timeline(trim_user=True, include_entities=False, count=75)

    for mention in mentions:
        # only try to fav if not in recent favs
        if mention.id_str not in favs:
            try:
                api.logger.info('liking %s: %s', mention.id_str, mention.text)
                if not dry_run:
                    api.create_favorite(mention.id_str, include_entities=False)
            except RateLimitError:
                api.logger.warning("reached Twitter's rate limit, sleeping for %d minutes", RATE_LIMIT_RESET_MINUTES)
                sleep(RATE_LIMIT_RESET_MINUTES * 60)
                api.create_favorite(mention.id_str, include_entities=False)
            except TweepError as e:
                api.logger.error('error liking %s', mention.id_str)
                api.logger.error("code %s: %s", e.api_code, e)
:api twitter_bot_utils.api.API
:dry_run bool don't actually favorite, just report | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/tools.py#L86-L116 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from time import sleep
from tweepy.error import RateLimitError, TweepError
RATE_LIMIT_RESET_MINUTES = 15
def follow_back(api, dry_run=None):
_autofollow(api, 'follow', dry_run)
def unfollow(api, dry_run=None):
_autofollow(api, 'unfollow', dry_run)
def _autofollow(api, action, dry_run):
'''
Follow back or unfollow the friends/followers of user authenicated in 'api'.
:api twitter_bot_utils.api.API
:dry_run bool don't actually (un)follow, just report
'''
try:
# get the last 5000 followers
followers = api.followers_ids()
# Get the last 5000 people user has followed
friends = api.friends_ids()
except TweepError as e:
api.logger.error('%s: error getting followers/followers', action)
api.logger.error("%s", e)
return
if action == "unfollow":
method = api.destroy_friendship
independent, dependent = followers, friends
elif action == "follow":
method = api.create_friendship
independent, dependent = friends, followers
else:
raise IndexError("Unknown action: {}".format(action))
api.logger.info('%sing: found %s friends, %s followers', action, len(friends), len(followers))
# auto-following:
# for all my followers
# if i don't already follow them: create friendship
# auto-unfollowing:
# for all my friends
# if they don't follow me: destroy friendship
targets = [x for x in dependent if x not in independent]
for uid in targets:
try:
api.logger.info('%sing %s', action, uid)
if not dry_run:
method(id=uid)
except RateLimitError:
api.logger.warning("reached Twitter's rate limit, sleeping for %d minutes", RATE_LIMIT_RESET_MINUTES)
sleep(RATE_LIMIT_RESET_MINUTES * 60)
method(id=uid)
except TweepError as e:
api.logger.error('error %sing on %s', action, uid)
api.logger.error("code %s: %s", e.api_code, e)
|
def post(arguments):
    '''
    Post text to a given twitter account.

    Args:
        arguments (argparse.Namespace): carries ``screen_name``, ``update``
            (the status text, or "-" to read it from stdin), an optional
            ``media_file`` list, and the ``dry_run`` flag.
    '''
    twitter = api.API(arguments)
    params = {}

    # "-" means: read the status text from standard input.
    if arguments.update == '-':
        params['status'] = sys.stdin.read()
    else:
        params['status'] = arguments.update

    if arguments.media_file:
        medias = [twitter.media_upload(m) for m in arguments.media_file]
        params['media_ids'] = [m.media_id for m in medias]

    try:
        logging.getLogger(arguments.screen_name).info('status: %s', params['status'])
        if not arguments.dry_run:
            twitter.update_status(**params)
    except tweepy.TweepError as e:
        # Bug fix: TweepError defines no ``message`` attribute on Python 3,
        # so ``e.message`` raised AttributeError; log the exception itself.
        logging.getLogger(arguments.screen_name).error("%s", e)
# -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import logging
from argparse import ArgumentParser
import tweepy
from . import __version__ as version
from . import api, args, confighelper, tools
ARGS = ['config', 'dry-run', 'verbose', 'quiet']
AUTHORIZATION_FAILED_MESSAGE = "Authorization failed. Check that the consumer key and secret are correct."
DEPRECATION = 'This command is deprecated. Please use the tbu command.'
def fave_mentions(arguments=None):
if arguments is None:
parser = ArgumentParser(description='fave/like mentions', usage='%(prog)s [options] screen_name')
parser.add_argument('screen_name', type=str)
args.add_default_args(parser, version=version, include=ARGS)
print(DEPRECATION, file=sys.stderr)
arguments = parser.parse_args()
twitter = api.API(arguments)
tools.fave_mentions(twitter, arguments.dry_run)
def auto_follow(arguments=None):
if arguments is None:
parser = ArgumentParser(description="automatic following and unfollowing",
usage='%(prog)s [options] screen_name')
parser.add_argument('screen_name', type=str)
parser.add_argument('-U', '--unfollow', action='store_true', help="Unfollow those who don't follow you")
args.add_default_args(parser, version=version, include=ARGS)
arguments = parser.parse_args()
print(DEPRECATION, file=sys.stderr)
twitter = api.API(arguments)
if arguments.unfollow:
tools.unfollow(twitter, arguments.dry_run)
else:
tools.follow_back(twitter, arguments.dry_run)
def authenticate(arguments=None):
if arguments is None:
parser = ArgumentParser(description='Authorize an account with a twitter application.')
parser.add_argument('-c', metavar='file', type=str, default=None, dest='config_file', help='config file')
parser.add_argument('--app', metavar='app', type=str, help='app name in config file')
parser.add_argument('-s', '--save', action='store_true', help='Save details to config file')
parser.add_argument('--consumer-key', metavar='key', type=str, help='consumer key (aka consumer token)')
parser.add_argument('--consumer-secret', metavar='secret', type=str, help='consumer secret')
parser.add_argument('-V', '--version', action='version', version="%(prog)s " + version)
arguments = parser.parse_args()
print(DEPRECATION, file=sys.stderr)
# it's possible to pass keys and then save them to the files
if arguments.config_file:
file_name = confighelper.find_file(arguments.config_file)
config = confighelper.parse(file_name)
else:
file_name = None
config = {}
# Use passed credentials.
if arguments.consumer_key and arguments.consumer_secret:
consumer_key = arguments.consumer_key
consumer_secret = arguments.consumer_secret
# Go find credentials.
else:
try:
conf = config['apps'][arguments.app] if arguments.app else config
consumer_secret = conf['consumer_secret']
consumer_key = conf['consumer_key']
except KeyError:
err = "Couldn't find consumer-key and consumer-secret for '{}' in {}".format(arguments.app, file_name)
raise KeyError(err)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret, 'oob')
print(auth.get_authorization_url())
verifier = raw_input('Please visit this url, click "Authorize app" and enter in the PIN:\n> ')
try:
auth.get_access_token(verifier)
except tweepy.error.TweepError:
print(AUTHORIZATION_FAILED_MESSAGE)
return
# True is the const passed when no file name is given
if arguments.save is not True:
file_name = arguments.save
# Save the keys back to the config file
if arguments.save and file_name:
apps = config['apps'] = config.get('apps', {})
users = config['users'] = config.get('users', {})
app = arguments.app or 'default'
screen_name = auth.get_username().encode('utf-8')
apps[app] = apps.get(app, {})
apps[app].update({
'consumer_key': consumer_key,
'consumer_secret': consumer_secret,
})
users[screen_name] = users.get(screen_name, {})
users[screen_name].update({
'key': auth.access_token.encode('utf-8'),
'secret': auth.access_token_secret.encode('utf-8'),
'app': (arguments.app or 'default')
})
confighelper.dump(config, file_name)
print('Saved keys in {}'.format(file_name))
# Or just print them
else:
print('key: {}\nsecret: {}'.format(auth.access_token, auth.access_token_secret))
def main():
    '''
    Entry point for the ``tbu`` command: build the argument parser with its
    subcommands (post, follow, auth, like) and dispatch to the chosen handler.
    '''
    parser = ArgumentParser()
    parser.add_argument('-V', '--version', action='version', version="%(prog)s " + version)
    subparsers = parser.add_subparsers()

    poster = subparsers.add_parser('post', description="Post text to a given twitter account",
                                   usage='%(prog)s screen_name "update" [options]')
    poster.add_argument('screen_name', type=str)
    poster.add_argument('update', type=str)
    poster.add_argument('-m', '--media-file', type=str, action='append')
    args.add_default_args(poster, include=['config', 'dry-run', 'verbose', 'quiet'])
    poster.set_defaults(func=post)

    follow = subparsers.add_parser('follow', description="automatic following and unfollowing",
                                   usage='%(prog)s [options] screen_name')
    follow.add_argument('screen_name', type=str)
    follow.add_argument('-U', '--unfollow', action='store_true', help="Unfollow those who don't follow you")
    follow.set_defaults(func=auto_follow)

    auth = subparsers.add_parser('auth', description='Authorize an account with a twitter application.',
                                 usage='%(prog)s [options]')
    auth.add_argument('-c', metavar='file', type=str, default=None, dest='config_file', help='config file')
    auth.add_argument('--app', metavar='app', type=str, help='app name in config file')
    auth.add_argument('-s', '--save', nargs='?', const=True,
                      help='Save details to config file. If no file is given, uses file in --config.')
    auth.add_argument('--consumer-key', metavar='key', type=str, help='consumer key (aka consumer token)')
    auth.add_argument('--consumer-secret', metavar='secret', type=str, help='consumer secret')
    auth.set_defaults(func=authenticate)

    fave = subparsers.add_parser('like', description='fave/like mentions', usage='%(prog)s [options] screen_name')
    fave.add_argument('screen_name', type=str)
    # Bug fix: previously ``func=fave`` bound the subparser object itself, so
    # ``tbu like`` crashed when ``arguments.func(arguments)`` was called.
    fave.set_defaults(func=fave_mentions)

    arguments = parser.parse_args()
    arguments.func(arguments)
|
def add_default_args(parser, version=None, include=None):
    '''
    Attach the shared command-line options to *parser*.

    Options selected via *include*:
        - config: path to a bots config file (json or yaml).
        - user: twitter screen name.
        - dry-run: run without side effects.
        - verbose: run talkatively.
        - quiet: run quietly.
    A ``--version`` option is added when *version* is given.

    Args:
        parser (argparse.ArgumentParser): parser to extend.
        version (str): version to return on <cli> --version.
        include (Sequence): default arguments to add to cli.
            Default: (config, user, dry-run, verbose, quiet).
    '''
    wanted = include or ('config', 'user', 'dry-run', 'verbose', 'quiet')

    if 'config' in wanted:
        parser.add_argument('-c', '--config', dest='config_file', metavar='PATH', default=None,
                            type=str, help='bots config file (json or yaml)')

    if 'user' in wanted:
        parser.add_argument('-u', '--user', dest='screen_name', type=str, help="Twitter screen name")

    # Boolean flags share a shape, so add them table-driven (order preserved).
    flag_specs = (
        ('dry-run', '-n', '--dry-run', "Don't actually do anything"),
        ('verbose', '-v', '--verbose', "Run talkatively"),
        ('quiet', '-q', '--quiet', "Run quietly"),
    )
    for key, short_opt, long_opt, help_text in flag_specs:
        if key in wanted:
            parser.add_argument(short_opt, long_opt, action='store_true', help=help_text)

    if version:
        parser.add_argument('-V', '--version', action='version', version="%(prog)s " + version)
- config: argument for specifying a configuration file.
- user: argument for specifying a user.
- dry-run: option for running without side effects.
- verbose: option for running verbosely.
- quiet: option for running quietly.
- version: option for spitting out version information.
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet) | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/args.py#L21-L53 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import logging
import argparse
def parent(version=None, include=None):
'''
Return the default args as a parent parser, optionally adding a version
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
'''
parser = argparse.ArgumentParser(add_help=False)
add_default_args(parser, version=version, include=include)
return parser
def add_logger(name, level=None, format=None):
'''
Set up a stdout logger.
Args:
name (str): name of the logger
level: defaults to logging.INFO
format (str): format string for logging output.
defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.
Returns:
The logger object.
'''
format = format or '%(filename)-11s %(lineno)-3d: %(message)s'
log = logging.getLogger(name)
# Set logging level.
log.setLevel(level or logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter(format))
log.addHandler(ch)
return log
|
def parent(version=None, include=None):
    '''
    Build a parent parser pre-loaded with the default arguments.

    Args:
        version (str): version to return on <cli> --version.
        include (Sequence): default arguments to add to cli.
            Default: (config, user, dry-run, verbose, quiet).

    Returns:
        argparse.ArgumentParser: created with ``add_help=False`` so it can be
        passed via the ``parents`` keyword of another parser.
    '''
    base = argparse.ArgumentParser(add_help=False)
    add_default_args(base, version=version, include=include)
    return base
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet) | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/args.py#L56-L66 | [
"def add_default_args(parser, version=None, include=None):\n '''\n Add default arguments to a parser. These are:\n - config: argument for specifying a configuration file.\n - user: argument for specifying a user.\n - dry-run: option for running without side effects.\n - verbose: op... | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import logging
import argparse
def add_default_args(parser, version=None, include=None):
'''
Add default arguments to a parser. These are:
- config: argument for specifying a configuration file.
- user: argument for specifying a user.
- dry-run: option for running without side effects.
- verbose: option for running verbosely.
- quiet: option for running quietly.
- version: option for spitting out version information.
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
'''
include = include or ('config', 'user', 'dry-run', 'verbose', 'quiet')
if 'config' in include:
parser.add_argument('-c', '--config', dest='config_file', metavar='PATH', default=None,
type=str, help='bots config file (json or yaml)')
if 'user' in include:
parser.add_argument('-u', '--user', dest='screen_name', type=str, help="Twitter screen name")
if 'dry-run' in include:
parser.add_argument('-n', '--dry-run', action='store_true', help="Don't actually do anything")
if 'verbose' in include:
parser.add_argument('-v', '--verbose', action='store_true', help="Run talkatively")
if 'quiet' in include:
parser.add_argument('-q', '--quiet', action='store_true', help="Run quietly")
if version:
parser.add_argument('-V', '--version', action='version', version="%(prog)s " + version)
def add_logger(name, level=None, format=None):
'''
Set up a stdout logger.
Args:
name (str): name of the logger
level: defaults to logging.INFO
format (str): format string for logging output.
defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.
Returns:
The logger object.
'''
format = format or '%(filename)-11s %(lineno)-3d: %(message)s'
log = logging.getLogger(name)
# Set logging level.
log.setLevel(level or logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter(format))
log.addHandler(ch)
return log
|
def add_logger(name, level=None, format=None):
    '''
    Set up a logger that writes to stdout.

    Args:
        name (str): name of the logger.
        level: logging level, defaults to logging.INFO.
        format (str): format string for logging output.
            Defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.

    Returns:
        logging.Logger: the configured logger object.
    '''
    fmt = format or '%(filename)-11s %(lineno)-3d: %(message)s'
    log = logging.getLogger(name)

    # Set logging level.
    log.setLevel(level or logging.INFO)

    # Bug fix: the original appended a new StreamHandler on every call, so
    # calling add_logger twice with the same name duplicated every log line.
    # Only attach a handler when this logger has none yet.
    if not log.handlers:
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(logging.Formatter(fmt))
        log.addHandler(ch)

    return log
return log | Set up a stdout logger.
Args:
name (str): name of the logger
level: defaults to logging.INFO
format (str): format string for logging output.
defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.
Returns:
The logger object. | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/args.py#L69-L92 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import logging
import argparse
def add_default_args(parser, version=None, include=None):
'''
Add default arguments to a parser. These are:
- config: argument for specifying a configuration file.
- user: argument for specifying a user.
- dry-run: option for running without side effects.
- verbose: option for running verbosely.
- quiet: option for running quietly.
- version: option for spitting out version information.
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
'''
include = include or ('config', 'user', 'dry-run', 'verbose', 'quiet')
if 'config' in include:
parser.add_argument('-c', '--config', dest='config_file', metavar='PATH', default=None,
type=str, help='bots config file (json or yaml)')
if 'user' in include:
parser.add_argument('-u', '--user', dest='screen_name', type=str, help="Twitter screen name")
if 'dry-run' in include:
parser.add_argument('-n', '--dry-run', action='store_true', help="Don't actually do anything")
if 'verbose' in include:
parser.add_argument('-v', '--verbose', action='store_true', help="Run talkatively")
if 'quiet' in include:
parser.add_argument('-q', '--quiet', action='store_true', help="Run quietly")
if version:
parser.add_argument('-V', '--version', action='version', version="%(prog)s " + version)
def parent(version=None, include=None):
'''
Return the default args as a parent parser, optionally adding a version
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
'''
parser = argparse.ArgumentParser(add_help=False)
add_default_args(parser, version=version, include=include)
return parser
|
def has_entities(status):
    '''
    Return True if *status* carries at least one entity of any type.

    Args:
        status: either a tweepy.Status object or a dict returned from the
            Twitter API.
    '''
    try:
        entities = status.entities
    except AttributeError:
        entities = status['entities']
    return any(len(values) > 0 for values in entities.values())
Args:
status: either a tweepy.Status object or a dict returned from Twitter API | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L54-L69 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
return has_entity(status, 'urls')
def has_hashtag(status):
return has_entity(status, 'hashtags')
def has_mention(status):
return has_entity(status, 'user_mentions')
def has_media(status):
return has_entity(status, 'media')
def has_symbol(status):
return has_entity(status, 'symbols')
def has_entity(status, entitykey):
try:
return len(status.entities[entitykey]) > 0
except AttributeError:
return len(status['entities'][entitykey]) > 0
def format_status(status):
return format_text(status.text)
def format_text(text):
return parser.unescape(text).strip()
def remove_mentions(status):
'''Remove mentions from status text'''
return remove_entities(status, ['user_mentions'])
def remove_urls(status):
'''Remove urls from status text'''
return remove_entities(status, ['urls'])
def remove_symbols(status):
'''Remove symbols from status text'''
return remove_entities(status, ['symbols'])
def remove_hashtags(status):
'''Remove hashtags from status text'''
return remove_entities(status, ['hastags'])
def remove_entity(status, entitytype):
'''Use indices to remove given entity type from status text'''
return remove_entities(status, [entitytype])
def remove_entities(status, entitylist):
    '''
    Strip every entity whose type appears in *entitylist* from the status
    text, using the (start, end) index pairs Twitter reports per entity.

    Args:
        status: a tweepy.Status object or a dict returned from the Twitter API.
        entitylist (list): entity type names to remove, e.g. ['urls', 'user_mentions'].

    Returns:
        str: the status text with the selected entities cut out.
    '''
    try:
        entities = status.entities
        text = status.text
    except AttributeError:
        entities = status.get('entities', dict())
        text = status['text']

    spans = []
    for entity_type, entity_values in list(entities.items()):
        if entity_type in entitylist:
            spans.extend(entity['indices'] for entity in entity_values)

    # Cut from the right so earlier indices stay valid as the text shrinks.
    for start, end in sorted(spans, key=lambda pair: pair[0], reverse=True):
        text = text[:start] + text[end:]

    return text
def replace_urls(status):
'''
Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str
'''
text = status.text
if not has_url(status):
return text
urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
urls.sort(key=lambda x: x[0][0], reverse=True)
for (start, end), url in urls:
text = text[:start] + url + text[end:]
return text
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string
def queryize(terms, exclude_screen_name=None):
'''
Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search
'''
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots, sn))
def chomp(text, max_len=280, split=None):
'''
Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.")
'''
split = split or '—;,.'
while length(text) > max_len:
try:
text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]
except IndexError:
return text
return text
def length(text, maxval=None, encoding=None):
'''
Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int
'''
maxval = maxval or 4351
try:
assert not isinstance(text, six.binary_type)
except AssertionError:
raise TypeError('helpers.length requires a unicode argument')
return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text))
|
fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | remove_entities | python | def remove_entities(status, entitylist):
'''Remove entities for a list of items.'''
try:
entities = status.entities
text = status.text
except AttributeError:
entities = status.get('entities', dict())
text = status['text']
indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist]
indices.sort(key=lambda x: x[0], reverse=True)
for start, end in indices:
text = text[:start] + text[end:]
return text | Remove entities for a list of items. | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L98-L113 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
return has_entity(status, 'urls')
def has_hashtag(status):
return has_entity(status, 'hashtags')
def has_mention(status):
return has_entity(status, 'user_mentions')
def has_media(status):
return has_entity(status, 'media')
def has_symbol(status):
return has_entity(status, 'symbols')
def has_entity(status, entitykey):
try:
return len(status.entities[entitykey]) > 0
except AttributeError:
return len(status['entities'][entitykey]) > 0
def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False
def format_status(status):
return format_text(status.text)
def format_text(text):
return parser.unescape(text).strip()
def remove_mentions(status):
'''Remove mentions from status text'''
return remove_entities(status, ['user_mentions'])
def remove_urls(status):
'''Remove urls from status text'''
return remove_entities(status, ['urls'])
def remove_symbols(status):
'''Remove symbols from status text'''
return remove_entities(status, ['symbols'])
def remove_hashtags(status):
'''Remove hashtags from status text'''
return remove_entities(status, ['hastags'])
def remove_entity(status, entitytype):
'''Use indices to remove given entity type from status text'''
return remove_entities(status, [entitytype])
def replace_urls(status):
'''
Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str
'''
text = status.text
if not has_url(status):
return text
urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
urls.sort(key=lambda x: x[0][0], reverse=True)
for (start, end), url in urls:
text = text[:start] + url + text[end:]
return text
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string
def queryize(terms, exclude_screen_name=None):
'''
Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search
'''
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots, sn))
def chomp(text, max_len=280, split=None):
'''
Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.")
'''
split = split or '—;,.'
while length(text) > max_len:
try:
text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]
except IndexError:
return text
return text
def length(text, maxval=None, encoding=None):
'''
Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int
'''
maxval = maxval or 4351
try:
assert not isinstance(text, six.binary_type)
except AssertionError:
raise TypeError('helpers.length requires a unicode argument')
return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text))
|
fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | replace_urls | python | def replace_urls(status):
'''
Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str
'''
text = status.text
if not has_url(status):
return text
urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
urls.sort(key=lambda x: x[0][0], reverse=True)
for (start, end), url in urls:
text = text[:start] + url + text[end:]
return text | Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L116-L137 | [
"def has_url(status):\n return has_entity(status, 'urls')\n"
] | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
return has_entity(status, 'urls')
def has_hashtag(status):
return has_entity(status, 'hashtags')
def has_mention(status):
return has_entity(status, 'user_mentions')
def has_media(status):
return has_entity(status, 'media')
def has_symbol(status):
return has_entity(status, 'symbols')
def has_entity(status, entitykey):
try:
return len(status.entities[entitykey]) > 0
except AttributeError:
return len(status['entities'][entitykey]) > 0
def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False
def format_status(status):
return format_text(status.text)
def format_text(text):
return parser.unescape(text).strip()
def remove_mentions(status):
'''Remove mentions from status text'''
return remove_entities(status, ['user_mentions'])
def remove_urls(status):
'''Remove urls from status text'''
return remove_entities(status, ['urls'])
def remove_symbols(status):
'''Remove symbols from status text'''
return remove_entities(status, ['symbols'])
def remove_hashtags(status):
'''Remove hashtags from status text'''
return remove_entities(status, ['hastags'])
def remove_entity(status, entitytype):
'''Use indices to remove given entity type from status text'''
return remove_entities(status, [entitytype])
def remove_entities(status, entitylist):
'''Remove entities for a list of items.'''
try:
entities = status.entities
text = status.text
except AttributeError:
entities = status.get('entities', dict())
text = status['text']
indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist]
indices.sort(key=lambda x: x[0], reverse=True)
for start, end in indices:
text = text[:start] + text[end:]
return text
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string
def queryize(terms, exclude_screen_name=None):
'''
Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search
'''
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots, sn))
def chomp(text, max_len=280, split=None):
'''
Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.")
'''
split = split or '—;,.'
while length(text) > max_len:
try:
text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]
except IndexError:
return text
return text
def length(text, maxval=None, encoding=None):
'''
Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int
'''
maxval = maxval or 4351
try:
assert not isinstance(text, six.binary_type)
except AssertionError:
raise TypeError('helpers.length requires a unicode argument')
return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text))
|
fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | shorten | python | def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string | Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)' | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L140-L159 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
return has_entity(status, 'urls')
def has_hashtag(status):
return has_entity(status, 'hashtags')
def has_mention(status):
return has_entity(status, 'user_mentions')
def has_media(status):
return has_entity(status, 'media')
def has_symbol(status):
return has_entity(status, 'symbols')
def has_entity(status, entitykey):
try:
return len(status.entities[entitykey]) > 0
except AttributeError:
return len(status['entities'][entitykey]) > 0
def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False
def format_status(status):
return format_text(status.text)
def format_text(text):
return parser.unescape(text).strip()
def remove_mentions(status):
'''Remove mentions from status text'''
return remove_entities(status, ['user_mentions'])
def remove_urls(status):
'''Remove urls from status text'''
return remove_entities(status, ['urls'])
def remove_symbols(status):
'''Remove symbols from status text'''
return remove_entities(status, ['symbols'])
def remove_hashtags(status):
'''Remove hashtags from status text'''
return remove_entities(status, ['hastags'])
def remove_entity(status, entitytype):
'''Use indices to remove given entity type from status text'''
return remove_entities(status, [entitytype])
def remove_entities(status, entitylist):
'''Remove entities for a list of items.'''
try:
entities = status.entities
text = status.text
except AttributeError:
entities = status.get('entities', dict())
text = status['text']
indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist]
indices.sort(key=lambda x: x[0], reverse=True)
for start, end in indices:
text = text[:start] + text[end:]
return text
def replace_urls(status):
'''
Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str
'''
text = status.text
if not has_url(status):
return text
urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
urls.sort(key=lambda x: x[0][0], reverse=True)
for (start, end), url in urls:
text = text[:start] + url + text[end:]
return text
def queryize(terms, exclude_screen_name=None):
'''
Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search
'''
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots, sn))
def chomp(text, max_len=280, split=None):
'''
Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.")
'''
split = split or '—;,.'
while length(text) > max_len:
try:
text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]
except IndexError:
return text
return text
def length(text, maxval=None, encoding=None):
'''
Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int
'''
maxval = maxval or 4351
try:
assert not isinstance(text, six.binary_type)
except AssertionError:
raise TypeError('helpers.length requires a unicode argument')
return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text))
|
fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | queryize | python | def queryize(terms, exclude_screen_name=None):
'''
Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search
'''
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots, sn)) | Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L162-L181 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
return has_entity(status, 'urls')
def has_hashtag(status):
return has_entity(status, 'hashtags')
def has_mention(status):
return has_entity(status, 'user_mentions')
def has_media(status):
return has_entity(status, 'media')
def has_symbol(status):
return has_entity(status, 'symbols')
def has_entity(status, entitykey):
try:
return len(status.entities[entitykey]) > 0
except AttributeError:
return len(status['entities'][entitykey]) > 0
def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False
def format_status(status):
return format_text(status.text)
def format_text(text):
return parser.unescape(text).strip()
def remove_mentions(status):
'''Remove mentions from status text'''
return remove_entities(status, ['user_mentions'])
def remove_urls(status):
'''Remove urls from status text'''
return remove_entities(status, ['urls'])
def remove_symbols(status):
'''Remove symbols from status text'''
return remove_entities(status, ['symbols'])
def remove_hashtags(status):
'''Remove hashtags from status text'''
return remove_entities(status, ['hastags'])
def remove_entity(status, entitytype):
'''Use indices to remove given entity type from status text'''
return remove_entities(status, [entitytype])
def remove_entities(status, entitylist):
'''Remove entities for a list of items.'''
try:
entities = status.entities
text = status.text
except AttributeError:
entities = status.get('entities', dict())
text = status['text']
indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist]
indices.sort(key=lambda x: x[0], reverse=True)
for start, end in indices:
text = text[:start] + text[end:]
return text
def replace_urls(status):
'''
Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str
'''
text = status.text
if not has_url(status):
return text
urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
urls.sort(key=lambda x: x[0][0], reverse=True)
for (start, end), url in urls:
text = text[:start] + url + text[end:]
return text
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string
def chomp(text, max_len=280, split=None):
'''
Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.")
'''
split = split or '—;,.'
while length(text) > max_len:
try:
text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]
except IndexError:
return text
return text
def length(text, maxval=None, encoding=None):
'''
Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int
'''
maxval = maxval or 4351
try:
assert not isinstance(text, six.binary_type)
except AssertionError:
raise TypeError('helpers.length requires a unicode argument')
return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text))
|
fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | chomp | python | def chomp(text, max_len=280, split=None):
'''
Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.")
'''
split = split or '—;,.'
while length(text) > max_len:
try:
text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]
except IndexError:
return text
return text | Shorten a string so that it fits under max_len, splitting it at 'split'.
Not guaranteed to return a string under max_len, as it may not be possible
Args:
text (str): String to shorten
max_len (int): maximum length. default 140
split (str): strings to split on (default is common punctuation: "-;,.") | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L184-L201 | [
"def length(text, maxval=None, encoding=None):\n '''\n Count the length of a str the way Twitter does,\n double-counting \"wide\" characters (e.g. ideographs, emoji)\n\n Args:\n text (str): Text to count. Must be a unicode string in Python 2\n maxval (int): The maximum encoding that will b... | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
return has_entity(status, 'urls')
def has_hashtag(status):
return has_entity(status, 'hashtags')
def has_mention(status):
return has_entity(status, 'user_mentions')
def has_media(status):
return has_entity(status, 'media')
def has_symbol(status):
return has_entity(status, 'symbols')
def has_entity(status, entitykey):
try:
return len(status.entities[entitykey]) > 0
except AttributeError:
return len(status['entities'][entitykey]) > 0
def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False
def format_status(status):
return format_text(status.text)
def format_text(text):
return parser.unescape(text).strip()
def remove_mentions(status):
'''Remove mentions from status text'''
return remove_entities(status, ['user_mentions'])
def remove_urls(status):
'''Remove urls from status text'''
return remove_entities(status, ['urls'])
def remove_symbols(status):
'''Remove symbols from status text'''
return remove_entities(status, ['symbols'])
def remove_hashtags(status):
'''Remove hashtags from status text'''
return remove_entities(status, ['hastags'])
def remove_entity(status, entitytype):
'''Use indices to remove given entity type from status text'''
return remove_entities(status, [entitytype])
def remove_entities(status, entitylist):
'''Remove entities for a list of items.'''
try:
entities = status.entities
text = status.text
except AttributeError:
entities = status.get('entities', dict())
text = status['text']
indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist]
indices.sort(key=lambda x: x[0], reverse=True)
for start, end in indices:
text = text[:start] + text[end:]
return text
def replace_urls(status):
'''
Replace shorturls in a status with expanded urls.
Args:
status (tweepy.status): A tweepy status object
Returns:
str
'''
text = status.text
if not has_url(status):
return text
urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
urls.sort(key=lambda x: x[0][0], reverse=True)
for (start, end), url in urls:
text = text[:start] + url + text[end:]
return text
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string
def queryize(terms, exclude_screen_name=None):
'''
Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen name to exclude from the search.
Returns:
A string ready to be passed to tweepy.API.search
'''
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots, sn))
def length(text, maxval=None, encoding=None):
    '''
    Count the length of a str the way Twitter does,
    double-counting "wide" characters (e.g. ideographs, emoji).

    Args:
        text (str): Text to count. Must be a unicode string in Python 2.
        maxval (int): The maximum code point that will be counted as 1 character.
            Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF).
        encoding: Unused; kept for backward compatibility.

    Returns:
        int

    Raises:
        TypeError: if *text* is a byte string.
    '''
    maxval = maxval or 4351
    # `bytes` is an alias for `str` on Python 2, so this check rejects
    # non-unicode input on both major versions. A plain isinstance check
    # also survives `python -O`, which strips `assert` statements (the
    # previous assert/AssertionError dance did not).
    if isinstance(text, bytes):
        raise TypeError('helpers.length requires a unicode argument')
    # Normalize to NFC first so combining sequences count as one character.
    return sum(2 if ord(char) > maxval else 1
               for char in unicodedata.normalize('NFC', text))
|
fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | length | python | def length(text, maxval=None, encoding=None):
'''
Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int
'''
maxval = maxval or 4351
try:
assert not isinstance(text, six.binary_type)
except AssertionError:
raise TypeError('helpers.length requires a unicode argument')
return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text)) | Count the length of a str the way Twitter does,
double-counting "wide" characters (e.g. ideographs, emoji)
Args:
text (str): Text to count. Must be a unicode string in Python 2
maxval (int): The maximum encoding that will be counted as 1 character.
Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)
Returns:
int | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L204-L222 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unicodedata
try:
import HTMLParser
parser = HTMLParser.HTMLParser()
except ImportError:
from html import parser
import six
def has_url(status):
    # True if the status carries at least one url entity.
    return has_entity(status, 'urls')


def has_hashtag(status):
    # True if the status carries at least one hashtag entity.
    return has_entity(status, 'hashtags')


def has_mention(status):
    # True if the status mentions at least one user.
    return has_entity(status, 'user_mentions')


def has_media(status):
    # True if the status carries attached media.
    return has_entity(status, 'media')


def has_symbol(status):
    # True if the status carries at least one cashtag/symbol entity.
    return has_entity(status, 'symbols')


def has_entity(status, entitykey):
    # Works on both tweepy.Status objects (attribute access) and raw
    # Twitter API dicts (subscript access).
    try:
        return len(status.entities[entitykey]) > 0
    except AttributeError:
        return len(status['entities'][entitykey]) > 0


def has_entities(status):
    """
    Returns true if a Status object has entities.

    Args:
        status: either a tweepy.Status object or a dict returned from Twitter API
    """
    try:
        if sum(len(v) for v in status.entities.values()) > 0:
            return True

    except AttributeError:
        if sum(len(v) for v in status['entities'].values()) > 0:
            return True

    return False
def format_status(status):
    # Unescape HTML entities in a status's text and trim whitespace.
    return format_text(status.text)


def format_text(text):
    # `parser` is HTMLParser (py2) or html.parser (py3); unescape decodes
    # HTML entities such as &amp; back to their literal characters.
    return parser.unescape(text).strip()


def remove_mentions(status):
    '''Remove mentions from status text'''
    return remove_entities(status, ['user_mentions'])


def remove_urls(status):
    '''Remove urls from status text'''
    return remove_entities(status, ['urls'])


def remove_symbols(status):
    '''Remove symbols from status text'''
    return remove_entities(status, ['symbols'])
def remove_hashtags(status):
    '''Remove hashtags from status text'''
    # BUG FIX: the entity key was misspelled 'hastags', so hashtags were
    # never actually removed from the text.
    return remove_entities(status, ['hashtags'])


def remove_entity(status, entitytype):
    '''Use indices to remove a single given entity type from status text'''
    return remove_entities(status, [entitytype])


def remove_entities(status, entitylist):
    '''Remove the text spans of the listed entity types from a status.

    Args:
        status: a tweepy.Status object or a Twitter API status dict.
        entitylist (list): entity-type keys (e.g. 'urls') to remove.

    Returns:
        str
    '''
    try:
        entities = status.entities
        text = status.text

    except AttributeError:
        entities = status.get('entities', dict())
        text = status['text']

    indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist]
    # Remove spans from right to left so earlier indices stay valid.
    indices.sort(key=lambda x: x[0], reverse=True)
    for start, end in indices:
        text = text[:start] + text[end:]

    return text
def replace_urls(status):
    '''
    Replace shorturls in a status with expanded urls.

    Args:
        status (tweepy.status): A tweepy status object

    Returns:
        str
    '''
    text = status.text

    if not has_url(status):
        return text

    urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
    # Substitute from the rightmost url first so earlier indices stay valid.
    urls.sort(key=lambda x: x[0][0], reverse=True)
    for (start, end), url in urls:
        text = text[:start] + url + text[end:]

    return text
def shorten(string, length=140, ellipsis=None):
    '''
    Shorten a string to 140 characters without breaking words.
    Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
    e.g. ellipsis=' (cut)'
    '''
    # NOTE(review): the `length` parameter shadows the module-level length()
    # helper; harmless here because this function never calls it.
    string = string.strip()
    if len(string) > length:
        if ellipsis is True:
            ellipsis = '…'
        else:
            ellipsis = ellipsis or ''

        # Character budget once the ellipsis is accounted for.
        L = length - len(ellipsis)
        # Drop the last (possibly clipped) word and trailing punctuation.
        return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis

    else:
        return string
def queryize(terms, exclude_screen_name=None):
    '''
    Create query from list of terms, using OR
    but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
    Optionally add -from:exclude_screen_name.

    Terms are double-quoted in the output (note the trailing space):

    >>> helpers.queryize(['apple', 'orange', '-peach'])
    '"apple" OR "orange" -"peach" '

    Args:
        terms (list): Search terms.
        exclude_screen_name (str): A single screen name to exclude from the search.

    Returns:
        A string ready to be passed to tweepy.API.search
    '''
    # Positive terms, quoted and OR-ed together.
    ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
    # Negated terms, re-prefixed with '-' outside the quotes.
    nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
    sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
    return ' '.join((ors, nots, sn))
def chomp(text, max_len=280, split=None):
    '''
    Shorten a string so that it fits under max_len, splitting it at 'split'.
    Not guaranteed to return a string under max_len, as it may not be possible.

    Args:
        text (str): String to shorten
        max_len (int): maximum length. default 280
        split (str): characters to split on (default is common punctuation: "—;,.")
    '''
    split = split or '—;,.'
    # BUG FIX: escape the split characters so regex metacharacters such as
    # ']' or '\' supplied by the caller cannot break the character class.
    # Compiling once also hoists the pattern out of the loop.
    pattern = re.compile('[' + re.escape(split) + ']')
    while length(text) > max_len:
        try:
            # Reverse the text, cut everything up to (and including) the
            # first split character, reverse back: i.e. trim the text at
            # its *last* split character.
            text = pattern.split(text[::-1], 1)[1][::-1]
        except IndexError:
            # No split character left; return what we have, even if long.
            return text

    return text
|
fitnr/twitter_bot_utils | twitter_bot_utils/confighelper.py | configure | python | def configure(screen_name=None, config_file=None, app=None, **kwargs):
# Use passed config file, or look for it in the default path.
# Super-optionally, accept a different place to look for the file
dirs = kwargs.pop('default_directories', None)
bases = kwargs.pop('default_bases', None)
file_config = {}
if config_file is not False:
config_file = find_file(config_file, dirs, bases)
file_config = parse(config_file)
# config and keys dicts
# Pull non-authentication settings from the file.
# Kwargs, user, app, and general settings are included, in that order of preference
# Exclude apps and users sections from config
config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')}
user_conf = file_config.get('users', {}).get(screen_name, {})
app = app or user_conf.get('app')
app_conf = file_config.get('apps', {}).get(app, {})
# Pull user and app data from the file
config.update(app_conf)
config.update(user_conf)
# kwargs take precendence over config file
config.update({k: v for k, v in kwargs.items() if v is not None})
return config | Set up a config dictionary using a bots.yaml config file and optional keyword args.
Args:
screen_name (str): screen_name of user to search for in config file
config_file (str): Path to read for the config file
app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.
default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.
default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES. | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/confighelper.py#L43-L80 | [
"def parse(file_path):\n '''Parse a YAML or JSON file.'''\n\n _, ext = path.splitext(file_path)\n\n if ext in ('.yaml', '.yml'):\n func = yaml.load\n\n elif ext == '.json':\n func = json.load\n\n else:\n raise ValueError(\"Unrecognized config file type %s\" % ext)\n\n with ope... | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import product
import json
from os import path, getcwd
# "FileNotFoundError" is a Py 3 thing. If we're in Py 2, we mimic it with a lambda expression.
try:
FileNotFoundError
except NameError:
from errno import ENOENT
FileNotFoundError = lambda x: IOError(ENOENT, x)
import yaml
import tweepy
CONFIG_DIRS = [
getcwd(),
'~',
path.join('~', 'bots'),
]
CONFIG_BASES = [
'bots.yml',
'bots.yaml',
'bots.json'
]
def parse(file_path):
    '''Parse a YAML or JSON config file into a Python object.

    Args:
        file_path (str): path ending in .yaml, .yml or .json.

    Raises:
        ValueError: for any other file extension.
    '''
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        # SECURITY/DEPRECATION FIX: yaml.load without an explicit Loader is
        # deprecated and can execute arbitrary Python tags from the file;
        # safe_load restricts input to plain YAML data.
        func = yaml.safe_load

    elif ext == '.json':
        func = json.load

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'r') as f:
        return func(f)
def find_file(config_file=None, default_directories=None, default_bases=None):
    '''Locate a bots config file: either the explicit one or a default candidate.'''
    if config_file:
        # An explicit path must exist; never silently fall back to defaults.
        if path.exists(path.expanduser(config_file)):
            return config_file
        raise FileNotFoundError('Config file not found: {}'.format(config_file))

    # The current directory is always searched first.
    directories = [getcwd()] + (default_directories or CONFIG_DIRS)
    basenames = default_bases or CONFIG_BASES

    # Check every directory/basename combination in order of preference.
    for candidate_dir, candidate_base in product(directories, basenames):
        candidate = path.expanduser(path.join(candidate_dir, candidate_base))
        if path.exists(candidate):
            return candidate

    raise FileNotFoundError('Config file not found in {}'.format(directories))
def setup_auth(**keys):
    '''Build a tweepy OAuthHandler from consumer/access credentials.'''
    handler = tweepy.OAuthHandler(
        consumer_key=keys['consumer_key'],
        consumer_secret=keys['consumer_secret'],
    )
    # The access token/secret have gone by several names in config files;
    # accept all of the historical aliases.
    token = keys.get('token', keys.get('key', keys.get('oauth_token')))
    secret = keys.get('secret', keys.get('oauth_secret'))
    handler.set_access_token(key=token, secret=secret)
    return handler
def dump(contents, file_path):
    '''Serialize `contents` to YAML or JSON depending on the file extension.'''
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        writer = yaml.dump
        options = {'canonical': False, 'default_flow_style': False, 'indent': 4}

    elif ext == '.json':
        writer = json.dump
        options = {'sort_keys': True, 'indent': 4}

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'w') as f:
        writer(contents, f, **options)
|
fitnr/twitter_bot_utils | twitter_bot_utils/confighelper.py | parse | python | def parse(file_path):
'''Parse a YAML or JSON file.'''
_, ext = path.splitext(file_path)
if ext in ('.yaml', '.yml'):
func = yaml.load
elif ext == '.json':
func = json.load
else:
raise ValueError("Unrecognized config file type %s" % ext)
with open(file_path, 'r') as f:
return func(f) | Parse a YAML or JSON file. | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/confighelper.py#L83-L98 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import product
import json
from os import path, getcwd
# "FileNotFoundError" is a Py 3 thing. If we're in Py 2, we mimic it with a lambda expression.
try:
FileNotFoundError
except NameError:
from errno import ENOENT
FileNotFoundError = lambda x: IOError(ENOENT, x)
import yaml
import tweepy
CONFIG_DIRS = [
getcwd(),
'~',
path.join('~', 'bots'),
]
CONFIG_BASES = [
'bots.yml',
'bots.yaml',
'bots.json'
]
def configure(screen_name=None, config_file=None, app=None, **kwargs):
    """
    Set up a config dictionary using a bots.yaml config file and optional keyword args.

    Args:
        screen_name (str): screen_name of user to search for in config file
        config_file (str): Path to read for the config file. Pass False to
            skip reading any file.
        app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.
        default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.
        default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES.
    """
    # Pop the file-search options so they aren't copied into the result.
    search_dirs = kwargs.pop('default_directories', None)
    search_bases = kwargs.pop('default_bases', None)

    file_config = {}
    if config_file is not False:
        located = find_file(config_file, search_dirs, search_bases)
        file_config = parse(located)

    # Start from the general settings, leaving out the apps/users sections.
    config = {key: value for key, value in file_config.items() if key not in ('apps', 'users')}

    user_conf = file_config.get('users', {}).get(screen_name, {})
    app = app or user_conf.get('app')
    app_conf = file_config.get('apps', {}).get(app, {})

    # Precedence (lowest to highest): general < app < user < kwargs.
    config.update(app_conf)
    config.update(user_conf)
    config.update({key: value for key, value in kwargs.items() if value is not None})

    return config
def find_file(config_file=None, default_directories=None, default_bases=None):
    '''Search for a config file in a list of files.

    Args:
        config_file (str): explicit path; raises if it does not exist.
        default_directories (list): directories to search. Defaults to CONFIG_DIRS.
        default_bases (list): file names to look for. Defaults to CONFIG_BASES.

    Raises:
        FileNotFoundError: when no candidate exists.
    '''
    if config_file:
        # An explicit path must exist; never fall back to the defaults.
        if path.exists(path.expanduser(config_file)):
            return config_file
        else:
            raise FileNotFoundError('Config file not found: {}'.format(config_file))

    dirs = default_directories or CONFIG_DIRS
    # The current directory is always searched first.
    dirs = [getcwd()] + dirs

    bases = default_bases or CONFIG_BASES

    # Try every directory/basename combination in order of preference.
    for directory, base in product(dirs, bases):
        filepath = path.expanduser(path.join(directory, base))
        if path.exists(filepath):
            return filepath

    raise FileNotFoundError('Config file not found in {}'.format(dirs))


def setup_auth(**keys):
    '''Set up Tweepy authentication using passed args or config file settings.'''
    auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
    # The access token/secret have gone by several names; accept the aliases.
    auth.set_access_token(
        key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
        secret=keys.get('secret', keys.get('oauth_secret'))
    )
    return auth


def dump(contents, file_path):
    # Serialize `contents` as YAML or JSON based on the file extension.
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        func = yaml.dump
        kwargs = {'canonical': False, 'default_flow_style': False, 'indent': 4}

    elif ext == '.json':
        func = json.dump
        kwargs = {'sort_keys': True, 'indent': 4}

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'w') as f:
        func(contents, f, **kwargs)
|
fitnr/twitter_bot_utils | twitter_bot_utils/confighelper.py | find_file | python | def find_file(config_file=None, default_directories=None, default_bases=None):
'''Search for a config file in a list of files.'''
if config_file:
if path.exists(path.expanduser(config_file)):
return config_file
else:
raise FileNotFoundError('Config file not found: {}'.format(config_file))
dirs = default_directories or CONFIG_DIRS
dirs = [getcwd()] + dirs
bases = default_bases or CONFIG_BASES
for directory, base in product(dirs, bases):
filepath = path.expanduser(path.join(directory, base))
if path.exists(filepath):
return filepath
raise FileNotFoundError('Config file not found in {}'.format(dirs)) | Search for a config file in a list of files. | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/confighelper.py#L101-L120 | [
"FileNotFoundError = lambda x: IOError(ENOENT, x)\n"
] | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import product
import json
from os import path, getcwd
# "FileNotFoundError" is a Py 3 thing. If we're in Py 2, we mimic it with a lambda expression.
try:
FileNotFoundError
except NameError:
from errno import ENOENT
FileNotFoundError = lambda x: IOError(ENOENT, x)
import yaml
import tweepy
CONFIG_DIRS = [
getcwd(),
'~',
path.join('~', 'bots'),
]
CONFIG_BASES = [
'bots.yml',
'bots.yaml',
'bots.json'
]
def configure(screen_name=None, config_file=None, app=None, **kwargs):
    """
    Set up a config dictionary using a bots.yaml config file and optional keyword args.

    Args:
        screen_name (str): screen_name of user to search for in config file
        config_file (str): Path to read for the config file
        app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.
        default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.
        default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES.
    """
    # Use passed config file, or look for it in the default path.
    # Super-optionally, accept a different place to look for the file.
    # These are popped so they don't leak into the returned config.
    dirs = kwargs.pop('default_directories', None)
    bases = kwargs.pop('default_bases', None)

    file_config = {}
    # `config_file is False` explicitly disables reading any file.
    if config_file is not False:
        config_file = find_file(config_file, dirs, bases)
        file_config = parse(config_file)

    # config and keys dicts
    # Pull non-authentication settings from the file.
    # Kwargs, user, app, and general settings are included, in that order of preference

    # Exclude apps and users sections from config
    config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')}

    user_conf = file_config.get('users', {}).get(screen_name, {})
    app = app or user_conf.get('app')
    app_conf = file_config.get('apps', {}).get(app, {})

    # Pull user and app data from the file
    config.update(app_conf)
    config.update(user_conf)

    # kwargs take precendence over config file
    config.update({k: v for k, v in kwargs.items() if v is not None})

    return config


def parse(file_path):
    '''Parse a YAML or JSON file.'''
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and can execute arbitrary tags -- prefer yaml.safe_load.
        func = yaml.load

    elif ext == '.json':
        func = json.load

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'r') as f:
        return func(f)


def setup_auth(**keys):
    '''Set up Tweepy authentication using passed args or config file settings.'''
    auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
    # The access token/secret have gone by several names; accept the aliases.
    auth.set_access_token(
        key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
        secret=keys.get('secret', keys.get('oauth_secret'))
    )
    return auth


def dump(contents, file_path):
    # Serialize `contents` as YAML or JSON based on the file extension.
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        func = yaml.dump
        kwargs = {'canonical': False, 'default_flow_style': False, 'indent': 4}

    elif ext == '.json':
        func = json.dump
        kwargs = {'sort_keys': True, 'indent': 4}

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'w') as f:
        func(contents, f, **kwargs)
|
fitnr/twitter_bot_utils | twitter_bot_utils/confighelper.py | setup_auth | python | def setup_auth(**keys):
'''Set up Tweepy authentication using passed args or config file settings.'''
auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
auth.set_access_token(
key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
secret=keys.get('secret', keys.get('oauth_secret'))
)
return auth | Set up Tweepy authentication using passed args or config file settings. | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/confighelper.py#L123-L130 | null | # -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import product
import json
from os import path, getcwd
# "FileNotFoundError" is a Py 3 thing. If we're in Py 2, we mimic it with a lambda expression.
try:
FileNotFoundError
except NameError:
from errno import ENOENT
FileNotFoundError = lambda x: IOError(ENOENT, x)
import yaml
import tweepy
CONFIG_DIRS = [
getcwd(),
'~',
path.join('~', 'bots'),
]
CONFIG_BASES = [
'bots.yml',
'bots.yaml',
'bots.json'
]
def configure(screen_name=None, config_file=None, app=None, **kwargs):
    """
    Set up a config dictionary using a bots.yaml config file and optional keyword args.

    Args:
        screen_name (str): screen_name of user to search for in config file
        config_file (str): Path to read for the config file
        app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.
        default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.
        default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES.
    """
    # Use passed config file, or look for it in the default path.
    # Super-optionally, accept a different place to look for the file.
    # These are popped so they don't leak into the returned config.
    dirs = kwargs.pop('default_directories', None)
    bases = kwargs.pop('default_bases', None)

    file_config = {}
    # `config_file is False` explicitly disables reading any file.
    if config_file is not False:
        config_file = find_file(config_file, dirs, bases)
        file_config = parse(config_file)

    # config and keys dicts
    # Pull non-authentication settings from the file.
    # Kwargs, user, app, and general settings are included, in that order of preference

    # Exclude apps and users sections from config
    config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')}

    user_conf = file_config.get('users', {}).get(screen_name, {})
    app = app or user_conf.get('app')
    app_conf = file_config.get('apps', {}).get(app, {})

    # Pull user and app data from the file
    config.update(app_conf)
    config.update(user_conf)

    # kwargs take precendence over config file
    config.update({k: v for k, v in kwargs.items() if v is not None})

    return config


def parse(file_path):
    '''Parse a YAML or JSON file.'''
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and can execute arbitrary tags -- prefer yaml.safe_load.
        func = yaml.load

    elif ext == '.json':
        func = json.load

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'r') as f:
        return func(f)


def find_file(config_file=None, default_directories=None, default_bases=None):
    '''Search for a config file in a list of files.

    Raises:
        FileNotFoundError: when no candidate exists.
    '''
    if config_file:
        # An explicit path must exist; never fall back to the defaults.
        if path.exists(path.expanduser(config_file)):
            return config_file
        else:
            raise FileNotFoundError('Config file not found: {}'.format(config_file))

    dirs = default_directories or CONFIG_DIRS
    # The current directory is always searched first.
    dirs = [getcwd()] + dirs

    bases = default_bases or CONFIG_BASES

    # Try every directory/basename combination in order of preference.
    for directory, base in product(dirs, bases):
        filepath = path.expanduser(path.join(directory, base))
        if path.exists(filepath):
            return filepath

    raise FileNotFoundError('Config file not found in {}'.format(dirs))


def dump(contents, file_path):
    # Serialize `contents` as YAML or JSON based on the file extension.
    _, ext = path.splitext(file_path)

    if ext in ('.yaml', '.yml'):
        func = yaml.dump
        kwargs = {'canonical': False, 'default_flow_style': False, 'indent': 4}

    elif ext == '.json':
        func = json.dump
        kwargs = {'sort_keys': True, 'indent': 4}

    else:
        raise ValueError("Unrecognized config file type %s" % ext)

    with open(file_path, 'w') as f:
        func(contents, f, **kwargs)
|
fitnr/twitter_bot_utils | twitter_bot_utils/api.py | API.update_status | python | def update_status(self, *pargs, **kwargs):
try:
return super(API, self).update_status(*pargs, **kwargs)
except tweepy.TweepError as e:
if getattr(e, 'api_code', None) == 503:
sleep(10)
return super(API, self).update_status(*pargs, **kwargs)
else:
raise e | Wrapper for tweepy.api.update_status with a 10s wait when twitter is over capacity | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/api.py#L180-L192 | null | class API(tweepy.API):
'''
Extends the tweepy API with config-file handling.
Args:
args (Namespace): argparse.Namespace to read.
screen_name (str): Twitter screen name
config_file (str): Config file. When False, don't read any config files. Defaults to bots.json or bots.yaml in ~/ or ~/bots/.
logger_name (str): Use a logger with this name. Defaults to screen_name
format (str): Format for logger. Defaults to 'file lineno: message'
verbose (bool): Set logging level to DEBUG
quiet (bool): Set logging level to ERROR. Overrides verbose.
use_env (bool): Allow environment variables to override settings. Default: True
kwargs: Other settings will be passed to the config
'''
_last_tweet = _last_reply = _last_retweet = None
max_size_standard = 5120 # standard uploads must be less then 5 MB
max_size_chunked = 15360 # chunked uploads must be less than 15 MB
def __init__(self, args=None, **kwargs):
    '''
    Construct the tbu.API object.

    Args:
        args (Namespace): optional argparse.Namespace; its non-None
            attributes override the keyword arguments.
        kwargs: screen_name, verbose, quiet, logger_name, format,
            use_env, plus any extra settings forwarded to configure().

    Raises:
        ValueError: when required consumer credentials are missing.
    '''
    # Update the kwargs with non-None contents of args
    if isinstance(args, Namespace):
        kwargs.update({k: v for k, v in vars(args).items() if v is not None})

    self._screen_name = kwargs.pop('screen_name', None)

    # Add a logger. `quiet` overrides `verbose`.
    # NOTE(review): `quiet` is read with get() (not pop()), so it is also
    # forwarded to configure() below -- confirm that is intended.
    level = logging.DEBUG if kwargs.pop('verbose', None) else None
    level = logging.ERROR if kwargs.get('quiet', None) else level
    self.logger = tbu_args.add_logger(kwargs.pop('logger_name', self._screen_name), level,
                                      kwargs.pop('format', None))

    # get config file and parse it
    config = configure(self._screen_name, **kwargs)

    # Split credentials (PROTECTED_INFO keys) from general settings.
    self._config = {k: v for k, v in config.items() if k not in PROTECTED_INFO}
    keys = {k: v for k, v in config.items() if k in PROTECTED_INFO}

    if kwargs.get('use_env', True):
        # Fill in any missing credentials from TWITTER_* environment variables.
        keys.update({
            k: os.environ['TWITTER_' + k.upper()] for k in PROTECTED_INFO
            if k not in keys and 'TWITTER_' + k.upper() in os.environ
        })

    try:
        # setup auth
        auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
        try:
            auth.set_access_token(
                key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
                secret=keys.get('secret', keys.get('oauth_secret'))
            )
        except KeyError:
            # API won't have an access key
            pass

    except KeyError:
        missing = [p for p in PROTECTED_INFO if p not in keys]
        raise ValueError("Incomplete config. Missing {}".format(missing))

    # initiate api connection
    super(API, self).__init__(auth)
@property
def config(self):
    # Non-credential settings read from the config file / kwargs.
    return self._config

@property
def screen_name(self):
    # The bot's Twitter screen name (may be None).
    return self._screen_name

@property
def app(self):
    # Name of the app section this user was configured with.
    return self._config['app']

def _sinces(self):
    # Refresh the cached ids of the most recent tweet, reply and retweet
    # by scanning the user's recent timeline once.
    tl = self.user_timeline(self.screen_name, count=1000, include_rts=True, exclude_replies=False)

    if len(tl) > 0:
        self._last_tweet = tl[0].id
    else:
        # Empty timeline: nothing to cache.
        self._last_tweet = self._last_reply = self._last_retweet = None
        return

    try:
        self._last_reply = max(t.id for t in tl if t.in_reply_to_user_id)
    except ValueError:
        # max() over an empty generator: no replies in the window.
        self._last_reply = None

    try:
        self._last_retweet = max(t.id for t in tl if t.retweeted)
    except ValueError:
        self._last_retweet = None

def _last(self, last_what, refresh):
    # Return the cached id named by `last_what`, refreshing the caches
    # first when asked or when it has never been populated.
    if refresh or getattr(self, last_what) is None:
        self._sinces()
    return getattr(self, last_what)

@property
def last_tweet(self, refresh=None):
    # NOTE(review): properties cannot receive arguments on attribute
    # access, so `refresh` is always None here.
    return self._last('_last_tweet', refresh)

@property
def last_reply(self, refresh=None):
    return self._last('_last_reply', refresh)

@property
def last_retweet(self, refresh=None):
    return self._last('_last_retweet', refresh)
def media_upload(self, filename, *args, **kwargs):
    """ Upload media, choosing the standard or chunked endpoint based on
    mime type and file size.

    :reference: https://dev.twitter.com/rest/reference/post/media/upload
    :reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
    :allowed_param:
    """
    f = kwargs.pop('file', None)
    mime, _ = mimetypes.guess_type(filename)
    size = getfilesize(filename, f)

    # Small images go through the standard upload; anything else that is
    # supported uses the chunked endpoint.
    if mime in IMAGE_MIMETYPES and size < self.max_size_standard:
        return self.image_upload(filename, file=f, *args, **kwargs)

    elif mime in CHUNKED_MIMETYPES:
        return self.upload_chunked(filename, file=f, *args, **kwargs)

    else:
        raise TweepError("Can't upload media with mime type %s" % mime)

def image_upload(self, filename, *args, **kwargs):
    """ Upload a single image via the standard (non-chunked) endpoint.

    :reference: https://dev.twitter.com/rest/reference/post/media/upload
    :allowed_param:
    """
    f = kwargs.pop('file', None)
    headers, post_data = API._pack_image(filename, self.max_size_standard, form_field='media', f=f)
    kwargs.update({'headers': headers, 'post_data': post_data})

    return bind_api(
        api=self,
        path='/media/upload.json',
        method='POST',
        payload_type='media',
        allowed_param=[],
        require_auth=True,
        upload_api=True
    )(*args, **kwargs)
def upload_chunked(self, filename, *args, **kwargs):
    """ Upload large media via the chunked endpoint: INIT, one APPEND per
    chunk, then FINALIZE.

    :reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
    :allowed_param:
    """
    f = kwargs.pop('file', None)

    # Media category is dependant on whether media is attached to a tweet
    # or to a direct message. Assume tweet by default.
    is_direct_message = kwargs.pop('is_direct_message', False)

    # Initialize upload (Twitter cannot handle videos > 15 MB)
    headers, post_data, fp = API._chunk_media('init', filename, self.max_size_chunked, form_field='media', f=f, is_direct_message=is_direct_message)
    kwargs.update({'headers': headers, 'post_data': post_data})

    # Send the INIT request
    media_info = bind_api(
        api=self,
        path='/media/upload.json',
        method='POST',
        payload_type='media',
        allowed_param=[],
        require_auth=True,
        upload_api=True
    )(*args, **kwargs)

    # If a media ID has been generated, we can send the file
    if media_info.media_id:
        # default chunk size is 1MB, can be overridden with keyword argument.
        # minimum chunk size is 16K, which keeps the maximum number of chunks under 999
        chunk_size = kwargs.pop('chunk_size', 1024 * 1024)
        # BUG FIX: the floor was written as `16 * 2014` (~31.5K); the
        # comment above documents the intended minimum of 16K = 16 * 1024.
        chunk_size = max(chunk_size, 16 * 1024)

        fsize = getfilesize(filename, f)
        nloops = int(fsize / chunk_size) + (1 if fsize % chunk_size > 0 else 0)
        for i in range(nloops):
            headers, post_data, fp = API._chunk_media('append', filename, self.max_size_chunked, chunk_size=chunk_size, f=fp, media_id=media_info.media_id, segment_index=i, is_direct_message=is_direct_message)
            kwargs.update({'headers': headers, 'post_data': post_data, 'parser': RawParser()})
            # The APPEND command returns an empty response body
            bind_api(
                api=self,
                path='/media/upload.json',
                method='POST',
                payload_type='media',
                allowed_param=[],
                require_auth=True,
                upload_api=True
            )(*args, **kwargs)

        # When all chunks have been sent, we can finalize.
        headers, post_data, fp = API._chunk_media('finalize', filename, self.max_size_chunked, media_id=media_info.media_id, is_direct_message=is_direct_message)
        # NOTE(review): kwargs is rebuilt here, dropping any caller-supplied
        # extras that were sent with INIT/APPEND -- confirm intended.
        kwargs = {'headers': headers, 'post_data': post_data}

        # The FINALIZE command returns media information
        return bind_api(
            api=self,
            path='/media/upload.json',
            method='POST',
            payload_type='media',
            allowed_param=[],
            require_auth=True,
            upload_api=True
        )(*args, **kwargs)

    else:
        return media_info
@staticmethod
def _chunk_media(command, filename, max_size, form_field="media", chunk_size=4096, f=None, media_id=None, segment_index=0, is_direct_message=False):
fp = None
if command == 'init':
file_size = getfilesize(filename, f)
if file_size > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
if f is None:
# build the multipart-formdata body
fp = open(filename, 'rb')
else:
fp = f
elif command != 'finalize':
if f is not None:
fp = f
else:
raise TweepError('File input for APPEND is mandatory.')
# video must be mp4
file_type, _ = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
if file_type not in CHUNKED_MIMETYPES:
raise TweepError('Invalid file type for video: %s' % file_type)
BOUNDARY = b'Tw3ePy'
body = list()
if command == 'init':
query = {
'command': 'INIT',
'media_type': file_type,
'total_bytes': file_size,
'media_category': API._get_media_category(
is_direct_message, file_type)
}
body.append(urlencode(query).encode('utf-8'))
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
elif command == 'append':
if media_id is None:
raise TweepError('Media ID is required for APPEND command.')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="command"'.encode('utf-8'))
body.append(b'')
body.append(b'APPEND')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="media_id"'.encode('utf-8'))
body.append(b'')
body.append(str(media_id).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="segment_index"'.encode('utf-8'))
body.append(b'')
body.append(str(segment_index).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(form_field, os.path.basename(filename)).encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read(chunk_size))
body.append(b'--' + BOUNDARY + b'--')
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy'
}
elif command == 'finalize':
if media_id is None:
raise TweepError('Media ID is required for FINALIZE command.')
body.append(
urlencode({
'command': 'FINALIZE',
'media_id': media_id
}).encode('utf-8')
)
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
body = b'\r\n'.join(body)
# build headers
headers['Content-Length'] = str(len(body))
return headers, body, fp
@staticmethod
def _get_media_category(is_direct_message, file_type):
""" :reference: https://developer.twitter.com/en/docs/direct-messages/message-attachments/guides/attaching-media
:allowed_param:
"""
if is_direct_message:
prefix = 'dm'
else:
prefix = 'tweet'
if file_type in IMAGE_MIMETYPES:
if file_type == 'image/gif':
return prefix + '_gif'
else:
return prefix + '_image'
elif file_type == 'video/mp4':
return prefix + '_video'
|
fitnr/twitter_bot_utils | twitter_bot_utils/api.py | API.media_upload | python | def media_upload(self, filename, *args, **kwargs):
f = kwargs.pop('file', None)
mime, _ = mimetypes.guess_type(filename)
size = getfilesize(filename, f)
if mime in IMAGE_MIMETYPES and size < self.max_size_standard:
return self.image_upload(filename, file=f, *args, **kwargs)
elif mime in CHUNKED_MIMETYPES:
return self.upload_chunked(filename, file=f, *args, **kwargs)
else:
raise TweepError("Can't upload media with mime type %s" % mime) | :reference: https://dev.twitter.com/rest/reference/post/media/upload
:reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param: | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/api.py#L195-L212 | [
"def getfilesize(filename, f=None):\n if f is None:\n try:\n size = os.path.getsize(filename)\n except os.error as e:\n raise TweepError('Unable to access file: %s' % e.strerror)\n\n else:\n f.seek(0, 2) # Seek to end of file\n size = f.tell()\n f.seek... | class API(tweepy.API):
'''
Extends the tweepy API with config-file handling.
Args:
args (Namespace): argparse.Namespace to read.
screen_name (str): Twitter screen name
config_file (str): Config file. When False, don't read any config files. Defaults to bots.json or bots.yaml in ~/ or ~/bots/.
logger_name (str): Use a logger with this name. Defaults to screen_name
format (str): Format for logger. Defaults to 'file lineno: message'
verbose (bool): Set logging level to DEBUG
quiet (bool): Set logging level to ERROR. Overrides verbose.
use_env (bool): Allow environment variables to override settings. Default: True
kwargs: Other settings will be passed to the config
'''
_last_tweet = _last_reply = _last_retweet = None
max_size_standard = 5120 # standard uploads must be less then 5 MB
max_size_chunked = 15360 # chunked uploads must be less than 15 MB
def __init__(self, args=None, **kwargs):
'''
Construct the tbu.API object.
'''
# Update the kwargs with non-None contents of args
if isinstance(args, Namespace):
kwargs.update({k: v for k, v in vars(args).items() if v is not None})
self._screen_name = kwargs.pop('screen_name', None)
# Add a logger
level = logging.DEBUG if kwargs.pop('verbose', None) else None
level = logging.ERROR if kwargs.get('quiet', None) else level
self.logger = tbu_args.add_logger(kwargs.pop('logger_name', self._screen_name), level,
kwargs.pop('format', None))
# get config file and parse it
config = configure(self._screen_name, **kwargs)
self._config = {k: v for k, v in config.items() if k not in PROTECTED_INFO}
keys = {k: v for k, v in config.items() if k in PROTECTED_INFO}
if kwargs.get('use_env', True):
keys.update({
k: os.environ['TWITTER_' + k.upper()] for k in PROTECTED_INFO
if k not in keys and 'TWITTER_' + k.upper() in os.environ
})
try:
# setup auth
auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
try:
auth.set_access_token(
key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
secret=keys.get('secret', keys.get('oauth_secret'))
)
except KeyError:
# API won't have an access key
pass
except KeyError:
missing = [p for p in PROTECTED_INFO if p not in keys]
raise ValueError("Incomplete config. Missing {}".format(missing))
# initiate api connection
super(API, self).__init__(auth)
@property
def config(self):
return self._config
@property
def screen_name(self):
return self._screen_name
@property
def app(self):
return self._config['app']
def _sinces(self):
tl = self.user_timeline(self.screen_name, count=1000, include_rts=True, exclude_replies=False)
if len(tl) > 0:
self._last_tweet = tl[0].id
else:
self._last_tweet = self._last_reply = self._last_retweet = None
return
try:
self._last_reply = max(t.id for t in tl if t.in_reply_to_user_id)
except ValueError:
self._last_reply = None
try:
self._last_retweet = max(t.id for t in tl if t.retweeted)
except ValueError:
self._last_retweet = None
def _last(self, last_what, refresh):
if refresh or getattr(self, last_what) is None:
self._sinces()
return getattr(self, last_what)
@property
def last_tweet(self, refresh=None):
return self._last('_last_tweet', refresh)
@property
def last_reply(self, refresh=None):
return self._last('_last_reply', refresh)
@property
def last_retweet(self, refresh=None):
return self._last('_last_retweet', refresh)
def update_status(self, *pargs, **kwargs):
"""
Wrapper for tweepy.api.update_status with a 10s wait when twitter is over capacity
"""
try:
return super(API, self).update_status(*pargs, **kwargs)
except tweepy.TweepError as e:
if getattr(e, 'api_code', None) == 503:
sleep(10)
return super(API, self).update_status(*pargs, **kwargs)
else:
raise e
def image_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:allowed_param:
"""
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, self.max_size_standard, form_field='media', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
def upload_chunked(self, filename, *args, **kwargs):
""" :reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param:
"""
f = kwargs.pop('file', None)
# Media category is dependant on whether media is attached to a tweet
# or to a direct message. Assume tweet by default.
is_direct_message = kwargs.pop('is_direct_message', False)
# Initialize upload (Twitter cannot handle videos > 15 MB)
headers, post_data, fp = API._chunk_media('init', filename, self.max_size_chunked, form_field='media', f=f, is_direct_message=is_direct_message)
kwargs.update({'headers': headers, 'post_data': post_data})
# Send the INIT request
media_info = bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# If a media ID has been generated, we can send the file
if media_info.media_id:
# default chunk size is 1MB, can be overridden with keyword argument.
# minimum chunk size is 16K, which keeps the maximum number of chunks under 999
chunk_size = kwargs.pop('chunk_size', 1024 * 1024)
chunk_size = max(chunk_size, 16 * 2014)
fsize = getfilesize(filename, f)
nloops = int(fsize / chunk_size) + (1 if fsize % chunk_size > 0 else 0)
for i in range(nloops):
headers, post_data, fp = API._chunk_media('append', filename, self.max_size_chunked, chunk_size=chunk_size, f=fp, media_id=media_info.media_id, segment_index=i, is_direct_message=is_direct_message)
kwargs.update({ 'headers': headers, 'post_data': post_data, 'parser': RawParser() })
# The APPEND command returns an empty response body
bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# When all chunks have been sent, we can finalize.
headers, post_data, fp = API._chunk_media('finalize', filename, self.max_size_chunked, media_id=media_info.media_id, is_direct_message=is_direct_message)
kwargs = {'headers': headers, 'post_data': post_data}
# The FINALIZE command returns media information
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
else:
return media_info
@staticmethod
def _chunk_media(command, filename, max_size, form_field="media", chunk_size=4096, f=None, media_id=None, segment_index=0, is_direct_message=False):
fp = None
if command == 'init':
file_size = getfilesize(filename, f)
if file_size > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
if f is None:
# build the multipart-formdata body
fp = open(filename, 'rb')
else:
fp = f
elif command != 'finalize':
if f is not None:
fp = f
else:
raise TweepError('File input for APPEND is mandatory.')
# video must be mp4
file_type, _ = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
if file_type not in CHUNKED_MIMETYPES:
raise TweepError('Invalid file type for video: %s' % file_type)
BOUNDARY = b'Tw3ePy'
body = list()
if command == 'init':
query = {
'command': 'INIT',
'media_type': file_type,
'total_bytes': file_size,
'media_category': API._get_media_category(
is_direct_message, file_type)
}
body.append(urlencode(query).encode('utf-8'))
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
elif command == 'append':
if media_id is None:
raise TweepError('Media ID is required for APPEND command.')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="command"'.encode('utf-8'))
body.append(b'')
body.append(b'APPEND')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="media_id"'.encode('utf-8'))
body.append(b'')
body.append(str(media_id).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="segment_index"'.encode('utf-8'))
body.append(b'')
body.append(str(segment_index).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(form_field, os.path.basename(filename)).encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read(chunk_size))
body.append(b'--' + BOUNDARY + b'--')
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy'
}
elif command == 'finalize':
if media_id is None:
raise TweepError('Media ID is required for FINALIZE command.')
body.append(
urlencode({
'command': 'FINALIZE',
'media_id': media_id
}).encode('utf-8')
)
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
body = b'\r\n'.join(body)
# build headers
headers['Content-Length'] = str(len(body))
return headers, body, fp
@staticmethod
def _get_media_category(is_direct_message, file_type):
""" :reference: https://developer.twitter.com/en/docs/direct-messages/message-attachments/guides/attaching-media
:allowed_param:
"""
if is_direct_message:
prefix = 'dm'
else:
prefix = 'tweet'
if file_type in IMAGE_MIMETYPES:
if file_type == 'image/gif':
return prefix + '_gif'
else:
return prefix + '_image'
elif file_type == 'video/mp4':
return prefix + '_video'
|
fitnr/twitter_bot_utils | twitter_bot_utils/api.py | API.upload_chunked | python | def upload_chunked(self, filename, *args, **kwargs):
f = kwargs.pop('file', None)
# Media category is dependant on whether media is attached to a tweet
# or to a direct message. Assume tweet by default.
is_direct_message = kwargs.pop('is_direct_message', False)
# Initialize upload (Twitter cannot handle videos > 15 MB)
headers, post_data, fp = API._chunk_media('init', filename, self.max_size_chunked, form_field='media', f=f, is_direct_message=is_direct_message)
kwargs.update({'headers': headers, 'post_data': post_data})
# Send the INIT request
media_info = bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# If a media ID has been generated, we can send the file
if media_info.media_id:
# default chunk size is 1MB, can be overridden with keyword argument.
# minimum chunk size is 16K, which keeps the maximum number of chunks under 999
chunk_size = kwargs.pop('chunk_size', 1024 * 1024)
chunk_size = max(chunk_size, 16 * 2014)
fsize = getfilesize(filename, f)
nloops = int(fsize / chunk_size) + (1 if fsize % chunk_size > 0 else 0)
for i in range(nloops):
headers, post_data, fp = API._chunk_media('append', filename, self.max_size_chunked, chunk_size=chunk_size, f=fp, media_id=media_info.media_id, segment_index=i, is_direct_message=is_direct_message)
kwargs.update({ 'headers': headers, 'post_data': post_data, 'parser': RawParser() })
# The APPEND command returns an empty response body
bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# When all chunks have been sent, we can finalize.
headers, post_data, fp = API._chunk_media('finalize', filename, self.max_size_chunked, media_id=media_info.media_id, is_direct_message=is_direct_message)
kwargs = {'headers': headers, 'post_data': post_data}
# The FINALIZE command returns media information
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
else:
return media_info | :reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param: | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/api.py#L232-L294 | [
"def getfilesize(filename, f=None):\n if f is None:\n try:\n size = os.path.getsize(filename)\n except os.error as e:\n raise TweepError('Unable to access file: %s' % e.strerror)\n\n else:\n f.seek(0, 2) # Seek to end of file\n size = f.tell()\n f.seek... | class API(tweepy.API):
'''
Extends the tweepy API with config-file handling.
Args:
args (Namespace): argparse.Namespace to read.
screen_name (str): Twitter screen name
config_file (str): Config file. When False, don't read any config files. Defaults to bots.json or bots.yaml in ~/ or ~/bots/.
logger_name (str): Use a logger with this name. Defaults to screen_name
format (str): Format for logger. Defaults to 'file lineno: message'
verbose (bool): Set logging level to DEBUG
quiet (bool): Set logging level to ERROR. Overrides verbose.
use_env (bool): Allow environment variables to override settings. Default: True
kwargs: Other settings will be passed to the config
'''
_last_tweet = _last_reply = _last_retweet = None
max_size_standard = 5120 # standard uploads must be less then 5 MB
max_size_chunked = 15360 # chunked uploads must be less than 15 MB
def __init__(self, args=None, **kwargs):
'''
Construct the tbu.API object.
'''
# Update the kwargs with non-None contents of args
if isinstance(args, Namespace):
kwargs.update({k: v for k, v in vars(args).items() if v is not None})
self._screen_name = kwargs.pop('screen_name', None)
# Add a logger
level = logging.DEBUG if kwargs.pop('verbose', None) else None
level = logging.ERROR if kwargs.get('quiet', None) else level
self.logger = tbu_args.add_logger(kwargs.pop('logger_name', self._screen_name), level,
kwargs.pop('format', None))
# get config file and parse it
config = configure(self._screen_name, **kwargs)
self._config = {k: v for k, v in config.items() if k not in PROTECTED_INFO}
keys = {k: v for k, v in config.items() if k in PROTECTED_INFO}
if kwargs.get('use_env', True):
keys.update({
k: os.environ['TWITTER_' + k.upper()] for k in PROTECTED_INFO
if k not in keys and 'TWITTER_' + k.upper() in os.environ
})
try:
# setup auth
auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
try:
auth.set_access_token(
key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
secret=keys.get('secret', keys.get('oauth_secret'))
)
except KeyError:
# API won't have an access key
pass
except KeyError:
missing = [p for p in PROTECTED_INFO if p not in keys]
raise ValueError("Incomplete config. Missing {}".format(missing))
# initiate api connection
super(API, self).__init__(auth)
@property
def config(self):
return self._config
@property
def screen_name(self):
return self._screen_name
@property
def app(self):
return self._config['app']
def _sinces(self):
tl = self.user_timeline(self.screen_name, count=1000, include_rts=True, exclude_replies=False)
if len(tl) > 0:
self._last_tweet = tl[0].id
else:
self._last_tweet = self._last_reply = self._last_retweet = None
return
try:
self._last_reply = max(t.id for t in tl if t.in_reply_to_user_id)
except ValueError:
self._last_reply = None
try:
self._last_retweet = max(t.id for t in tl if t.retweeted)
except ValueError:
self._last_retweet = None
def _last(self, last_what, refresh):
if refresh or getattr(self, last_what) is None:
self._sinces()
return getattr(self, last_what)
@property
def last_tweet(self, refresh=None):
return self._last('_last_tweet', refresh)
@property
def last_reply(self, refresh=None):
return self._last('_last_reply', refresh)
@property
def last_retweet(self, refresh=None):
return self._last('_last_retweet', refresh)
def update_status(self, *pargs, **kwargs):
"""
Wrapper for tweepy.api.update_status with a 10s wait when twitter is over capacity
"""
try:
return super(API, self).update_status(*pargs, **kwargs)
except tweepy.TweepError as e:
if getattr(e, 'api_code', None) == 503:
sleep(10)
return super(API, self).update_status(*pargs, **kwargs)
else:
raise e
def media_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param:
"""
f = kwargs.pop('file', None)
mime, _ = mimetypes.guess_type(filename)
size = getfilesize(filename, f)
if mime in IMAGE_MIMETYPES and size < self.max_size_standard:
return self.image_upload(filename, file=f, *args, **kwargs)
elif mime in CHUNKED_MIMETYPES:
return self.upload_chunked(filename, file=f, *args, **kwargs)
else:
raise TweepError("Can't upload media with mime type %s" % mime)
def image_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:allowed_param:
"""
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, self.max_size_standard, form_field='media', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
@staticmethod
def _chunk_media(command, filename, max_size, form_field="media", chunk_size=4096, f=None, media_id=None, segment_index=0, is_direct_message=False):
fp = None
if command == 'init':
file_size = getfilesize(filename, f)
if file_size > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
if f is None:
# build the multipart-formdata body
fp = open(filename, 'rb')
else:
fp = f
elif command != 'finalize':
if f is not None:
fp = f
else:
raise TweepError('File input for APPEND is mandatory.')
# video must be mp4
file_type, _ = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
if file_type not in CHUNKED_MIMETYPES:
raise TweepError('Invalid file type for video: %s' % file_type)
BOUNDARY = b'Tw3ePy'
body = list()
if command == 'init':
query = {
'command': 'INIT',
'media_type': file_type,
'total_bytes': file_size,
'media_category': API._get_media_category(
is_direct_message, file_type)
}
body.append(urlencode(query).encode('utf-8'))
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
elif command == 'append':
if media_id is None:
raise TweepError('Media ID is required for APPEND command.')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="command"'.encode('utf-8'))
body.append(b'')
body.append(b'APPEND')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="media_id"'.encode('utf-8'))
body.append(b'')
body.append(str(media_id).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="segment_index"'.encode('utf-8'))
body.append(b'')
body.append(str(segment_index).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(form_field, os.path.basename(filename)).encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read(chunk_size))
body.append(b'--' + BOUNDARY + b'--')
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy'
}
elif command == 'finalize':
if media_id is None:
raise TweepError('Media ID is required for FINALIZE command.')
body.append(
urlencode({
'command': 'FINALIZE',
'media_id': media_id
}).encode('utf-8')
)
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
body = b'\r\n'.join(body)
# build headers
headers['Content-Length'] = str(len(body))
return headers, body, fp
@staticmethod
def _get_media_category(is_direct_message, file_type):
""" :reference: https://developer.twitter.com/en/docs/direct-messages/message-attachments/guides/attaching-media
:allowed_param:
"""
if is_direct_message:
prefix = 'dm'
else:
prefix = 'tweet'
if file_type in IMAGE_MIMETYPES:
if file_type == 'image/gif':
return prefix + '_gif'
else:
return prefix + '_image'
elif file_type == 'video/mp4':
return prefix + '_video'
|
fitnr/twitter_bot_utils | twitter_bot_utils/api.py | API._get_media_category | python | def _get_media_category(is_direct_message, file_type):
if is_direct_message:
prefix = 'dm'
else:
prefix = 'tweet'
if file_type in IMAGE_MIMETYPES:
if file_type == 'image/gif':
return prefix + '_gif'
else:
return prefix + '_image'
elif file_type == 'video/mp4':
return prefix + '_video' | :reference: https://developer.twitter.com/en/docs/direct-messages/message-attachments/guides/attaching-media
:allowed_param: | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/api.py#L382-L397 | null | class API(tweepy.API):
'''
Extends the tweepy API with config-file handling.
Args:
args (Namespace): argparse.Namespace to read.
screen_name (str): Twitter screen name
config_file (str): Config file. When False, don't read any config files. Defaults to bots.json or bots.yaml in ~/ or ~/bots/.
logger_name (str): Use a logger with this name. Defaults to screen_name
format (str): Format for logger. Defaults to 'file lineno: message'
verbose (bool): Set logging level to DEBUG
quiet (bool): Set logging level to ERROR. Overrides verbose.
use_env (bool): Allow environment variables to override settings. Default: True
kwargs: Other settings will be passed to the config
'''
_last_tweet = _last_reply = _last_retweet = None
max_size_standard = 5120 # standard uploads must be less then 5 MB
max_size_chunked = 15360 # chunked uploads must be less than 15 MB
def __init__(self, args=None, **kwargs):
'''
Construct the tbu.API object.
'''
# Update the kwargs with non-None contents of args
if isinstance(args, Namespace):
kwargs.update({k: v for k, v in vars(args).items() if v is not None})
self._screen_name = kwargs.pop('screen_name', None)
# Add a logger
level = logging.DEBUG if kwargs.pop('verbose', None) else None
level = logging.ERROR if kwargs.get('quiet', None) else level
self.logger = tbu_args.add_logger(kwargs.pop('logger_name', self._screen_name), level,
kwargs.pop('format', None))
# get config file and parse it
config = configure(self._screen_name, **kwargs)
self._config = {k: v for k, v in config.items() if k not in PROTECTED_INFO}
keys = {k: v for k, v in config.items() if k in PROTECTED_INFO}
if kwargs.get('use_env', True):
keys.update({
k: os.environ['TWITTER_' + k.upper()] for k in PROTECTED_INFO
if k not in keys and 'TWITTER_' + k.upper() in os.environ
})
try:
# setup auth
auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'], consumer_secret=keys['consumer_secret'])
try:
auth.set_access_token(
key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
secret=keys.get('secret', keys.get('oauth_secret'))
)
except KeyError:
# API won't have an access key
pass
except KeyError:
missing = [p for p in PROTECTED_INFO if p not in keys]
raise ValueError("Incomplete config. Missing {}".format(missing))
# initiate api connection
super(API, self).__init__(auth)
@property
def config(self):
return self._config
@property
def screen_name(self):
return self._screen_name
@property
def app(self):
return self._config['app']
def _sinces(self):
tl = self.user_timeline(self.screen_name, count=1000, include_rts=True, exclude_replies=False)
if len(tl) > 0:
self._last_tweet = tl[0].id
else:
self._last_tweet = self._last_reply = self._last_retweet = None
return
try:
self._last_reply = max(t.id for t in tl if t.in_reply_to_user_id)
except ValueError:
self._last_reply = None
try:
self._last_retweet = max(t.id for t in tl if t.retweeted)
except ValueError:
self._last_retweet = None
def _last(self, last_what, refresh):
if refresh or getattr(self, last_what) is None:
self._sinces()
return getattr(self, last_what)
@property
def last_tweet(self, refresh=None):
return self._last('_last_tweet', refresh)
@property
def last_reply(self, refresh=None):
return self._last('_last_reply', refresh)
@property
def last_retweet(self, refresh=None):
return self._last('_last_retweet', refresh)
def update_status(self, *pargs, **kwargs):
"""
Wrapper for tweepy.api.update_status with a 10s wait when twitter is over capacity
"""
try:
return super(API, self).update_status(*pargs, **kwargs)
except tweepy.TweepError as e:
if getattr(e, 'api_code', None) == 503:
sleep(10)
return super(API, self).update_status(*pargs, **kwargs)
else:
raise e
def media_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param:
"""
f = kwargs.pop('file', None)
mime, _ = mimetypes.guess_type(filename)
size = getfilesize(filename, f)
if mime in IMAGE_MIMETYPES and size < self.max_size_standard:
return self.image_upload(filename, file=f, *args, **kwargs)
elif mime in CHUNKED_MIMETYPES:
return self.upload_chunked(filename, file=f, *args, **kwargs)
else:
raise TweepError("Can't upload media with mime type %s" % mime)
def image_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:allowed_param:
"""
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, self.max_size_standard, form_field='media', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
def upload_chunked(self, filename, *args, **kwargs):
""" :reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param:
"""
f = kwargs.pop('file', None)
# Media category is dependant on whether media is attached to a tweet
# or to a direct message. Assume tweet by default.
is_direct_message = kwargs.pop('is_direct_message', False)
# Initialize upload (Twitter cannot handle videos > 15 MB)
headers, post_data, fp = API._chunk_media('init', filename, self.max_size_chunked, form_field='media', f=f, is_direct_message=is_direct_message)
kwargs.update({'headers': headers, 'post_data': post_data})
# Send the INIT request
media_info = bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# If a media ID has been generated, we can send the file
if media_info.media_id:
# default chunk size is 1MB, can be overridden with keyword argument.
# minimum chunk size is 16K, which keeps the maximum number of chunks under 999
chunk_size = kwargs.pop('chunk_size', 1024 * 1024)
chunk_size = max(chunk_size, 16 * 2014)
fsize = getfilesize(filename, f)
nloops = int(fsize / chunk_size) + (1 if fsize % chunk_size > 0 else 0)
for i in range(nloops):
headers, post_data, fp = API._chunk_media('append', filename, self.max_size_chunked, chunk_size=chunk_size, f=fp, media_id=media_info.media_id, segment_index=i, is_direct_message=is_direct_message)
kwargs.update({ 'headers': headers, 'post_data': post_data, 'parser': RawParser() })
# The APPEND command returns an empty response body
bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# When all chunks have been sent, we can finalize.
headers, post_data, fp = API._chunk_media('finalize', filename, self.max_size_chunked, media_id=media_info.media_id, is_direct_message=is_direct_message)
kwargs = {'headers': headers, 'post_data': post_data}
# The FINALIZE command returns media information
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
else:
return media_info
@staticmethod
def _chunk_media(command, filename, max_size, form_field="media", chunk_size=4096, f=None, media_id=None, segment_index=0, is_direct_message=False):
    """Build the headers and request body for one phase of a chunked media upload.

    :param command: upload phase, one of 'init', 'append' or 'finalize'
    :param filename: name of the media file (used for size and MIME-type checks)
    :param max_size: maximum allowed file size, in kilobytes
    :param form_field: multipart form field name carrying the chunk data
    :param chunk_size: number of bytes read from the file per APPEND call
    :param f: optional already-open file object; opened from *filename* when None
    :param media_id: media identifier returned by INIT (required for APPEND
        and FINALIZE)
    :param segment_index: zero-based index of the chunk being appended
    :param is_direct_message: whether the media targets a direct message
        (affects the media category sent on INIT)
    :return: tuple ``(headers, body, fp)``; *fp* is the file object to keep
        reading from on subsequent APPEND calls (None for FINALIZE)
    :raises TweepError: on an unknown command, oversized file, missing file
        object or media id, or an unsupported media type
    """
    if command not in ('init', 'append', 'finalize'):
        # Guard against programming errors: previously an unknown command
        # fell through every branch and raised a NameError on the unbound
        # `headers` variable.
        raise TweepError('Invalid chunked upload command: %s' % command)
    fp = None
    if command == 'init':
        file_size = getfilesize(filename, f)
        if file_size > (max_size * 1024):
            raise TweepError('File is too big, must be less than %skb.' % max_size)
        # Open the file ourselves unless the caller supplied a file object.
        fp = open(filename, 'rb') if f is None else f
    elif command == 'append':
        if f is None:
            raise TweepError('File input for APPEND is mandatory.')
        fp = f
    # Media must be of an uploadable type (e.g. mp4 video); the type is
    # derived from the filename and validated on every phase, as before.
    file_type, _ = mimetypes.guess_type(filename)
    if file_type is None:
        raise TweepError('Could not determine file type')
    if file_type not in CHUNKED_MIMETYPES:
        raise TweepError('Invalid file type for video: %s' % file_type)
    BOUNDARY = b'Tw3ePy'
    body = list()
    if command == 'init':
        query = {
            'command': 'INIT',
            'media_type': file_type,
            'total_bytes': file_size,
            'media_category': API._get_media_category(
                is_direct_message, file_type)
        }
        body.append(urlencode(query).encode('utf-8'))
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
        }
    elif command == 'append':
        if media_id is None:
            raise TweepError('Media ID is required for APPEND command.')
        # multipart/form-data fields: command, media_id, segment_index and
        # the raw chunk bytes themselves.
        body.append(b'--' + BOUNDARY)
        body.append('Content-Disposition: form-data; name="command"'.encode('utf-8'))
        body.append(b'')
        body.append(b'APPEND')
        body.append(b'--' + BOUNDARY)
        body.append('Content-Disposition: form-data; name="media_id"'.encode('utf-8'))
        body.append(b'')
        body.append(str(media_id).encode('utf-8'))
        body.append(b'--' + BOUNDARY)
        body.append('Content-Disposition: form-data; name="segment_index"'.encode('utf-8'))
        body.append(b'')
        body.append(str(segment_index).encode('utf-8'))
        body.append(b'--' + BOUNDARY)
        body.append('Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(form_field, os.path.basename(filename)).encode('utf-8'))
        body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
        body.append(b'')
        body.append(fp.read(chunk_size))
        body.append(b'--' + BOUNDARY + b'--')
        headers = {
            # Derive the header boundary from BOUNDARY so the two values
            # cannot silently drift apart.
            'Content-Type': 'multipart/form-data; boundary=' + BOUNDARY.decode('utf-8')
        }
    else:  # 'finalize'
        if media_id is None:
            raise TweepError('Media ID is required for FINALIZE command.')
        body.append(
            urlencode({
                'command': 'FINALIZE',
                'media_id': media_id
            }).encode('utf-8')
        )
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
        }
    body = b'\r\n'.join(body)
    # The body is fully assembled, so its exact length is known now.
    headers['Content-Length'] = str(len(body))
    return headers, body, fp
@staticmethod
|
wishtack/pysynthetic | synthetic/synthetic_decorator_factory.py | SyntheticDecoratorFactory.syntheticMemberDecorator | python | def syntheticMemberDecorator(self,
memberName,
defaultValue,
contract,
readOnly,
privateMemberName,
memberDelegate):
def decoratorFunction(cls):
syntheticMember = SyntheticMember(memberName,
defaultValue,
contract,
readOnly,
privateMemberName,
memberDelegate = memberDelegate)
SyntheticClassController(cls).addSyntheticMember(syntheticMember)
return cls
return decoratorFunction | :type memberName: str
:type readOnly: bool
:type privateMemberName: str|None
:type memberDelegate: IMemberDelegate | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_decorator_factory.py#L23-L47 | null | class SyntheticDecoratorFactory:
@contract
def syntheticConstructorDecorator(self):
def functionWrapper(cls):
# This will be used later to tell the new constructor to consume parameters to initialize members.
SyntheticClassController(cls).synthesizeConstructor()
return cls
return functionWrapper
def syntheticEqualityDecorator(self):
def functionWrapper(cls):
# This will be used to configure that equality operations should be generated
SyntheticClassController(cls).synthesizeEquality()
return cls
return functionWrapper
def namingConventionDecorator(self, namingConvention):
"""
:type namingConvention:INamingConvention
"""
def decoratorFunction(cls):
SyntheticClassController(cls).setNamingConvention(namingConvention)
return cls
return decoratorFunction
|
wishtack/pysynthetic | synthetic/synthetic_decorator_factory.py | SyntheticDecoratorFactory.namingConventionDecorator | python | def namingConventionDecorator(self, namingConvention):
def decoratorFunction(cls):
SyntheticClassController(cls).setNamingConvention(namingConvention)
return cls
return decoratorFunction | :type namingConvention:INamingConvention | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_decorator_factory.py#L65-L73 | null | class SyntheticDecoratorFactory:
@contract
def syntheticMemberDecorator(self,
memberName,
defaultValue,
contract,
readOnly,
privateMemberName,
memberDelegate):
"""
:type memberName: str
:type readOnly: bool
:type privateMemberName: str|None
:type memberDelegate: IMemberDelegate
"""
def decoratorFunction(cls):
syntheticMember = SyntheticMember(memberName,
defaultValue,
contract,
readOnly,
privateMemberName,
memberDelegate = memberDelegate)
SyntheticClassController(cls).addSyntheticMember(syntheticMember)
return cls
return decoratorFunction
def syntheticConstructorDecorator(self):
def functionWrapper(cls):
# This will be used later to tell the new constructor to consume parameters to initialize members.
SyntheticClassController(cls).synthesizeConstructor()
return cls
return functionWrapper
def syntheticEqualityDecorator(self):
def functionWrapper(cls):
# This will be used to configure that equality operations should be generated
SyntheticClassController(cls).synthesizeEquality()
return cls
return functionWrapper
def namingConventionDecorator(self, namingConvention):
"""
:type namingConvention:INamingConvention
"""
def decoratorFunction(cls):
SyntheticClassController(cls).setNamingConvention(namingConvention)
return cls
return decoratorFunction
|
wishtack/pysynthetic | synthetic/synthetic_constructor_factory.py | SyntheticConstructorFactory.makeConstructor | python | def makeConstructor(self, originalConstructor, syntheticMemberList, doesConsumeArguments):
# Original constructor's expected args.
originalConstructorExpectedArgList = []
doesExpectVariadicArgs = False
doesExpectKeywordedArgs = False
if inspect.isfunction(originalConstructor) or inspect.ismethod(originalConstructor):
argSpec = inspect.getargspec(originalConstructor)
# originalConstructorExpectedArgList = expected args - self.
originalConstructorExpectedArgList = argSpec.args[1:]
doesExpectVariadicArgs = (argSpec.varargs is not None)
doesExpectKeywordedArgs = (argSpec.keywords is not None)
def init(instance, *args, **kwargs):
if doesConsumeArguments:
# Merge original constructor's args specification with member list and make an args dict.
positionalArgumentKeyValueList = self._positionalArgumentKeyValueList(
originalConstructorExpectedArgList,
syntheticMemberList,
args)
# Set members values.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
# Default value.
value = syntheticMember.default()
# Constructor is synthesized.
if doesConsumeArguments:
value = self._consumeArgument(memberName,
positionalArgumentKeyValueList,
kwargs,
value)
# Checking that the contract is respected.
syntheticMember.checkContract(memberName, value)
# Initializing member with a value.
setattr(instance,
syntheticMember.privateMemberName(),
value)
if doesConsumeArguments:
# Remove superfluous arguments that have been used for synthesization but are not expected by constructor.
args, kwargs = self._filterArgsAndKwargs(
originalConstructorExpectedArgList=originalConstructorExpectedArgList,
syntheticMemberList=syntheticMemberList,
positionalArgumentKeyValueList=positionalArgumentKeyValueList,
keywordedArgDict=kwargs
)
# Call original constructor.
if originalConstructor is not None:
originalConstructor(instance, *args, **kwargs)
return init | :type syntheticMemberList: list(SyntheticMember)
:type doesConsumeArguments: bool | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_constructor_factory.py#L19-L82 | null | class SyntheticConstructorFactory:
@contract
@contract
def _positionalArgumentKeyValueList(self,
originalConstructorExpectedArgList,
syntheticMemberList,
argTuple):
"""Transforms args tuple to a dictionary mapping argument names to values using original constructor
positional args specification, then it adds synthesized members at the end if they are not already present.
:type syntheticMemberList: list(SyntheticMember)
:type argTuple: tuple
"""
# First, the list of expected arguments is set to original constructor's arg spec.
expectedArgList = copy.copy(originalConstructorExpectedArgList)
# ... then we append members that are not already present.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
if memberName not in expectedArgList:
expectedArgList.append(memberName)
# Makes a list of tuples (argumentName, argumentValue) with each element of each list (expectedArgList, argTuple)
# until the shortest list's end is reached.
positionalArgumentKeyValueList = list(zip(expectedArgList, argTuple))
# Add remaining arguments (those that are not expected by the original constructor).
for argumentValue in argTuple[len(positionalArgumentKeyValueList):]:
positionalArgumentKeyValueList.append((None, argumentValue))
return positionalArgumentKeyValueList
@contract
def _consumeArgument(self,
memberName,
positionalArgumentKeyValueList,
kwargs,
defaultValue):
"""Returns member's value from kwargs if found or from positionalArgumentKeyValueList if found
or default value otherwise.
:type memberName: str
:type positionalArgumentKeyValueList: list(tuple)
:type kwargs: dict(string:*)
"""
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
if memberName in kwargs:
return kwargs[memberName]
if memberName in positionalArgumentDict:
return positionalArgumentDict[memberName]
return defaultValue
@contract
def _filterArgsAndKwargs(
self,
originalConstructorExpectedArgList,
syntheticMemberList,
positionalArgumentKeyValueList,
keywordedArgDict):
"""Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor.
If original constructor accepts variadic args, all variadic args are forwarded.
If original constructor accepts keyworded args, all keyworded args are forwarded.
:type originalConstructorExpectedArgList: list(str)
:type syntheticMemberList: list(SyntheticMember)
:type positionalArgumentKeyValueList: list(tuple)
:type keywordedArgDict: dict(string:*)
"""
# List is initialized with all variadic arguments.
positionalArgumentKeyValueList = copy.copy(positionalArgumentKeyValueList)
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
# Dict is initialized with all keyworded arguments.
keywordedArgDict = keywordedArgDict.copy()
for syntheticMember in syntheticMemberList:
argumentName = syntheticMember.memberName()
# Argument is expected by the original constructor.
if argumentName in originalConstructorExpectedArgList:
continue
# We filter args only if original constructor does not expect variadic args.
if argumentName in positionalArgumentDict:
positionalArgumentKeyValueList = list(filter(lambda pair: pair[0] != argumentName,
positionalArgumentKeyValueList))
# We filter args only if original constructor does not expect keyworded args.
if argumentName in keywordedArgDict:
del keywordedArgDict[argumentName]
positionalArgumentTuple = tuple([value for _, value in positionalArgumentKeyValueList])
return positionalArgumentTuple, keywordedArgDict
|
wishtack/pysynthetic | synthetic/synthetic_constructor_factory.py | SyntheticConstructorFactory._positionalArgumentKeyValueList | python | def _positionalArgumentKeyValueList(self,
originalConstructorExpectedArgList,
syntheticMemberList,
argTuple):
# First, the list of expected arguments is set to original constructor's arg spec.
expectedArgList = copy.copy(originalConstructorExpectedArgList)
# ... then we append members that are not already present.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
if memberName not in expectedArgList:
expectedArgList.append(memberName)
# Makes a list of tuples (argumentName, argumentValue) with each element of each list (expectedArgList, argTuple)
# until the shortest list's end is reached.
positionalArgumentKeyValueList = list(zip(expectedArgList, argTuple))
# Add remaining arguments (those that are not expected by the original constructor).
for argumentValue in argTuple[len(positionalArgumentKeyValueList):]:
positionalArgumentKeyValueList.append((None, argumentValue))
return positionalArgumentKeyValueList | Transforms args tuple to a dictionary mapping argument names to values using original constructor
positional args specification, then it adds synthesized members at the end if they are not already present.
:type syntheticMemberList: list(SyntheticMember)
:type argTuple: tuple | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_constructor_factory.py#L85-L112 | null | class SyntheticConstructorFactory:
@contract
def makeConstructor(self, originalConstructor, syntheticMemberList, doesConsumeArguments):
"""
:type syntheticMemberList: list(SyntheticMember)
:type doesConsumeArguments: bool
"""
# Original constructor's expected args.
originalConstructorExpectedArgList = []
doesExpectVariadicArgs = False
doesExpectKeywordedArgs = False
if inspect.isfunction(originalConstructor) or inspect.ismethod(originalConstructor):
argSpec = inspect.getargspec(originalConstructor)
# originalConstructorExpectedArgList = expected args - self.
originalConstructorExpectedArgList = argSpec.args[1:]
doesExpectVariadicArgs = (argSpec.varargs is not None)
doesExpectKeywordedArgs = (argSpec.keywords is not None)
def init(instance, *args, **kwargs):
if doesConsumeArguments:
# Merge original constructor's args specification with member list and make an args dict.
positionalArgumentKeyValueList = self._positionalArgumentKeyValueList(
originalConstructorExpectedArgList,
syntheticMemberList,
args)
# Set members values.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
# Default value.
value = syntheticMember.default()
# Constructor is synthesized.
if doesConsumeArguments:
value = self._consumeArgument(memberName,
positionalArgumentKeyValueList,
kwargs,
value)
# Checking that the contract is respected.
syntheticMember.checkContract(memberName, value)
# Initializing member with a value.
setattr(instance,
syntheticMember.privateMemberName(),
value)
if doesConsumeArguments:
# Remove superfluous arguments that have been used for synthesization but are not expected by constructor.
args, kwargs = self._filterArgsAndKwargs(
originalConstructorExpectedArgList=originalConstructorExpectedArgList,
syntheticMemberList=syntheticMemberList,
positionalArgumentKeyValueList=positionalArgumentKeyValueList,
keywordedArgDict=kwargs
)
# Call original constructor.
if originalConstructor is not None:
originalConstructor(instance, *args, **kwargs)
return init
@contract
@contract
def _consumeArgument(self,
memberName,
positionalArgumentKeyValueList,
kwargs,
defaultValue):
"""Returns member's value from kwargs if found or from positionalArgumentKeyValueList if found
or default value otherwise.
:type memberName: str
:type positionalArgumentKeyValueList: list(tuple)
:type kwargs: dict(string:*)
"""
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
if memberName in kwargs:
return kwargs[memberName]
if memberName in positionalArgumentDict:
return positionalArgumentDict[memberName]
return defaultValue
@contract
def _filterArgsAndKwargs(
self,
originalConstructorExpectedArgList,
syntheticMemberList,
positionalArgumentKeyValueList,
keywordedArgDict):
"""Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor.
If original constructor accepts variadic args, all variadic args are forwarded.
If original constructor accepts keyworded args, all keyworded args are forwarded.
:type originalConstructorExpectedArgList: list(str)
:type syntheticMemberList: list(SyntheticMember)
:type positionalArgumentKeyValueList: list(tuple)
:type keywordedArgDict: dict(string:*)
"""
# List is initialized with all variadic arguments.
positionalArgumentKeyValueList = copy.copy(positionalArgumentKeyValueList)
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
# Dict is initialized with all keyworded arguments.
keywordedArgDict = keywordedArgDict.copy()
for syntheticMember in syntheticMemberList:
argumentName = syntheticMember.memberName()
# Argument is expected by the original constructor.
if argumentName in originalConstructorExpectedArgList:
continue
# We filter args only if original constructor does not expect variadic args.
if argumentName in positionalArgumentDict:
positionalArgumentKeyValueList = list(filter(lambda pair: pair[0] != argumentName,
positionalArgumentKeyValueList))
# We filter args only if original constructor does not expect keyworded args.
if argumentName in keywordedArgDict:
del keywordedArgDict[argumentName]
positionalArgumentTuple = tuple([value for _, value in positionalArgumentKeyValueList])
return positionalArgumentTuple, keywordedArgDict
|
wishtack/pysynthetic | synthetic/synthetic_constructor_factory.py | SyntheticConstructorFactory._consumeArgument | python | def _consumeArgument(self,
memberName,
positionalArgumentKeyValueList,
kwargs,
defaultValue):
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
if memberName in kwargs:
return kwargs[memberName]
if memberName in positionalArgumentDict:
return positionalArgumentDict[memberName]
return defaultValue | Returns member's value from kwargs if found or from positionalArgumentKeyValueList if found
or default value otherwise.
:type memberName: str
:type positionalArgumentKeyValueList: list(tuple)
:type kwargs: dict(string:*) | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_constructor_factory.py#L115-L136 | null | class SyntheticConstructorFactory:
@contract
def makeConstructor(self, originalConstructor, syntheticMemberList, doesConsumeArguments):
"""
:type syntheticMemberList: list(SyntheticMember)
:type doesConsumeArguments: bool
"""
# Original constructor's expected args.
originalConstructorExpectedArgList = []
doesExpectVariadicArgs = False
doesExpectKeywordedArgs = False
if inspect.isfunction(originalConstructor) or inspect.ismethod(originalConstructor):
argSpec = inspect.getargspec(originalConstructor)
# originalConstructorExpectedArgList = expected args - self.
originalConstructorExpectedArgList = argSpec.args[1:]
doesExpectVariadicArgs = (argSpec.varargs is not None)
doesExpectKeywordedArgs = (argSpec.keywords is not None)
def init(instance, *args, **kwargs):
if doesConsumeArguments:
# Merge original constructor's args specification with member list and make an args dict.
positionalArgumentKeyValueList = self._positionalArgumentKeyValueList(
originalConstructorExpectedArgList,
syntheticMemberList,
args)
# Set members values.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
# Default value.
value = syntheticMember.default()
# Constructor is synthesized.
if doesConsumeArguments:
value = self._consumeArgument(memberName,
positionalArgumentKeyValueList,
kwargs,
value)
# Checking that the contract is respected.
syntheticMember.checkContract(memberName, value)
# Initializing member with a value.
setattr(instance,
syntheticMember.privateMemberName(),
value)
if doesConsumeArguments:
# Remove superfluous arguments that have been used for synthesization but are not expected by constructor.
args, kwargs = self._filterArgsAndKwargs(
originalConstructorExpectedArgList=originalConstructorExpectedArgList,
syntheticMemberList=syntheticMemberList,
positionalArgumentKeyValueList=positionalArgumentKeyValueList,
keywordedArgDict=kwargs
)
# Call original constructor.
if originalConstructor is not None:
originalConstructor(instance, *args, **kwargs)
return init
@contract
def _positionalArgumentKeyValueList(self,
originalConstructorExpectedArgList,
syntheticMemberList,
argTuple):
"""Transforms args tuple to a dictionary mapping argument names to values using original constructor
positional args specification, then it adds synthesized members at the end if they are not already present.
:type syntheticMemberList: list(SyntheticMember)
:type argTuple: tuple
"""
# First, the list of expected arguments is set to original constructor's arg spec.
expectedArgList = copy.copy(originalConstructorExpectedArgList)
# ... then we append members that are not already present.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
if memberName not in expectedArgList:
expectedArgList.append(memberName)
# Makes a list of tuples (argumentName, argumentValue) with each element of each list (expectedArgList, argTuple)
# until the shortest list's end is reached.
positionalArgumentKeyValueList = list(zip(expectedArgList, argTuple))
# Add remaining arguments (those that are not expected by the original constructor).
for argumentValue in argTuple[len(positionalArgumentKeyValueList):]:
positionalArgumentKeyValueList.append((None, argumentValue))
return positionalArgumentKeyValueList
@contract
@contract
def _filterArgsAndKwargs(
self,
originalConstructorExpectedArgList,
syntheticMemberList,
positionalArgumentKeyValueList,
keywordedArgDict):
"""Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor.
If original constructor accepts variadic args, all variadic args are forwarded.
If original constructor accepts keyworded args, all keyworded args are forwarded.
:type originalConstructorExpectedArgList: list(str)
:type syntheticMemberList: list(SyntheticMember)
:type positionalArgumentKeyValueList: list(tuple)
:type keywordedArgDict: dict(string:*)
"""
# List is initialized with all variadic arguments.
positionalArgumentKeyValueList = copy.copy(positionalArgumentKeyValueList)
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
# Dict is initialized with all keyworded arguments.
keywordedArgDict = keywordedArgDict.copy()
for syntheticMember in syntheticMemberList:
argumentName = syntheticMember.memberName()
# Argument is expected by the original constructor.
if argumentName in originalConstructorExpectedArgList:
continue
# We filter args only if original constructor does not expect variadic args.
if argumentName in positionalArgumentDict:
positionalArgumentKeyValueList = list(filter(lambda pair: pair[0] != argumentName,
positionalArgumentKeyValueList))
# We filter args only if original constructor does not expect keyworded args.
if argumentName in keywordedArgDict:
del keywordedArgDict[argumentName]
positionalArgumentTuple = tuple([value for _, value in positionalArgumentKeyValueList])
return positionalArgumentTuple, keywordedArgDict
|
wishtack/pysynthetic | synthetic/synthetic_constructor_factory.py | SyntheticConstructorFactory._filterArgsAndKwargs | python | def _filterArgsAndKwargs(
self,
originalConstructorExpectedArgList,
syntheticMemberList,
positionalArgumentKeyValueList,
keywordedArgDict):
# List is initialized with all variadic arguments.
positionalArgumentKeyValueList = copy.copy(positionalArgumentKeyValueList)
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
# Dict is initialized with all keyworded arguments.
keywordedArgDict = keywordedArgDict.copy()
for syntheticMember in syntheticMemberList:
argumentName = syntheticMember.memberName()
# Argument is expected by the original constructor.
if argumentName in originalConstructorExpectedArgList:
continue
# We filter args only if original constructor does not expect variadic args.
if argumentName in positionalArgumentDict:
positionalArgumentKeyValueList = list(filter(lambda pair: pair[0] != argumentName,
positionalArgumentKeyValueList))
# We filter args only if original constructor does not expect keyworded args.
if argumentName in keywordedArgDict:
del keywordedArgDict[argumentName]
positionalArgumentTuple = tuple([value for _, value in positionalArgumentKeyValueList])
return positionalArgumentTuple, keywordedArgDict | Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor.
If original constructor accepts variadic args, all variadic args are forwarded.
If original constructor accepts keyworded args, all keyworded args are forwarded.
:type originalConstructorExpectedArgList: list(str)
:type syntheticMemberList: list(SyntheticMember)
:type positionalArgumentKeyValueList: list(tuple)
:type keywordedArgDict: dict(string:*) | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_constructor_factory.py#L139-L182 | null | class SyntheticConstructorFactory:
@contract
def makeConstructor(self, originalConstructor, syntheticMemberList, doesConsumeArguments):
"""
:type syntheticMemberList: list(SyntheticMember)
:type doesConsumeArguments: bool
"""
# Original constructor's expected args.
originalConstructorExpectedArgList = []
doesExpectVariadicArgs = False
doesExpectKeywordedArgs = False
if inspect.isfunction(originalConstructor) or inspect.ismethod(originalConstructor):
argSpec = inspect.getargspec(originalConstructor)
# originalConstructorExpectedArgList = expected args - self.
originalConstructorExpectedArgList = argSpec.args[1:]
doesExpectVariadicArgs = (argSpec.varargs is not None)
doesExpectKeywordedArgs = (argSpec.keywords is not None)
def init(instance, *args, **kwargs):
if doesConsumeArguments:
# Merge original constructor's args specification with member list and make an args dict.
positionalArgumentKeyValueList = self._positionalArgumentKeyValueList(
originalConstructorExpectedArgList,
syntheticMemberList,
args)
# Set members values.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
# Default value.
value = syntheticMember.default()
# Constructor is synthesized.
if doesConsumeArguments:
value = self._consumeArgument(memberName,
positionalArgumentKeyValueList,
kwargs,
value)
# Checking that the contract is respected.
syntheticMember.checkContract(memberName, value)
# Initializing member with a value.
setattr(instance,
syntheticMember.privateMemberName(),
value)
if doesConsumeArguments:
# Remove superfluous arguments that have been used for synthesization but are not expected by constructor.
args, kwargs = self._filterArgsAndKwargs(
originalConstructorExpectedArgList=originalConstructorExpectedArgList,
syntheticMemberList=syntheticMemberList,
positionalArgumentKeyValueList=positionalArgumentKeyValueList,
keywordedArgDict=kwargs
)
# Call original constructor.
if originalConstructor is not None:
originalConstructor(instance, *args, **kwargs)
return init
@contract
def _positionalArgumentKeyValueList(self,
originalConstructorExpectedArgList,
syntheticMemberList,
argTuple):
"""Transforms args tuple to a dictionary mapping argument names to values using original constructor
positional args specification, then it adds synthesized members at the end if they are not already present.
:type syntheticMemberList: list(SyntheticMember)
:type argTuple: tuple
"""
# First, the list of expected arguments is set to original constructor's arg spec.
expectedArgList = copy.copy(originalConstructorExpectedArgList)
# ... then we append members that are not already present.
for syntheticMember in syntheticMemberList:
memberName = syntheticMember.memberName()
if memberName not in expectedArgList:
expectedArgList.append(memberName)
# Makes a list of tuples (argumentName, argumentValue) with each element of each list (expectedArgList, argTuple)
# until the shortest list's end is reached.
positionalArgumentKeyValueList = list(zip(expectedArgList, argTuple))
# Add remaining arguments (those that are not expected by the original constructor).
for argumentValue in argTuple[len(positionalArgumentKeyValueList):]:
positionalArgumentKeyValueList.append((None, argumentValue))
return positionalArgumentKeyValueList
@contract
def _consumeArgument(self,
memberName,
positionalArgumentKeyValueList,
kwargs,
defaultValue):
"""Returns member's value from kwargs if found or from positionalArgumentKeyValueList if found
or default value otherwise.
:type memberName: str
:type positionalArgumentKeyValueList: list(tuple)
:type kwargs: dict(string:*)
"""
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
if memberName in kwargs:
return kwargs[memberName]
if memberName in positionalArgumentDict:
return positionalArgumentDict[memberName]
return defaultValue
@contract
|
wishtack/pysynthetic | synthetic/synthetic_class_controller.py | SyntheticClassController.setNamingConvention | python | def setNamingConvention(self, namingConvention):
# Remove getters and setters with old naming convention.
self._removeSyntheticMembers()
# Set new naming convention.
self._syntheticMetaData().setNamingConvention(namingConvention)
# Update constructor and recreate accessors.
self._updateConstructorAndMembers() | :type namingConvention: INamingConvention | train | https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_class_controller.py#L53-L64 | [
"def _syntheticMetaData(self):\n # SyntheticMetaData does not exist...\n syntheticMetaDataName = '__syntheticMetaData__{className}'.format(className=self._class.__name__)\n if not hasattr(self._class, syntheticMetaDataName):\n # ...we create it.\n originalConstructor = getattr(self._class, '_... | class SyntheticClassController:
def __init__(self, cls):
self._constructorFactory = SyntheticConstructorFactory()
self._comparisonFactory = SyntheticComparisonFactory()
self._class = cls
@contract
def addSyntheticMember(self, syntheticMember):
"""
:type syntheticMember: SyntheticMember
"""
# Inserting this member at the beginning of the member list of synthesization data attribute
# because decorators are called in reversed order.
self._syntheticMetaData().insertSyntheticMemberAtBegin(syntheticMember)
# Update constructor and recreate accessors.
self._updateConstructorAndMembers()
def synthesizeConstructor(self):
self._syntheticMetaData().setConsumeArguments(True)
# Update constructor and recreate accessors.
self._updateConstructorAndMembers()
def synthesizeEquality(self):
    """Enable generation of ``__eq__``/``__ne__``/``__hash__`` for the class."""
    metaData = self._syntheticMetaData()
    metaData.setEqualityGeneration(True)
    # Rebuild the constructor and (re)generate every accessor.
    self._updateConstructorAndMembers()
@contract
def setNamingConvention(self, namingConvention):
    """
    :type namingConvention: INamingConvention
    """
    # Drop the accessors that were generated under the previous convention...
    self._removeSyntheticMembers()
    # ...record the new convention in the meta data...
    self._syntheticMetaData().setNamingConvention(namingConvention)
    # ...then rebuild the constructor and accessors with the new names.
    self._updateConstructorAndMembers()
def _syntheticMetaData(self):
    """Return this class's SyntheticMetaData, creating it on first access."""
    attributeName = '__syntheticMetaData__{className}'.format(className=self._class.__name__)
    # Lazily attach the meta data object to the class the first time any
    # synthesization decorator touches it.
    if not hasattr(self._class, attributeName):
        cls = self._class
        # Snapshot the dunders that synthesization may override later on.
        originalConstructor = getattr(cls, '__init__', None)
        originalEqualFunction = getattr(cls, '__eq__', None)
        originalNotEqualFunction = getattr(cls, '__ne__', None)
        originalHashFunction = getattr(cls, '__hash__', None)
        # Names of the pre-existing members (Python2: ismethod, Python3: isfunction).
        memberNameList = [member[0] for member in inspect.getmembers(cls)]
        # NOTE: 'originalHashFuction' (sic) matches SyntheticMetaData's signature.
        setattr(cls,
                attributeName,
                SyntheticMetaData(cls=cls,
                                  originalConstructor=originalConstructor,
                                  originalEqualFunction=originalEqualFunction,
                                  originalNotEqualFunction=originalNotEqualFunction,
                                  originalHashFuction=originalHashFunction,
                                  originalMemberNameList=memberNameList))
    return getattr(self._class, attributeName)
def _updateConstructorAndMembers(self):
    """Regenerate the constructor, the accessors and (when enabled) the
    equality functions.

    Everything is overwritten on every call: the constructor might have to
    consume members whose decorator sits below "synthesizeConstructor", and
    a naming convention change invalidates previously generated
    getters/setters.
    """
    metaData = self._syntheticMetaData()
    memberList = metaData.syntheticMemberList()
    # Install the (possibly argument-consuming) synthesized constructor.
    self._class.__init__ = self._constructorFactory.makeConstructor(
        metaData.originalConstructor(),
        memberList,
        metaData.doesConsumeArguments())
    # (Re)generate the accessors of every declared member.
    for member in memberList:
        member.apply(self._class,
                     metaData.originalMemberNameList(),
                     metaData.namingConvention())
    # Optionally synthesize __eq__ / __ne__ / __hash__.
    if metaData.hasEqualityGeneration():
        self._class.__eq__ = self._comparisonFactory.makeEqualFunction(
            metaData.originalEqualFunction(), memberList)
        self._class.__ne__ = self._comparisonFactory.makeNotEqualFunction(
            metaData.originalNotEqualFunction(), memberList)
        self._class.__hash__ = self._comparisonFactory.makeHashFunction(
            metaData.originalHashFunction(), memberList)
def _removeSyntheticMembers(self):
    """Strip every generated accessor from the class."""
    metaData = self._syntheticMetaData()
    originalNames = metaData.originalMemberNameList()
    convention = metaData.namingConvention()
    for member in metaData.syntheticMemberList():
        # Each member knows how to undo its own accessor injection.
        member.remove(self._class, originalNames, convention)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.