repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.set_file | def set_file(self, filename):
""" Analyse the file with the captured content """
# Use the file name as prefix if none is given
if self.output_prefix is None:
_, self.output_prefix = os.path.split(filename)
# Check if the file is present, since rdpcap will not do that
if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
print 'The file \'{0}\' is either not present or not readable. '\
'Exiting!'.format(filename)
sys.exit(1)
try:
packets = rdpcap(filename)
except NameError:
# Due probably to a bug in rdpcap, this kind of error raises a
# NameError, because the exception that is tried to raise, is not
# defined
print 'The file \'{}\' is not a pcap capture file. Exiting!'\
.format(filename)
sys.exit(2)
for number, packet in enumerate(packets):
# See if there is a field called load
self._debug('\nNUMBER {0}'.format(number), no_prefix=True)
try:
# Will cause AttributeError if there is no load
packet.getfieldval('load')
# Get the full load
load = packet.sprintf('%TCP.payload%')
self._debug('PAYLOAD LENGTH {0}'.format(len(load)),
no_prefix=True)
self._debug(load, load=True)
self._parse_load(load)
except AttributeError:
self._debug('LOAD EXCEPTION', no_prefix=True)
if len(self.messages) > 0 and not self.messages[-1].write_closed:
self._debug('DELETE LAST OPEN FILE')
del self.messages[-1]
if self.args.debug_analysis:
sys.exit(0) | python | def set_file(self, filename):
""" Analyse the file with the captured content """
# Use the file name as prefix if none is given
if self.output_prefix is None:
_, self.output_prefix = os.path.split(filename)
# Check if the file is present, since rdpcap will not do that
if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
print 'The file \'{0}\' is either not present or not readable. '\
'Exiting!'.format(filename)
sys.exit(1)
try:
packets = rdpcap(filename)
except NameError:
# Due probably to a bug in rdpcap, this kind of error raises a
# NameError, because the exception that is tried to raise, is not
# defined
print 'The file \'{}\' is not a pcap capture file. Exiting!'\
.format(filename)
sys.exit(2)
for number, packet in enumerate(packets):
# See if there is a field called load
self._debug('\nNUMBER {0}'.format(number), no_prefix=True)
try:
# Will cause AttributeError if there is no load
packet.getfieldval('load')
# Get the full load
load = packet.sprintf('%TCP.payload%')
self._debug('PAYLOAD LENGTH {0}'.format(len(load)),
no_prefix=True)
self._debug(load, load=True)
self._parse_load(load)
except AttributeError:
self._debug('LOAD EXCEPTION', no_prefix=True)
if len(self.messages) > 0 and not self.messages[-1].write_closed:
self._debug('DELETE LAST OPEN FILE')
del self.messages[-1]
if self.args.debug_analysis:
sys.exit(0) | [
"def",
"set_file",
"(",
"self",
",",
"filename",
")",
":",
"# Use the file name as prefix if none is given",
"if",
"self",
".",
"output_prefix",
"is",
"None",
":",
"_",
",",
"self",
".",
"output_prefix",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
... | Analyse the file with the captured content | [
"Analyse",
"the",
"file",
"with",
"the",
"captured",
"content"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L86-L125 | train | 214,800 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS._parse_load | def _parse_load(self, load):
""" Parse the load from a single packet """
# If the load is ??
if load in ['??']:
self._debug('IGNORING')
# If there is a start in load
elif any([start in load for start in STARTS]):
self._debug('START')
self.messages.append(WSPart(load, self.args))
# and there is also an end
if any([end in load for end in ENDS]):
self.messages[-1].finalize_content()
self._debug('AND END')
# If there is an end in load
elif any([end in load for end in ENDS]):
# If there is an open WSPart
if len(self.messages) > 0 and not\
self.messages[-1].write_closed:
self._debug('END ON OPEN FILE')
self.messages[-1].add_content(load)
self.messages[-1].finalize_content()
# Ignore ends before start
else:
self._debug('END BUT NO OPEN FILE')
else:
# If there is an open WSPart
if len(self.messages) > 0 and not\
self.messages[-1].write_closed:
self._debug('ADD TO OPEN FILE')
self.messages[-1].add_content(load)
# else ignore
else:
self._debug('NOTHING TO DO') | python | def _parse_load(self, load):
""" Parse the load from a single packet """
# If the load is ??
if load in ['??']:
self._debug('IGNORING')
# If there is a start in load
elif any([start in load for start in STARTS]):
self._debug('START')
self.messages.append(WSPart(load, self.args))
# and there is also an end
if any([end in load for end in ENDS]):
self.messages[-1].finalize_content()
self._debug('AND END')
# If there is an end in load
elif any([end in load for end in ENDS]):
# If there is an open WSPart
if len(self.messages) > 0 and not\
self.messages[-1].write_closed:
self._debug('END ON OPEN FILE')
self.messages[-1].add_content(load)
self.messages[-1].finalize_content()
# Ignore ends before start
else:
self._debug('END BUT NO OPEN FILE')
else:
# If there is an open WSPart
if len(self.messages) > 0 and not\
self.messages[-1].write_closed:
self._debug('ADD TO OPEN FILE')
self.messages[-1].add_content(load)
# else ignore
else:
self._debug('NOTHING TO DO') | [
"def",
"_parse_load",
"(",
"self",
",",
"load",
")",
":",
"# If the load is ??",
"if",
"load",
"in",
"[",
"'??'",
"]",
":",
"self",
".",
"_debug",
"(",
"'IGNORING'",
")",
"# If there is a start in load",
"elif",
"any",
"(",
"[",
"start",
"in",
"load",
"for... | Parse the load from a single packet | [
"Parse",
"the",
"load",
"from",
"a",
"single",
"packet"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L127-L159 | train | 214,801 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS._debug | def _debug(self, message, load=False, no_prefix=False):
""" Output debug information """
if self.args.debug_analysis:
if load:
message = '\r\n'.join(
['# ' + line for line in message.strip().split('\r\n')]
)
print '{0}\n{1}\n{0}'.format('#' * 78, message)
else:
# If open message and no_prefix is False
if (len(self.messages) > 0 and not
self.messages[-1].write_closed) and not no_prefix:
print '--OPEN--> {0}'.format(message)
else:
print message | python | def _debug(self, message, load=False, no_prefix=False):
""" Output debug information """
if self.args.debug_analysis:
if load:
message = '\r\n'.join(
['# ' + line for line in message.strip().split('\r\n')]
)
print '{0}\n{1}\n{0}'.format('#' * 78, message)
else:
# If open message and no_prefix is False
if (len(self.messages) > 0 and not
self.messages[-1].write_closed) and not no_prefix:
print '--OPEN--> {0}'.format(message)
else:
print message | [
"def",
"_debug",
"(",
"self",
",",
"message",
",",
"load",
"=",
"False",
",",
"no_prefix",
"=",
"False",
")",
":",
"if",
"self",
".",
"args",
".",
"debug_analysis",
":",
"if",
"load",
":",
"message",
"=",
"'\\r\\n'",
".",
"join",
"(",
"[",
"'# '",
... | Output debug information | [
"Output",
"debug",
"information"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L161-L175 | train | 214,802 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.to_file_mode | def to_file_mode(self):
""" Write all the messages to files """
for message_no in range(len(self.messages)):
self.__to_file(message_no) | python | def to_file_mode(self):
""" Write all the messages to files """
for message_no in range(len(self.messages)):
self.__to_file(message_no) | [
"def",
"to_file_mode",
"(",
"self",
")",
":",
"for",
"message_no",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"messages",
")",
")",
":",
"self",
".",
"__to_file",
"(",
"message_no",
")"
] | Write all the messages to files | [
"Write",
"all",
"the",
"messages",
"to",
"files"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L177-L180 | train | 214,803 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.__to_file | def __to_file(self, message_no):
""" Write a single message to file """
filename = self.__create_file_name(message_no)
try:
with codecs.open(filename, mode='w',
encoding=self.messages[message_no].encoding)\
as file__:
file__.write(self.messages[message_no].output)
except IOError as excep:
print 'Unable for open the file \'{0}\' for writing. The '\
'following exception was raised:'.format(filename)
print excep
print 'Exiting!'
sys.exit(2)
return filename | python | def __to_file(self, message_no):
""" Write a single message to file """
filename = self.__create_file_name(message_no)
try:
with codecs.open(filename, mode='w',
encoding=self.messages[message_no].encoding)\
as file__:
file__.write(self.messages[message_no].output)
except IOError as excep:
print 'Unable for open the file \'{0}\' for writing. The '\
'following exception was raised:'.format(filename)
print excep
print 'Exiting!'
sys.exit(2)
return filename | [
"def",
"__to_file",
"(",
"self",
",",
"message_no",
")",
":",
"filename",
"=",
"self",
".",
"__create_file_name",
"(",
"message_no",
")",
"try",
":",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"self",
... | Write a single message to file | [
"Write",
"a",
"single",
"message",
"to",
"file"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L182-L196 | train | 214,804 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.__create_file_name | def __create_file_name(self, message_no):
""" Create the filename to save to """
cwd = os.getcwd()
filename = '{0}_{1}.xml'.format(self.output_prefix, message_no)
return os.path.join(cwd, filename) | python | def __create_file_name(self, message_no):
""" Create the filename to save to """
cwd = os.getcwd()
filename = '{0}_{1}.xml'.format(self.output_prefix, message_no)
return os.path.join(cwd, filename) | [
"def",
"__create_file_name",
"(",
"self",
",",
"message_no",
")",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"filename",
"=",
"'{0}_{1}.xml'",
".",
"format",
"(",
"self",
".",
"output_prefix",
",",
"message_no",
")",
"return",
"os",
".",
"path",
"."... | Create the filename to save to | [
"Create",
"the",
"filename",
"to",
"save",
"to"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L198-L202 | train | 214,805 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.to_browser_mode | def to_browser_mode(self):
""" Write all the messages to files and open them in the browser """
for message_no in range(len(self.messages)):
self.__to_browser(message_no) | python | def to_browser_mode(self):
""" Write all the messages to files and open them in the browser """
for message_no in range(len(self.messages)):
self.__to_browser(message_no) | [
"def",
"to_browser_mode",
"(",
"self",
")",
":",
"for",
"message_no",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"messages",
")",
")",
":",
"self",
".",
"__to_browser",
"(",
"message_no",
")"
] | Write all the messages to files and open them in the browser | [
"Write",
"all",
"the",
"messages",
"to",
"files",
"and",
"open",
"them",
"in",
"the",
"browser"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L204-L207 | train | 214,806 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.__to_browser | def __to_browser(self, message_no):
""" Write a single message to file and open the file in a
browser
"""
filename = self.__to_file(message_no)
try:
command = self.config.get('General', 'browser_command')
except (ConfigParser.NoOptionError, AttributeError):
print 'Incorrect or missing .ini file. See --help.'
sys.exit(5)
command = str(command).format(filename)
command_list = command.split(' ')
try:
subprocess.Popen(command_list, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
print 'Unable to execute the browsercommand:'
print command
print 'Exiting!'
sys.exit(21) | python | def __to_browser(self, message_no):
""" Write a single message to file and open the file in a
browser
"""
filename = self.__to_file(message_no)
try:
command = self.config.get('General', 'browser_command')
except (ConfigParser.NoOptionError, AttributeError):
print 'Incorrect or missing .ini file. See --help.'
sys.exit(5)
command = str(command).format(filename)
command_list = command.split(' ')
try:
subprocess.Popen(command_list, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
print 'Unable to execute the browsercommand:'
print command
print 'Exiting!'
sys.exit(21) | [
"def",
"__to_browser",
"(",
"self",
",",
"message_no",
")",
":",
"filename",
"=",
"self",
".",
"__to_file",
"(",
"message_no",
")",
"try",
":",
"command",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'General'",
",",
"'browser_command'",
")",
"except",
... | Write a single message to file and open the file in a
browser | [
"Write",
"a",
"single",
"message",
"to",
"file",
"and",
"open",
"the",
"file",
"in",
"a",
"browser"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L209-L229 | train | 214,807 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS.__update_window | def __update_window(self, width, height, message_no, page_no):
""" Update the window with the menu and the new text """
file_exists_label = '-F-ILE'
if not os.path.exists(self.__create_file_name(message_no)):
file_exists_label = '(f)ile'
# Clear the screen
if PLATFORM == 'win32':
# Ugly hack until someone figures out a better way for Windows
# probably something with a cls command, but I cannot test it
for _ in range(50):
print
else:
sys.stdout.write('\x1b[2J\x1b[H') # Clear screen
# Content
content = self.messages[message_no].output.rstrip('\n')
out = content
if self.args.color:
out = pygments.highlight(content, XmlLexer(), TerminalFormatter())
# Paging functionality
if message_no not in self.pages:
self._form_pages(message_no, content, out, height, width)
# Coerce in range
page_no = max(min(len(self.pages[message_no]) - 1, page_no), 0)
page_content = self.pages[message_no][page_no]
# Menu
max_message = str(len(self.messages) - 1)
position_string = u'{{0: >{0}}}/{{1: <{0}}}'.format(len(max_message))
position_string = position_string.format(message_no, max_message)
# Assume less than 100 pages
current_max_page = len(self.pages[message_no]) - 1
pages_string = u'{0: >2}/{1: <2}'.format(page_no, current_max_page)
menu = (u'(b)rowser | {0} | Message {1} \u2193 (s)\u2191 (w) | '
u'Page {2} \u2190 (a)\u2192 (d) | (q)uit\n{3}').\
format(file_exists_label, position_string, pages_string,
'-' * width)
print menu
print page_content
return page_no | python | def __update_window(self, width, height, message_no, page_no):
""" Update the window with the menu and the new text """
file_exists_label = '-F-ILE'
if not os.path.exists(self.__create_file_name(message_no)):
file_exists_label = '(f)ile'
# Clear the screen
if PLATFORM == 'win32':
# Ugly hack until someone figures out a better way for Windows
# probably something with a cls command, but I cannot test it
for _ in range(50):
print
else:
sys.stdout.write('\x1b[2J\x1b[H') # Clear screen
# Content
content = self.messages[message_no].output.rstrip('\n')
out = content
if self.args.color:
out = pygments.highlight(content, XmlLexer(), TerminalFormatter())
# Paging functionality
if message_no not in self.pages:
self._form_pages(message_no, content, out, height, width)
# Coerce in range
page_no = max(min(len(self.pages[message_no]) - 1, page_no), 0)
page_content = self.pages[message_no][page_no]
# Menu
max_message = str(len(self.messages) - 1)
position_string = u'{{0: >{0}}}/{{1: <{0}}}'.format(len(max_message))
position_string = position_string.format(message_no, max_message)
# Assume less than 100 pages
current_max_page = len(self.pages[message_no]) - 1
pages_string = u'{0: >2}/{1: <2}'.format(page_no, current_max_page)
menu = (u'(b)rowser | {0} | Message {1} \u2193 (s)\u2191 (w) | '
u'Page {2} \u2190 (a)\u2192 (d) | (q)uit\n{3}').\
format(file_exists_label, position_string, pages_string,
'-' * width)
print menu
print page_content
return page_no | [
"def",
"__update_window",
"(",
"self",
",",
"width",
",",
"height",
",",
"message_no",
",",
"page_no",
")",
":",
"file_exists_label",
"=",
"'-F-ILE'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"__create_file_name",
"(",
"message_no",
... | Update the window with the menu and the new text | [
"Update",
"the",
"window",
"with",
"the",
"menu",
"and",
"the",
"new",
"text"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L268-L310 | train | 214,808 |
SoCo/SoCo | dev_tools/analyse_ws.py | AnalyzeWS._form_pages | def _form_pages(self, message_no, content, out, height, width):
""" Form the pages """
self.pages[message_no] = []
page_height = height - 4 # 2-3 for menu, 1 for cursor
outline = u''
no_lines_page = 0
for original, formatted in zip(content.split('\n'), out.split('\n')):
no_lines_original = int(math.ceil(len(original) / float(width)))
# Blank line
if len(original) == 0:
if no_lines_page + 1 <= page_height:
outline += u'\n'
no_lines_page += 1
else:
self.pages[message_no].append(outline)
outline = u'\n'
no_lines_page = 1
original = formatted = u'\n'
# Too large line
elif no_lines_original > page_height:
if len(outline) > 0:
self.pages[message_no].append(outline)
outline = u''
no_lines_page = 0
self.pages[message_no].append(formatted)
# The line(s) can be added to the current page
elif no_lines_page + no_lines_original <= page_height:
if len(outline) > 0:
outline += u'\n'
outline += formatted
no_lines_page += no_lines_original
# End the page and start a new
else:
self.pages[message_no].append(outline)
outline = formatted
no_lines_page = no_lines_original
# Add the remainder
if len(outline) > 0:
self.pages[message_no].append(outline)
if len(self.pages[message_no]) == 0:
self.pages[message_no].append(u'') | python | def _form_pages(self, message_no, content, out, height, width):
""" Form the pages """
self.pages[message_no] = []
page_height = height - 4 # 2-3 for menu, 1 for cursor
outline = u''
no_lines_page = 0
for original, formatted in zip(content.split('\n'), out.split('\n')):
no_lines_original = int(math.ceil(len(original) / float(width)))
# Blank line
if len(original) == 0:
if no_lines_page + 1 <= page_height:
outline += u'\n'
no_lines_page += 1
else:
self.pages[message_no].append(outline)
outline = u'\n'
no_lines_page = 1
original = formatted = u'\n'
# Too large line
elif no_lines_original > page_height:
if len(outline) > 0:
self.pages[message_no].append(outline)
outline = u''
no_lines_page = 0
self.pages[message_no].append(formatted)
# The line(s) can be added to the current page
elif no_lines_page + no_lines_original <= page_height:
if len(outline) > 0:
outline += u'\n'
outline += formatted
no_lines_page += no_lines_original
# End the page and start a new
else:
self.pages[message_no].append(outline)
outline = formatted
no_lines_page = no_lines_original
# Add the remainder
if len(outline) > 0:
self.pages[message_no].append(outline)
if len(self.pages[message_no]) == 0:
self.pages[message_no].append(u'') | [
"def",
"_form_pages",
"(",
"self",
",",
"message_no",
",",
"content",
",",
"out",
",",
"height",
",",
"width",
")",
":",
"self",
".",
"pages",
"[",
"message_no",
"]",
"=",
"[",
"]",
"page_height",
"=",
"height",
"-",
"4",
"# 2-3 for menu, 1 for cursor",
... | Form the pages | [
"Form",
"the",
"pages"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L312-L353 | train | 214,809 |
SoCo/SoCo | dev_tools/analyse_ws.py | WSPart.finalize_content | def finalize_content(self):
""" Finalize the additons """
self.write_closed = True
body = self.raw_body.decode(self.encoding)
self._init_xml(body)
self._form_output() | python | def finalize_content(self):
""" Finalize the additons """
self.write_closed = True
body = self.raw_body.decode(self.encoding)
self._init_xml(body)
self._form_output() | [
"def",
"finalize_content",
"(",
"self",
")",
":",
"self",
".",
"write_closed",
"=",
"True",
"body",
"=",
"self",
".",
"raw_body",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
"self",
".",
"_init_xml",
"(",
"body",
")",
"self",
".",
"_form_output",
... | Finalize the additons | [
"Finalize",
"the",
"additons"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L382-L387 | train | 214,810 |
SoCo/SoCo | dev_tools/analyse_ws.py | WSPart._init_xml | def _init_xml(self, body):
""" Parse the present body as xml """
tree = etree.fromstring(body.encode(self.encoding), PARSER)
# Extract and replace inner DIDL xml in tags
for text in tree.xpath('.//text()[contains(., "DIDL")]'):
item = text.getparent()
didl_tree = etree.fromstring(item.text)
if self.external_inner_xml:
item.text = 'DIDL_REPLACEMENT_{0}'.format(len(self.inner_xml))
self.inner_xml.append(didl_tree)
else:
item.text = None
item.append(didl_tree)
# Extract and replace inner DIDL xml in properties in inner xml
for inner_tree in self.inner_xml:
for item in inner_tree.xpath('//*[contains(@val, "DIDL")]'):
if self.external_inner_xml:
didl_tree = etree.fromstring(item.attrib['val'])
item.attrib['val'] = 'DIDL_REPLACEMENT_{0}'.\
format(len(self.inner_xml))
self.inner_xml.append(didl_tree)
self.body_formatted = etree.tostring(tree, pretty_print=True).decode(
self.encoding) | python | def _init_xml(self, body):
""" Parse the present body as xml """
tree = etree.fromstring(body.encode(self.encoding), PARSER)
# Extract and replace inner DIDL xml in tags
for text in tree.xpath('.//text()[contains(., "DIDL")]'):
item = text.getparent()
didl_tree = etree.fromstring(item.text)
if self.external_inner_xml:
item.text = 'DIDL_REPLACEMENT_{0}'.format(len(self.inner_xml))
self.inner_xml.append(didl_tree)
else:
item.text = None
item.append(didl_tree)
# Extract and replace inner DIDL xml in properties in inner xml
for inner_tree in self.inner_xml:
for item in inner_tree.xpath('//*[contains(@val, "DIDL")]'):
if self.external_inner_xml:
didl_tree = etree.fromstring(item.attrib['val'])
item.attrib['val'] = 'DIDL_REPLACEMENT_{0}'.\
format(len(self.inner_xml))
self.inner_xml.append(didl_tree)
self.body_formatted = etree.tostring(tree, pretty_print=True).decode(
self.encoding) | [
"def",
"_init_xml",
"(",
"self",
",",
"body",
")",
":",
"tree",
"=",
"etree",
".",
"fromstring",
"(",
"body",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
",",
"PARSER",
")",
"# Extract and replace inner DIDL xml in tags",
"for",
"text",
"in",
"tree",
... | Parse the present body as xml | [
"Parse",
"the",
"present",
"body",
"as",
"xml"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L389-L413 | train | 214,811 |
SoCo/SoCo | dev_tools/analyse_ws.py | WSPart._form_output | def _form_output(self):
""" Form the output """
self.output = u''
if self.external_inner_xml:
self.output += u'<Dummy_tag_to_create_valid_xml_on_external_inner'\
'_xml>\n'
self.output += u'<!-- BODY -->\n{0}'.format(self.body_formatted)
if self.external_inner_xml:
for number, didl in enumerate(self.inner_xml):
self.output += u'\n<!-- DIDL_{0} -->\n{1}'.\
format(number, etree.tostring(didl, pretty_print=True))
self.output += u'</Dummy_tag_to_create_valid_xml_on_external_'\
'inner_xml>' | python | def _form_output(self):
""" Form the output """
self.output = u''
if self.external_inner_xml:
self.output += u'<Dummy_tag_to_create_valid_xml_on_external_inner'\
'_xml>\n'
self.output += u'<!-- BODY -->\n{0}'.format(self.body_formatted)
if self.external_inner_xml:
for number, didl in enumerate(self.inner_xml):
self.output += u'\n<!-- DIDL_{0} -->\n{1}'.\
format(number, etree.tostring(didl, pretty_print=True))
self.output += u'</Dummy_tag_to_create_valid_xml_on_external_'\
'inner_xml>' | [
"def",
"_form_output",
"(",
"self",
")",
":",
"self",
".",
"output",
"=",
"u''",
"if",
"self",
".",
"external_inner_xml",
":",
"self",
".",
"output",
"+=",
"u'<Dummy_tag_to_create_valid_xml_on_external_inner'",
"'_xml>\\n'",
"self",
".",
"output",
"+=",
"u'<!-- BO... | Form the output | [
"Form",
"the",
"output"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L418-L431 | train | 214,812 |
SoCo/SoCo | soco/data_structures_entry.py | attempt_datastructure_upgrade | def attempt_datastructure_upgrade(didl_item):
"""Attempt to upgrade a didl_item to a music services data structure
if it originates from a music services
"""
try:
resource = didl_item.resources[0]
except IndexError:
_LOG.debug('Upgrade not possible, no resources')
return didl_item
if resource.uri.startswith('x-sonos-http'):
# Get data
uri = resource.uri
# Now we need to create a DIDL item id. It seems to be based on the uri
path = urlparse(uri).path
# Strip any extensions, eg .mp3, from the end of the path
path = path.rsplit('.', 1)[0]
# The ID has an 8 (hex) digit prefix. But it doesn't seem to
# matter what it is!
item_id = '11111111{0}'.format(path)
# Ignore other metadata for now, in future ask ms data
# structure to upgrade metadata from the service
metadata = {}
try:
metadata['title'] = didl_item.title
except AttributeError:
pass
# Get class
try:
cls = get_class(DIDL_NAME_TO_QUALIFIED_MS_NAME[
didl_item.__class__.__name__
])
except KeyError:
# The data structure should be upgraded, but there is an entry
# missing from DIDL_NAME_TO_QUALIFIED_MS_NAME. Log this as a
# warning.
_LOG.warning(
'DATA STRUCTURE UPGRADE FAIL. Unable to upgrade music library '
'data structure to music service data structure because an '
'entry is missing for %s in DIDL_NAME_TO_QUALIFIED_MS_NAME. '
'This should be reported as a bug.',
didl_item.__class__.__name__,
)
return didl_item
upgraded_item = cls(
item_id=item_id,
desc=desc_from_uri(resource.uri),
resources=didl_item.resources,
uri=uri,
metadata_dict=metadata,
)
_LOG.debug("Item %s upgraded to %s", didl_item, upgraded_item)
return upgraded_item
_LOG.debug('Upgrade not necessary')
return didl_item | python | def attempt_datastructure_upgrade(didl_item):
"""Attempt to upgrade a didl_item to a music services data structure
if it originates from a music services
"""
try:
resource = didl_item.resources[0]
except IndexError:
_LOG.debug('Upgrade not possible, no resources')
return didl_item
if resource.uri.startswith('x-sonos-http'):
# Get data
uri = resource.uri
# Now we need to create a DIDL item id. It seems to be based on the uri
path = urlparse(uri).path
# Strip any extensions, eg .mp3, from the end of the path
path = path.rsplit('.', 1)[0]
# The ID has an 8 (hex) digit prefix. But it doesn't seem to
# matter what it is!
item_id = '11111111{0}'.format(path)
# Ignore other metadata for now, in future ask ms data
# structure to upgrade metadata from the service
metadata = {}
try:
metadata['title'] = didl_item.title
except AttributeError:
pass
# Get class
try:
cls = get_class(DIDL_NAME_TO_QUALIFIED_MS_NAME[
didl_item.__class__.__name__
])
except KeyError:
# The data structure should be upgraded, but there is an entry
# missing from DIDL_NAME_TO_QUALIFIED_MS_NAME. Log this as a
# warning.
_LOG.warning(
'DATA STRUCTURE UPGRADE FAIL. Unable to upgrade music library '
'data structure to music service data structure because an '
'entry is missing for %s in DIDL_NAME_TO_QUALIFIED_MS_NAME. '
'This should be reported as a bug.',
didl_item.__class__.__name__,
)
return didl_item
upgraded_item = cls(
item_id=item_id,
desc=desc_from_uri(resource.uri),
resources=didl_item.resources,
uri=uri,
metadata_dict=metadata,
)
_LOG.debug("Item %s upgraded to %s", didl_item, upgraded_item)
return upgraded_item
_LOG.debug('Upgrade not necessary')
return didl_item | [
"def",
"attempt_datastructure_upgrade",
"(",
"didl_item",
")",
":",
"try",
":",
"resource",
"=",
"didl_item",
".",
"resources",
"[",
"0",
"]",
"except",
"IndexError",
":",
"_LOG",
".",
"debug",
"(",
"'Upgrade not possible, no resources'",
")",
"return",
"didl_item... | Attempt to upgrade a didl_item to a music services data structure
if it originates from a music services | [
"Attempt",
"to",
"upgrade",
"a",
"didl_item",
"to",
"a",
"music",
"services",
"data",
"structure",
"if",
"it",
"originates",
"from",
"a",
"music",
"services"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/data_structures_entry.py#L76-L135 | train | 214,813 |
SoCo/SoCo | soco/plugins/__init__.py | SoCoPlugin.from_name | def from_name(cls, fullname, soco, *args, **kwargs):
"""Instantiate a plugin by its full name."""
_LOG.info('Loading plugin %s', fullname)
parts = fullname.split('.')
modname = '.'.join(parts[:-1])
clsname = parts[-1]
mod = importlib.import_module(modname)
class_ = getattr(mod, clsname)
_LOG.info('Loaded class %s', class_)
return class_(soco, *args, **kwargs) | python | def from_name(cls, fullname, soco, *args, **kwargs):
"""Instantiate a plugin by its full name."""
_LOG.info('Loading plugin %s', fullname)
parts = fullname.split('.')
modname = '.'.join(parts[:-1])
clsname = parts[-1]
mod = importlib.import_module(modname)
class_ = getattr(mod, clsname)
_LOG.info('Loaded class %s', class_)
return class_(soco, *args, **kwargs) | [
"def",
"from_name",
"(",
"cls",
",",
"fullname",
",",
"soco",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_LOG",
".",
"info",
"(",
"'Loading plugin %s'",
",",
"fullname",
")",
"parts",
"=",
"fullname",
".",
"split",
"(",
"'.'",
")",
"modnam... | Instantiate a plugin by its full name. | [
"Instantiate",
"a",
"plugin",
"by",
"its",
"full",
"name",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/plugins/__init__.py#L34-L48 | train | 214,814 |
SoCo/SoCo | soco/ms_data_structures.py | get_ms_item | def get_ms_item(xml, service, parent_id):
"""Return the music service item that corresponds to xml.
The class is identified by getting the type from the 'itemType' tag
"""
cls = MS_TYPE_TO_CLASS.get(xml.findtext(ns_tag('ms', 'itemType')))
out = cls.from_xml(xml, service, parent_id)
return out | python | def get_ms_item(xml, service, parent_id):
"""Return the music service item that corresponds to xml.
The class is identified by getting the type from the 'itemType' tag
"""
cls = MS_TYPE_TO_CLASS.get(xml.findtext(ns_tag('ms', 'itemType')))
out = cls.from_xml(xml, service, parent_id)
return out | [
"def",
"get_ms_item",
"(",
"xml",
",",
"service",
",",
"parent_id",
")",
":",
"cls",
"=",
"MS_TYPE_TO_CLASS",
".",
"get",
"(",
"xml",
".",
"findtext",
"(",
"ns_tag",
"(",
"'ms'",
",",
"'itemType'",
")",
")",
")",
"out",
"=",
"cls",
".",
"from_xml",
"... | Return the music service item that corresponds to xml.
The class is identified by getting the type from the 'itemType' tag | [
"Return",
"the",
"music",
"service",
"item",
"that",
"corresponds",
"to",
"xml",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/ms_data_structures.py#L21-L28 | train | 214,815 |
SoCo/SoCo | soco/ms_data_structures.py | tags_with_text | def tags_with_text(xml, tags=None):
"""Return a list of tags that contain text retrieved recursively from an
XML tree."""
if tags is None:
tags = []
for element in xml:
if element.text is not None:
tags.append(element)
elif len(element) > 0: # pylint: disable=len-as-condition
tags_with_text(element, tags)
else:
message = 'Unknown XML structure: {}'.format(element)
raise ValueError(message)
return tags | python | def tags_with_text(xml, tags=None):
"""Return a list of tags that contain text retrieved recursively from an
XML tree."""
if tags is None:
tags = []
for element in xml:
if element.text is not None:
tags.append(element)
elif len(element) > 0: # pylint: disable=len-as-condition
tags_with_text(element, tags)
else:
message = 'Unknown XML structure: {}'.format(element)
raise ValueError(message)
return tags | [
"def",
"tags_with_text",
"(",
"xml",
",",
"tags",
"=",
"None",
")",
":",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"for",
"element",
"in",
"xml",
":",
"if",
"element",
".",
"text",
"is",
"not",
"None",
":",
"tags",
".",
"append",
"(... | Return a list of tags that contain text retrieved recursively from an
XML tree. | [
"Return",
"a",
"list",
"of",
"tags",
"that",
"contain",
"text",
"retrieved",
"recursively",
"from",
"an",
"XML",
"tree",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/ms_data_structures.py#L31-L44 | train | 214,816 |
SoCo/SoCo | soco/ms_data_structures.py | MusicServiceItem.from_xml | def from_xml(cls, xml, service, parent_id):
"""Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
"""
# Add a few extra pieces of information
content = {'description': service.description,
'service_id': service.service_id,
'parent_id': parent_id}
# Extract values from the XML
all_text_elements = tags_with_text(xml)
for item in all_text_elements:
tag = item.tag[len(NAMESPACES['ms']) + 2:] # Strip namespace
tag = camel_to_underscore(tag) # Convert to nice names
if tag not in cls.valid_fields:
message = 'The info tag \'{}\' is not allowed for this item'.\
format(tag)
raise ValueError(message)
content[tag] = item.text
# Convert values for known types
for key, value in content.items():
if key == 'duration':
content[key] = int(value)
if key in ['can_play', 'can_skip', 'can_add_to_favorites',
'can_enumerate']:
content[key] = True if value == 'true' else False
# Rename a single item
content['item_id'] = content.pop('id')
# And get the extended id
content['extended_id'] = service.id_to_extended_id(content['item_id'],
cls)
# Add URI if there is one for the relevant class
uri = service.form_uri(content, cls)
if uri:
content['uri'] = uri
# Check for all required values
for key in cls.required_fields:
if key not in content:
message = 'An XML field that correspond to the key \'{}\' '\
'is required. See the docstring for help.'.format(key)
return cls.from_dict(content) | python | def from_xml(cls, xml, service, parent_id):
"""Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
"""
# Add a few extra pieces of information
content = {'description': service.description,
'service_id': service.service_id,
'parent_id': parent_id}
# Extract values from the XML
all_text_elements = tags_with_text(xml)
for item in all_text_elements:
tag = item.tag[len(NAMESPACES['ms']) + 2:] # Strip namespace
tag = camel_to_underscore(tag) # Convert to nice names
if tag not in cls.valid_fields:
message = 'The info tag \'{}\' is not allowed for this item'.\
format(tag)
raise ValueError(message)
content[tag] = item.text
# Convert values for known types
for key, value in content.items():
if key == 'duration':
content[key] = int(value)
if key in ['can_play', 'can_skip', 'can_add_to_favorites',
'can_enumerate']:
content[key] = True if value == 'true' else False
# Rename a single item
content['item_id'] = content.pop('id')
# And get the extended id
content['extended_id'] = service.id_to_extended_id(content['item_id'],
cls)
# Add URI if there is one for the relevant class
uri = service.form_uri(content, cls)
if uri:
content['uri'] = uri
# Check for all required values
for key in cls.required_fields:
if key not in content:
message = 'An XML field that correspond to the key \'{}\' '\
'is required. See the docstring for help.'.format(key)
return cls.from_dict(content) | [
"def",
"from_xml",
"(",
"cls",
",",
"xml",
",",
"service",
",",
"parent_id",
")",
":",
"# Add a few extra pieces of information",
"content",
"=",
"{",
"'description'",
":",
"service",
".",
"description",
",",
"'service_id'",
":",
"service",
".",
"service_id",
",... | Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata> | [
"Return",
"a",
"Music",
"Service",
"item",
"generated",
"from",
"xml",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/ms_data_structures.py#L61-L148 | train | 214,817 |
SoCo/SoCo | soco/ms_data_structures.py | MusicServiceItem.from_dict | def from_dict(cls, dict_in):
"""Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict
"""
kwargs = dict_in.copy()
args = [kwargs.pop(key) for key in cls.required_fields]
return cls(*args, **kwargs) | python | def from_dict(cls, dict_in):
"""Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict
"""
kwargs = dict_in.copy()
args = [kwargs.pop(key) for key in cls.required_fields]
return cls(*args, **kwargs) | [
"def",
"from_dict",
"(",
"cls",
",",
"dict_in",
")",
":",
"kwargs",
"=",
"dict_in",
".",
"copy",
"(",
")",
"args",
"=",
"[",
"kwargs",
".",
"pop",
"(",
"key",
")",
"for",
"key",
"in",
"cls",
".",
"required_fields",
"]",
"return",
"cls",
"(",
"*",
... | Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict | [
"Initialize",
"the",
"class",
"from",
"a",
"dict",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/ms_data_structures.py#L151-L160 | train | 214,818 |
SoCo/SoCo | soco/ms_data_structures.py | MusicServiceItem.didl_metadata | def didl_metadata(self):
"""Return the DIDL metadata for a Music Service Track.
The metadata is on the form:
.. code :: xml
<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
<item id="...self.extended_id..."
parentID="...self.parent_id..."
restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
self.content['description']
</desc>
</item>
</DIDL-Lite>
"""
# Check if this item is meant to be played
if not self.can_play:
message = 'This item is not meant to be played and therefore '\
'also not to create its own didl_metadata'
raise DIDLMetadataError(message)
# Check if we have the attributes to create the didl metadata:
for key in ['extended_id', 'title', 'item_class']:
if not hasattr(self, key):
message = 'The property \'{}\' is not present on this item. '\
'This indicates that this item was not meant to create '\
'didl_metadata'.format(key)
raise DIDLMetadataError(message)
if 'description' not in self.content:
message = 'The item for \'description\' is not present in '\
'self.content. This indicates that this item was not meant '\
'to create didl_metadata'
raise DIDLMetadataError(message)
# Main element, ugly? yes! but I have given up on using namespaces
# with xml.etree.ElementTree
item_attrib = {
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:upnp': 'urn:schemas-upnp-org:metadata-1-0/upnp/',
'xmlns:r': 'urn:schemas-rinconnetworks-com:metadata-1-0/',
'xmlns': 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
}
xml = XML.Element('DIDL-Lite', item_attrib)
# Item sub element
item_attrib = {
'parentID': '',
'restricted': 'true',
'id': self.extended_id
}
# Only add the parent_id if we have it
if self.parent_id:
item_attrib['parentID'] = self.parent_id
item = XML.SubElement(xml, 'item', item_attrib)
# Add title and class
XML.SubElement(item, 'dc:title').text = self.title
XML.SubElement(item, 'upnp:class').text = self.item_class
# Add the desc element
desc_attrib = {
'id': 'cdudn',
'nameSpace': 'urn:schemas-rinconnetworks-com:metadata-1-0/'
}
desc = XML.SubElement(item, 'desc', desc_attrib)
desc.text = self.content['description']
return xml | python | def didl_metadata(self):
"""Return the DIDL metadata for a Music Service Track.
The metadata is on the form:
.. code :: xml
<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
<item id="...self.extended_id..."
parentID="...self.parent_id..."
restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
self.content['description']
</desc>
</item>
</DIDL-Lite>
"""
# Check if this item is meant to be played
if not self.can_play:
message = 'This item is not meant to be played and therefore '\
'also not to create its own didl_metadata'
raise DIDLMetadataError(message)
# Check if we have the attributes to create the didl metadata:
for key in ['extended_id', 'title', 'item_class']:
if not hasattr(self, key):
message = 'The property \'{}\' is not present on this item. '\
'This indicates that this item was not meant to create '\
'didl_metadata'.format(key)
raise DIDLMetadataError(message)
if 'description' not in self.content:
message = 'The item for \'description\' is not present in '\
'self.content. This indicates that this item was not meant '\
'to create didl_metadata'
raise DIDLMetadataError(message)
# Main element, ugly? yes! but I have given up on using namespaces
# with xml.etree.ElementTree
item_attrib = {
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:upnp': 'urn:schemas-upnp-org:metadata-1-0/upnp/',
'xmlns:r': 'urn:schemas-rinconnetworks-com:metadata-1-0/',
'xmlns': 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
}
xml = XML.Element('DIDL-Lite', item_attrib)
# Item sub element
item_attrib = {
'parentID': '',
'restricted': 'true',
'id': self.extended_id
}
# Only add the parent_id if we have it
if self.parent_id:
item_attrib['parentID'] = self.parent_id
item = XML.SubElement(xml, 'item', item_attrib)
# Add title and class
XML.SubElement(item, 'dc:title').text = self.title
XML.SubElement(item, 'upnp:class').text = self.item_class
# Add the desc element
desc_attrib = {
'id': 'cdudn',
'nameSpace': 'urn:schemas-rinconnetworks-com:metadata-1-0/'
}
desc = XML.SubElement(item, 'desc', desc_attrib)
desc.text = self.content['description']
return xml | [
"def",
"didl_metadata",
"(",
"self",
")",
":",
"# Check if this item is meant to be played",
"if",
"not",
"self",
".",
"can_play",
":",
"message",
"=",
"'This item is not meant to be played and therefore '",
"'also not to create its own didl_metadata'",
"raise",
"DIDLMetadataErro... | Return the DIDL metadata for a Music Service Track.
The metadata is on the form:
.. code :: xml
<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
<item id="...self.extended_id..."
parentID="...self.parent_id..."
restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
self.content['description']
</desc>
</item>
</DIDL-Lite> | [
"Return",
"the",
"DIDL",
"metadata",
"for",
"a",
"Music",
"Service",
"Track",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/ms_data_structures.py#L213-L285 | train | 214,819 |
SoCo/SoCo | soco/alarms.py | get_alarms | def get_alarms(zone=None):
"""Get a set of all alarms known to the Sonos system.
Args:
zone (`SoCo`, optional): a SoCo instance to query. If None, a random
instance is used. Defaults to `None`.
Returns:
set: A set of `Alarm` instances
Note:
Any existing `Alarm` instance will have its attributes updated to those
currently stored on the Sonos system.
"""
# Get a soco instance to query. It doesn't matter which.
if zone is None:
zone = discovery.any_soco()
response = zone.alarmClock.ListAlarms()
alarm_list = response['CurrentAlarmList']
tree = XML.fromstring(alarm_list.encode('utf-8'))
# An alarm list looks like this:
# <Alarms>
# <Alarm ID="14" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ1400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# <Alarm ID="15" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ01400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# </Alarms>
# pylint: disable=protected-access
alarms = tree.findall('Alarm')
result = set()
for alarm in alarms:
values = alarm.attrib
alarm_id = values['ID']
# If an instance already exists for this ID, update and return it.
# Otherwise, create a new one and populate its values
if Alarm._all_alarms.get(alarm_id):
instance = Alarm._all_alarms.get(alarm_id)
else:
instance = Alarm(None)
instance._alarm_id = alarm_id
Alarm._all_alarms[instance._alarm_id] = instance
instance.start_time = datetime.strptime(
values['StartTime'], "%H:%M:%S").time() # NB StartTime, not
# StartLocalTime, which is used by CreateAlarm
instance.duration = None if values['Duration'] == '' else\
datetime.strptime(values['Duration'], "%H:%M:%S").time()
instance.recurrence = values['Recurrence']
instance.enabled = values['Enabled'] == '1'
instance.zone = next((z for z in zone.all_zones
if z.uid == values['RoomUUID']), None)
# some alarms are not associated to zones -> filter these out
if instance.zone is None:
continue
instance.program_uri = None if values['ProgramURI'] ==\
"x-rincon-buzzer:0" else values['ProgramURI']
instance.program_metadata = values['ProgramMetaData']
instance.play_mode = values['PlayMode']
instance.volume = values['Volume']
instance.include_linked_zones = values['IncludeLinkedZones'] == '1'
result.add(instance)
return result | python | def get_alarms(zone=None):
"""Get a set of all alarms known to the Sonos system.
Args:
zone (`SoCo`, optional): a SoCo instance to query. If None, a random
instance is used. Defaults to `None`.
Returns:
set: A set of `Alarm` instances
Note:
Any existing `Alarm` instance will have its attributes updated to those
currently stored on the Sonos system.
"""
# Get a soco instance to query. It doesn't matter which.
if zone is None:
zone = discovery.any_soco()
response = zone.alarmClock.ListAlarms()
alarm_list = response['CurrentAlarmList']
tree = XML.fromstring(alarm_list.encode('utf-8'))
# An alarm list looks like this:
# <Alarms>
# <Alarm ID="14" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ1400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# <Alarm ID="15" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ01400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# </Alarms>
# pylint: disable=protected-access
alarms = tree.findall('Alarm')
result = set()
for alarm in alarms:
values = alarm.attrib
alarm_id = values['ID']
# If an instance already exists for this ID, update and return it.
# Otherwise, create a new one and populate its values
if Alarm._all_alarms.get(alarm_id):
instance = Alarm._all_alarms.get(alarm_id)
else:
instance = Alarm(None)
instance._alarm_id = alarm_id
Alarm._all_alarms[instance._alarm_id] = instance
instance.start_time = datetime.strptime(
values['StartTime'], "%H:%M:%S").time() # NB StartTime, not
# StartLocalTime, which is used by CreateAlarm
instance.duration = None if values['Duration'] == '' else\
datetime.strptime(values['Duration'], "%H:%M:%S").time()
instance.recurrence = values['Recurrence']
instance.enabled = values['Enabled'] == '1'
instance.zone = next((z for z in zone.all_zones
if z.uid == values['RoomUUID']), None)
# some alarms are not associated to zones -> filter these out
if instance.zone is None:
continue
instance.program_uri = None if values['ProgramURI'] ==\
"x-rincon-buzzer:0" else values['ProgramURI']
instance.program_metadata = values['ProgramMetaData']
instance.play_mode = values['PlayMode']
instance.volume = values['Volume']
instance.include_linked_zones = values['IncludeLinkedZones'] == '1'
result.add(instance)
return result | [
"def",
"get_alarms",
"(",
"zone",
"=",
"None",
")",
":",
"# Get a soco instance to query. It doesn't matter which.",
"if",
"zone",
"is",
"None",
":",
"zone",
"=",
"discovery",
".",
"any_soco",
"(",
")",
"response",
"=",
"zone",
".",
"alarmClock",
".",
"ListAlarm... | Get a set of all alarms known to the Sonos system.
Args:
zone (`SoCo`, optional): a SoCo instance to query. If None, a random
instance is used. Defaults to `None`.
Returns:
set: A set of `Alarm` instances
Note:
Any existing `Alarm` instance will have its attributes updated to those
currently stored on the Sonos system. | [
"Get",
"a",
"set",
"of",
"all",
"alarms",
"known",
"to",
"the",
"Sonos",
"system",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L253-L325 | train | 214,820 |
SoCo/SoCo | soco/alarms.py | Alarm.play_mode | def play_mode(self, play_mode):
"""See `playmode`."""
play_mode = play_mode.upper()
if play_mode not in PLAY_MODES:
raise KeyError("'%s' is not a valid play mode" % play_mode)
self._play_mode = play_mode | python | def play_mode(self, play_mode):
"""See `playmode`."""
play_mode = play_mode.upper()
if play_mode not in PLAY_MODES:
raise KeyError("'%s' is not a valid play mode" % play_mode)
self._play_mode = play_mode | [
"def",
"play_mode",
"(",
"self",
",",
"play_mode",
")",
":",
"play_mode",
"=",
"play_mode",
".",
"upper",
"(",
")",
"if",
"play_mode",
"not",
"in",
"PLAY_MODES",
":",
"raise",
"KeyError",
"(",
"\"'%s' is not a valid play mode\"",
"%",
"play_mode",
")",
"self",... | See `playmode`. | [
"See",
"playmode",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L166-L171 | train | 214,821 |
SoCo/SoCo | soco/alarms.py | Alarm.volume | def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) | python | def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) | [
"def",
"volume",
"(",
"self",
",",
"volume",
")",
":",
"# max 100",
"volume",
"=",
"int",
"(",
"volume",
")",
"self",
".",
"_volume",
"=",
"max",
"(",
"0",
",",
"min",
"(",
"volume",
",",
"100",
")",
")"
] | See `volume`. | [
"See",
"volume",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L179-L183 | train | 214,822 |
SoCo/SoCo | soco/alarms.py | Alarm.recurrence | def recurrence(self, recurrence):
"""See `recurrence`."""
if not is_valid_recurrence(recurrence):
raise KeyError("'%s' is not a valid recurrence value" % recurrence)
self._recurrence = recurrence | python | def recurrence(self, recurrence):
"""See `recurrence`."""
if not is_valid_recurrence(recurrence):
raise KeyError("'%s' is not a valid recurrence value" % recurrence)
self._recurrence = recurrence | [
"def",
"recurrence",
"(",
"self",
",",
"recurrence",
")",
":",
"if",
"not",
"is_valid_recurrence",
"(",
"recurrence",
")",
":",
"raise",
"KeyError",
"(",
"\"'%s' is not a valid recurrence value\"",
"%",
"recurrence",
")",
"self",
".",
"_recurrence",
"=",
"recurren... | See `recurrence`. | [
"See",
"recurrence",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L197-L202 | train | 214,823 |
SoCo/SoCo | soco/alarms.py | Alarm.save | def save(self):
"""Save the alarm to the Sonos system.
Raises:
~soco.exceptions.SoCoUPnPException: if the alarm cannot be created
because there
is already an alarm for this room at the specified time.
"""
# pylint: disable=bad-continuation
args = [
('StartLocalTime', self.start_time.strftime(TIME_FORMAT)),
('Duration', '' if self.duration is None else
self.duration.strftime(TIME_FORMAT)),
('Recurrence', self.recurrence),
('Enabled', '1' if self.enabled else '0'),
('RoomUUID', self.zone.uid),
('ProgramURI', "x-rincon-buzzer:0" if self.program_uri is None
else self.program_uri),
('ProgramMetaData', self.program_metadata),
('PlayMode', self.play_mode),
('Volume', self.volume),
('IncludeLinkedZones', '1' if self.include_linked_zones else '0')
]
if self._alarm_id is None:
response = self.zone.alarmClock.CreateAlarm(args)
self._alarm_id = response['AssignedID']
Alarm._all_alarms[self._alarm_id] = self
else:
# The alarm has been saved before. Update it instead.
args.insert(0, ('ID', self._alarm_id))
self.zone.alarmClock.UpdateAlarm(args) | python | def save(self):
"""Save the alarm to the Sonos system.
Raises:
~soco.exceptions.SoCoUPnPException: if the alarm cannot be created
because there
is already an alarm for this room at the specified time.
"""
# pylint: disable=bad-continuation
args = [
('StartLocalTime', self.start_time.strftime(TIME_FORMAT)),
('Duration', '' if self.duration is None else
self.duration.strftime(TIME_FORMAT)),
('Recurrence', self.recurrence),
('Enabled', '1' if self.enabled else '0'),
('RoomUUID', self.zone.uid),
('ProgramURI', "x-rincon-buzzer:0" if self.program_uri is None
else self.program_uri),
('ProgramMetaData', self.program_metadata),
('PlayMode', self.play_mode),
('Volume', self.volume),
('IncludeLinkedZones', '1' if self.include_linked_zones else '0')
]
if self._alarm_id is None:
response = self.zone.alarmClock.CreateAlarm(args)
self._alarm_id = response['AssignedID']
Alarm._all_alarms[self._alarm_id] = self
else:
# The alarm has been saved before. Update it instead.
args.insert(0, ('ID', self._alarm_id))
self.zone.alarmClock.UpdateAlarm(args) | [
"def",
"save",
"(",
"self",
")",
":",
"# pylint: disable=bad-continuation",
"args",
"=",
"[",
"(",
"'StartLocalTime'",
",",
"self",
".",
"start_time",
".",
"strftime",
"(",
"TIME_FORMAT",
")",
")",
",",
"(",
"'Duration'",
",",
"''",
"if",
"self",
".",
"dur... | Save the alarm to the Sonos system.
Raises:
~soco.exceptions.SoCoUPnPException: if the alarm cannot be created
because there
is already an alarm for this room at the specified time. | [
"Save",
"the",
"alarm",
"to",
"the",
"Sonos",
"system",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L204-L234 | train | 214,824 |
SoCo/SoCo | soco/alarms.py | Alarm.remove | def remove(self):
"""Remove the alarm from the Sonos system.
There is no need to call `save`. The Python instance is not deleted,
and can be saved back to Sonos again if desired.
"""
self.zone.alarmClock.DestroyAlarm([
('ID', self._alarm_id)
])
alarm_id = self._alarm_id
try:
del Alarm._all_alarms[alarm_id]
except KeyError:
pass
self._alarm_id = None | python | def remove(self):
"""Remove the alarm from the Sonos system.
There is no need to call `save`. The Python instance is not deleted,
and can be saved back to Sonos again if desired.
"""
self.zone.alarmClock.DestroyAlarm([
('ID', self._alarm_id)
])
alarm_id = self._alarm_id
try:
del Alarm._all_alarms[alarm_id]
except KeyError:
pass
self._alarm_id = None | [
"def",
"remove",
"(",
"self",
")",
":",
"self",
".",
"zone",
".",
"alarmClock",
".",
"DestroyAlarm",
"(",
"[",
"(",
"'ID'",
",",
"self",
".",
"_alarm_id",
")",
"]",
")",
"alarm_id",
"=",
"self",
".",
"_alarm_id",
"try",
":",
"del",
"Alarm",
".",
"_... | Remove the alarm from the Sonos system.
There is no need to call `save`. The Python instance is not deleted,
and can be saved back to Sonos again if desired. | [
"Remove",
"the",
"alarm",
"from",
"the",
"Sonos",
"system",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L236-L250 | train | 214,825 |
SoCo/SoCo | dev_tools/sonosdump.py | main | def main():
""" Run the main script """
parser = argparse.ArgumentParser(
prog='',
description='Dump data about Sonos services'
)
parser.add_argument(
'-d', '--device',
default=None,
help="The ip address of the device to query. "
"If none is supplied, a random device will be used"
)
parser.add_argument(
'-s', '--service',
default=None,
help="Dump data relating to services matching this regexp "
"only, e.g. %(prog)s -s GroupRenderingControl"
)
args = parser.parse_args()
# get a zone player - any one will do
if args.device:
device = soco.SoCo(args.device)
else:
device = soco.discovery.any_soco()
print("Querying %s" % device.player_name)
# loop over each of the available services
# pylint: disable=no-member
services = (srv(device) for srv in soco.services.Service.__subclasses__())
for srv in services:
if args.service is None or re.search(
args.service, srv.service_type):
print_details(srv) | python | def main():
""" Run the main script """
parser = argparse.ArgumentParser(
prog='',
description='Dump data about Sonos services'
)
parser.add_argument(
'-d', '--device',
default=None,
help="The ip address of the device to query. "
"If none is supplied, a random device will be used"
)
parser.add_argument(
'-s', '--service',
default=None,
help="Dump data relating to services matching this regexp "
"only, e.g. %(prog)s -s GroupRenderingControl"
)
args = parser.parse_args()
# get a zone player - any one will do
if args.device:
device = soco.SoCo(args.device)
else:
device = soco.discovery.any_soco()
print("Querying %s" % device.player_name)
# loop over each of the available services
# pylint: disable=no-member
services = (srv(device) for srv in soco.services.Service.__subclasses__())
for srv in services:
if args.service is None or re.search(
args.service, srv.service_type):
print_details(srv) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"''",
",",
"description",
"=",
"'Dump data about Sonos services'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--device'",
",",
"default",
"=",
"Non... | Run the main script | [
"Run",
"the",
"main",
"script"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/sonosdump.py#L15-L49 | train | 214,826 |
SoCo/SoCo | dev_tools/sonosdump.py | print_details | def print_details(srv):
""" Print the details of a service
"""
name = srv.service_type
box = "=" * 79
print("{0}\n|{1:^77}|\n{0}\n".format(box, name))
for action in srv.iter_actions():
print(action.name)
print("~" * len(action.name))
print("\n Input")
for arg in action.in_args:
print(" ", arg)
print("\n Output")
for arg in action.out_args:
print(" ", arg)
print("\n\n") | python | def print_details(srv):
""" Print the details of a service
"""
name = srv.service_type
box = "=" * 79
print("{0}\n|{1:^77}|\n{0}\n".format(box, name))
for action in srv.iter_actions():
print(action.name)
print("~" * len(action.name))
print("\n Input")
for arg in action.in_args:
print(" ", arg)
print("\n Output")
for arg in action.out_args:
print(" ", arg)
print("\n\n") | [
"def",
"print_details",
"(",
"srv",
")",
":",
"name",
"=",
"srv",
".",
"service_type",
"box",
"=",
"\"=\"",
"*",
"79",
"print",
"(",
"\"{0}\\n|{1:^77}|\\n{0}\\n\"",
".",
"format",
"(",
"box",
",",
"name",
")",
")",
"for",
"action",
"in",
"srv",
".",
"i... | Print the details of a service | [
"Print",
"the",
"details",
"of",
"a",
"service"
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/sonosdump.py#L52-L68 | train | 214,827 |
SoCo/SoCo | soco/snapshot.py | Snapshot.snapshot | def snapshot(self):
"""Record and store the current state of a device.
Returns:
bool: `True` if the device is a coordinator, `False` otherwise.
Useful for determining whether playing an alert on a device
will ungroup it.
"""
# get if device coordinator (or slave) True (or False)
self.is_coordinator = self.device.is_coordinator
# Get information about the currently playing media
media_info = self.device.avTransport.GetMediaInfo([('InstanceID', 0)])
self.media_uri = media_info['CurrentURI']
# Extract source from media uri - below some media URI value examples:
# 'x-rincon-queue:RINCON_000E5859E49601400#0'
# - playing a local queue always #0 for local queue)
#
# 'x-rincon-queue:RINCON_000E5859E49601400#6'
# - playing a cloud queue where #x changes with each queue)
#
# -'x-rincon:RINCON_000E5859E49601400'
# - a slave player pointing to coordinator player
if self.media_uri.split(':')[0] == 'x-rincon-queue':
# The pylint error below is a false positive, see about removing it
# in the future
# pylint: disable=simplifiable-if-statement
if self.media_uri.split('#')[1] == '0':
# playing local queue
self.is_playing_queue = True
else:
# playing cloud queue - started from Alexa
self.is_playing_cloud_queue = True
# Save the volume, mute and other sound settings
self.volume = self.device.volume
self.mute = self.device.mute
self.bass = self.device.bass
self.treble = self.device.treble
self.loudness = self.device.loudness
# get details required for what's playing:
if self.is_playing_queue:
# playing from queue - save repeat, random, cross fade, track, etc.
self.play_mode = self.device.play_mode
self.cross_fade = self.device.cross_fade
# Get information about the currently playing track
track_info = self.device.get_current_track_info()
if track_info is not None:
position = track_info['playlist_position']
if position != "":
# save as integer
self.playlist_position = int(position)
self.track_position = track_info['position']
else:
# playing from a stream - save media metadata
self.media_metadata = media_info['CurrentURIMetaData']
# Work out what the playing state is - if a coordinator
if self.is_coordinator:
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
self.transport_state = transport_info[
'current_transport_state']
# Save of the current queue if we need to
self._save_queue()
# return if device is a coordinator (helps usage)
return self.is_coordinator | python | def snapshot(self):
"""Record and store the current state of a device.
Returns:
bool: `True` if the device is a coordinator, `False` otherwise.
Useful for determining whether playing an alert on a device
will ungroup it.
"""
# get if device coordinator (or slave) True (or False)
self.is_coordinator = self.device.is_coordinator
# Get information about the currently playing media
media_info = self.device.avTransport.GetMediaInfo([('InstanceID', 0)])
self.media_uri = media_info['CurrentURI']
# Extract source from media uri - below some media URI value examples:
# 'x-rincon-queue:RINCON_000E5859E49601400#0'
# - playing a local queue always #0 for local queue)
#
# 'x-rincon-queue:RINCON_000E5859E49601400#6'
# - playing a cloud queue where #x changes with each queue)
#
# -'x-rincon:RINCON_000E5859E49601400'
# - a slave player pointing to coordinator player
if self.media_uri.split(':')[0] == 'x-rincon-queue':
# The pylint error below is a false positive, see about removing it
# in the future
# pylint: disable=simplifiable-if-statement
if self.media_uri.split('#')[1] == '0':
# playing local queue
self.is_playing_queue = True
else:
# playing cloud queue - started from Alexa
self.is_playing_cloud_queue = True
# Save the volume, mute and other sound settings
self.volume = self.device.volume
self.mute = self.device.mute
self.bass = self.device.bass
self.treble = self.device.treble
self.loudness = self.device.loudness
# get details required for what's playing:
if self.is_playing_queue:
# playing from queue - save repeat, random, cross fade, track, etc.
self.play_mode = self.device.play_mode
self.cross_fade = self.device.cross_fade
# Get information about the currently playing track
track_info = self.device.get_current_track_info()
if track_info is not None:
position = track_info['playlist_position']
if position != "":
# save as integer
self.playlist_position = int(position)
self.track_position = track_info['position']
else:
# playing from a stream - save media metadata
self.media_metadata = media_info['CurrentURIMetaData']
# Work out what the playing state is - if a coordinator
if self.is_coordinator:
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
self.transport_state = transport_info[
'current_transport_state']
# Save of the current queue if we need to
self._save_queue()
# return if device is a coordinator (helps usage)
return self.is_coordinator | [
"def",
"snapshot",
"(",
"self",
")",
":",
"# get if device coordinator (or slave) True (or False)",
"self",
".",
"is_coordinator",
"=",
"self",
".",
"device",
".",
"is_coordinator",
"# Get information about the currently playing media",
"media_info",
"=",
"self",
".",
"devi... | Record and store the current state of a device.
Returns:
bool: `True` if the device is a coordinator, `False` otherwise.
Useful for determining whether playing an alert on a device
will ungroup it. | [
"Record",
"and",
"store",
"the",
"current",
"state",
"of",
"a",
"device",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/snapshot.py#L87-L158 | train | 214,828 |
SoCo/SoCo | soco/snapshot.py | Snapshot._save_queue | def _save_queue(self):
"""Save the current state of the queue."""
if self.queue is not None:
# Maximum batch is 486, anything larger will still only
# return 486
batch_size = 400
total = 0
num_return = batch_size
# Need to get all the tracks in batches, but Only get the next
# batch if all the items requested were in the last batch
while num_return == batch_size:
queue_items = self.device.get_queue(total, batch_size)
# Check how many entries were returned
num_return = len(queue_items)
# Make sure the queue is not empty
if num_return > 0:
self.queue.append(queue_items)
# Update the total that have been processed
total = total + num_return | python | def _save_queue(self):
"""Save the current state of the queue."""
if self.queue is not None:
# Maximum batch is 486, anything larger will still only
# return 486
batch_size = 400
total = 0
num_return = batch_size
# Need to get all the tracks in batches, but Only get the next
# batch if all the items requested were in the last batch
while num_return == batch_size:
queue_items = self.device.get_queue(total, batch_size)
# Check how many entries were returned
num_return = len(queue_items)
# Make sure the queue is not empty
if num_return > 0:
self.queue.append(queue_items)
# Update the total that have been processed
total = total + num_return | [
"def",
"_save_queue",
"(",
"self",
")",
":",
"if",
"self",
".",
"queue",
"is",
"not",
"None",
":",
"# Maximum batch is 486, anything larger will still only",
"# return 486",
"batch_size",
"=",
"400",
"total",
"=",
"0",
"num_return",
"=",
"batch_size",
"# Need to get... | Save the current state of the queue. | [
"Save",
"the",
"current",
"state",
"of",
"the",
"queue",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/snapshot.py#L255-L274 | train | 214,829 |
SoCo/SoCo | soco/snapshot.py | Snapshot._restore_queue | def _restore_queue(self):
"""Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up.
"""
if self.queue is not None:
# Clear the queue so that it can be reset
self.device.clear_queue()
# Now loop around all the queue entries adding them
for queue_group in self.queue:
for queue_item in queue_group:
self.device.add_uri_to_queue(queue_item.uri) | python | def _restore_queue(self):
"""Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up.
"""
if self.queue is not None:
# Clear the queue so that it can be reset
self.device.clear_queue()
# Now loop around all the queue entries adding them
for queue_group in self.queue:
for queue_item in queue_group:
self.device.add_uri_to_queue(queue_item.uri) | [
"def",
"_restore_queue",
"(",
"self",
")",
":",
"if",
"self",
".",
"queue",
"is",
"not",
"None",
":",
"# Clear the queue so that it can be reset",
"self",
".",
"device",
".",
"clear_queue",
"(",
")",
"# Now loop around all the queue entries adding them",
"for",
"queue... | Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up. | [
"Restore",
"the",
"previous",
"state",
"of",
"the",
"queue",
"."
] | 671937e07d7973b78c0cbee153d4f3ad68ec48c6 | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/snapshot.py#L276-L291 | train | 214,830 |
ContextLab/hypertools | hypertools/_shared/params.py | default_params | def default_params(model, update_dict=None):
"""
Loads and updates default model parameters
Parameters
----------
model : str
The name of a model
update_dict : dict
A dict to update default parameters
Returns
----------
params : dict
A dictionary of parameters
"""
if model in parameters:
params = parameters[model].copy()
else:
params = None
if update_dict:
if params is None:
params = {}
params.update(update_dict)
return params | python | def default_params(model, update_dict=None):
"""
Loads and updates default model parameters
Parameters
----------
model : str
The name of a model
update_dict : dict
A dict to update default parameters
Returns
----------
params : dict
A dictionary of parameters
"""
if model in parameters:
params = parameters[model].copy()
else:
params = None
if update_dict:
if params is None:
params = {}
params.update(update_dict)
return params | [
"def",
"default_params",
"(",
"model",
",",
"update_dict",
"=",
"None",
")",
":",
"if",
"model",
"in",
"parameters",
":",
"params",
"=",
"parameters",
"[",
"model",
"]",
".",
"copy",
"(",
")",
"else",
":",
"params",
"=",
"None",
"if",
"update_dict",
":... | Loads and updates default model parameters
Parameters
----------
model : str
The name of a model
update_dict : dict
A dict to update default parameters
Returns
----------
params : dict
A dictionary of parameters | [
"Loads",
"and",
"updates",
"default",
"model",
"parameters"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_shared/params.py#L18-L48 | train | 214,831 |
ContextLab/hypertools | hypertools/tools/describe.py | describe | def describe(x, reduce='IncrementalPCA', max_dims=None, show=True,
format_data=True):
"""
Create plot describing covariance with as a function of number of dimensions
This function correlates the raw data with reduced data to get a sense
for how well the data can be summarized with n dimensions. Useful for
evaluating quality of dimensionality reduced plots.
Parameters
----------
x : Numpy array, DataFrame or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
max_dims : int
Maximum number of dimensions to consider
show : bool
Plot the result (default : true)
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
result : dict
A dictionary with the analysis results. 'average' is the correlation
by number of components for all data. 'individual' is a list of lists,
where each list is a correlation by number of components vector (for each
input list).
"""
warnings.warn('When input data is large, this computation can take a long time.')
def summary(x, max_dims=None):
# if data is a list, stack it
if type(x) is list:
x = np.vstack(x)
# if max dims is not set, make it the length of the minimum number of columns
if max_dims is None:
if x.shape[1]>x.shape[0]:
max_dims = x.shape[0]
else:
max_dims = x.shape[1]
# correlation matrix for all dimensions
alldims = get_cdist(x)
corrs=[]
for dims in range(2, max_dims):
reduced = get_cdist(reducer(x, ndims=dims, reduce=reduce))
corrs.append(get_corr(alldims, reduced))
del reduced
return corrs
# common format
if format_data:
x = formatter(x, ppca=True)
# a dictionary to store results
result = {}
result['average'] = summary(x, max_dims)
result['individual'] = [summary(x_i, max_dims) for x_i in x]
if max_dims is None:
max_dims = len(result['average'])
# if show, plot it
if show:
fig, ax = plt.subplots()
ax = sns.tsplot(data=result['individual'], time=[i for i in range(2, max_dims+2)], err_style="unit_traces")
ax.set_title('Correlation with raw data by number of components')
ax.set_ylabel('Correlation')
ax.set_xlabel('Number of components')
plt.show()
return result | python | def describe(x, reduce='IncrementalPCA', max_dims=None, show=True,
format_data=True):
"""
Create plot describing covariance with as a function of number of dimensions
This function correlates the raw data with reduced data to get a sense
for how well the data can be summarized with n dimensions. Useful for
evaluating quality of dimensionality reduced plots.
Parameters
----------
x : Numpy array, DataFrame or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
max_dims : int
Maximum number of dimensions to consider
show : bool
Plot the result (default : true)
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
result : dict
A dictionary with the analysis results. 'average' is the correlation
by number of components for all data. 'individual' is a list of lists,
where each list is a correlation by number of components vector (for each
input list).
"""
warnings.warn('When input data is large, this computation can take a long time.')
def summary(x, max_dims=None):
# if data is a list, stack it
if type(x) is list:
x = np.vstack(x)
# if max dims is not set, make it the length of the minimum number of columns
if max_dims is None:
if x.shape[1]>x.shape[0]:
max_dims = x.shape[0]
else:
max_dims = x.shape[1]
# correlation matrix for all dimensions
alldims = get_cdist(x)
corrs=[]
for dims in range(2, max_dims):
reduced = get_cdist(reducer(x, ndims=dims, reduce=reduce))
corrs.append(get_corr(alldims, reduced))
del reduced
return corrs
# common format
if format_data:
x = formatter(x, ppca=True)
# a dictionary to store results
result = {}
result['average'] = summary(x, max_dims)
result['individual'] = [summary(x_i, max_dims) for x_i in x]
if max_dims is None:
max_dims = len(result['average'])
# if show, plot it
if show:
fig, ax = plt.subplots()
ax = sns.tsplot(data=result['individual'], time=[i for i in range(2, max_dims+2)], err_style="unit_traces")
ax.set_title('Correlation with raw data by number of components')
ax.set_ylabel('Correlation')
ax.set_xlabel('Number of components')
plt.show()
return result | [
"def",
"describe",
"(",
"x",
",",
"reduce",
"=",
"'IncrementalPCA'",
",",
"max_dims",
"=",
"None",
",",
"show",
"=",
"True",
",",
"format_data",
"=",
"True",
")",
":",
"warnings",
".",
"warn",
"(",
"'When input data is large, this computation can take a long time.... | Create plot describing covariance with as a function of number of dimensions
This function correlates the raw data with reduced data to get a sense
for how well the data can be summarized with n dimensions. Useful for
evaluating quality of dimensionality reduced plots.
Parameters
----------
x : Numpy array, DataFrame or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
max_dims : int
Maximum number of dimensions to consider
show : bool
Plot the result (default : true)
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
result : dict
A dictionary with the analysis results. 'average' is the correlation
by number of components for all data. 'individual' is a list of lists,
where each list is a correlation by number of components vector (for each
input list). | [
"Create",
"plot",
"describing",
"covariance",
"with",
"as",
"a",
"function",
"of",
"number",
"of",
"dimensions"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/describe.py#L16-L106 | train | 214,832 |
ContextLab/hypertools | hypertools/tools/missing_inds.py | missing_inds | def missing_inds(x, format_data=True):
"""
Returns indices of missing data
This function is useful to identify rows of your array that contain missing
data or nans. The returned indices can be used to remove the rows with
missing data, or label the missing data points that are interpolated
using PPCA.
Parameters
----------
x : array or list of arrays
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
inds : list, or list of lists
A list of indices representing rows with missing data. If a list of
numpy arrays is passed, a list of lists will be returned.
"""
if format_data:
x = formatter(x, ppca=False)
inds = []
for arr in x:
if np.argwhere(np.isnan(arr)).size is 0:
inds.append(None)
else:
inds.append(np.argwhere(np.isnan(arr))[:,0])
if len(inds) > 1:
return inds
else:
return inds[0] | python | def missing_inds(x, format_data=True):
"""
Returns indices of missing data
This function is useful to identify rows of your array that contain missing
data or nans. The returned indices can be used to remove the rows with
missing data, or label the missing data points that are interpolated
using PPCA.
Parameters
----------
x : array or list of arrays
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
inds : list, or list of lists
A list of indices representing rows with missing data. If a list of
numpy arrays is passed, a list of lists will be returned.
"""
if format_data:
x = formatter(x, ppca=False)
inds = []
for arr in x:
if np.argwhere(np.isnan(arr)).size is 0:
inds.append(None)
else:
inds.append(np.argwhere(np.isnan(arr))[:,0])
if len(inds) > 1:
return inds
else:
return inds[0] | [
"def",
"missing_inds",
"(",
"x",
",",
"format_data",
"=",
"True",
")",
":",
"if",
"format_data",
":",
"x",
"=",
"formatter",
"(",
"x",
",",
"ppca",
"=",
"False",
")",
"inds",
"=",
"[",
"]",
"for",
"arr",
"in",
"x",
":",
"if",
"np",
".",
"argwhere... | Returns indices of missing data
This function is useful to identify rows of your array that contain missing
data or nans. The returned indices can be used to remove the rows with
missing data, or label the missing data points that are interpolated
using PPCA.
Parameters
----------
x : array or list of arrays
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
inds : list, or list of lists
A list of indices representing rows with missing data. If a list of
numpy arrays is passed, a list of lists will be returned. | [
"Returns",
"indices",
"of",
"missing",
"data"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/missing_inds.py#L7-L43 | train | 214,833 |
ContextLab/hypertools | hypertools/tools/normalize.py | normalize | def normalize(x, normalize='across', internal=False, format_data=True):
"""
Z-transform the columns or rows of an array, or list of arrays
This function normalizes the rows or columns of the input array(s). This
can be useful because data reduction and machine learning techniques are
sensitive to scaling differences between features. By default, the function
is set to normalize 'across' the columns of all lists, but it can also
normalize the columns 'within' each individual list, or alternatively, for
each row in the array.
Parameters
----------
x : Numpy array or list of arrays
This can either be a single array, or list of arrays
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
normalized_x : Numpy array or list of arrays
An array or list of arrays where the columns or rows are z-scored. If
the input was a list, a list is returned. Otherwise, an array is
returned.
"""
assert normalize in ['across','within','row', False, None], "scale_type must be across, within, row or none."
if normalize in [False, None]:
return x
else:
if format_data:
x = formatter(x, ppca=True)
zscore = lambda X, y: (y - np.mean(X)) / np.std(X) if len(set(y)) > 1 else np.zeros(y.shape)
if normalize == 'across':
x_stacked=np.vstack(x)
normalized_x = [np.array([zscore(x_stacked[:,j], i[:,j]) for j in range(i.shape[1])]).T for i in x]
elif normalize == 'within':
normalized_x = [np.array([zscore(i[:,j], i[:,j]) for j in range(i.shape[1])]).T for i in x]
elif normalize == 'row':
normalized_x = [np.array([zscore(i[j,:], i[j,:]) for j in range(i.shape[0])]) for i in x]
if internal or len(normalized_x)>1:
return normalized_x
else:
return normalized_x[0] | python | def normalize(x, normalize='across', internal=False, format_data=True):
"""
Z-transform the columns or rows of an array, or list of arrays
This function normalizes the rows or columns of the input array(s). This
can be useful because data reduction and machine learning techniques are
sensitive to scaling differences between features. By default, the function
is set to normalize 'across' the columns of all lists, but it can also
normalize the columns 'within' each individual list, or alternatively, for
each row in the array.
Parameters
----------
x : Numpy array or list of arrays
This can either be a single array, or list of arrays
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
normalized_x : Numpy array or list of arrays
An array or list of arrays where the columns or rows are z-scored. If
the input was a list, a list is returned. Otherwise, an array is
returned.
"""
assert normalize in ['across','within','row', False, None], "scale_type must be across, within, row or none."
if normalize in [False, None]:
return x
else:
if format_data:
x = formatter(x, ppca=True)
zscore = lambda X, y: (y - np.mean(X)) / np.std(X) if len(set(y)) > 1 else np.zeros(y.shape)
if normalize == 'across':
x_stacked=np.vstack(x)
normalized_x = [np.array([zscore(x_stacked[:,j], i[:,j]) for j in range(i.shape[1])]).T for i in x]
elif normalize == 'within':
normalized_x = [np.array([zscore(i[:,j], i[:,j]) for j in range(i.shape[1])]).T for i in x]
elif normalize == 'row':
normalized_x = [np.array([zscore(i[j,:], i[j,:]) for j in range(i.shape[0])]) for i in x]
if internal or len(normalized_x)>1:
return normalized_x
else:
return normalized_x[0] | [
"def",
"normalize",
"(",
"x",
",",
"normalize",
"=",
"'across'",
",",
"internal",
"=",
"False",
",",
"format_data",
"=",
"True",
")",
":",
"assert",
"normalize",
"in",
"[",
"'across'",
",",
"'within'",
",",
"'row'",
",",
"False",
",",
"None",
"]",
",",... | Z-transform the columns or rows of an array, or list of arrays
This function normalizes the rows or columns of the input array(s). This
can be useful because data reduction and machine learning techniques are
sensitive to scaling differences between features. By default, the function
is set to normalize 'across' the columns of all lists, but it can also
normalize the columns 'within' each individual list, or alternatively, for
each row in the array.
Parameters
----------
x : Numpy array or list of arrays
This can either be a single array, or list of arrays
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
normalized_x : Numpy array or list of arrays
An array or list of arrays where the columns or rows are z-scored. If
the input was a list, a list is returned. Otherwise, an array is
returned. | [
"Z",
"-",
"transform",
"the",
"columns",
"or",
"rows",
"of",
"an",
"array",
"or",
"list",
"of",
"arrays"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/normalize.py#L12-L72 | train | 214,834 |
ContextLab/hypertools | hypertools/_externals/srm.py | SRM._init_structures | def _init_structures(self, data, subjects):
"""Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`.
"""
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
mu.append(np.mean(data[subject], 1))
rho2[subject] = 1
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
return x, mu, rho2, trace_xtx | python | def _init_structures(self, data, subjects):
"""Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`.
"""
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
mu.append(np.mean(data[subject], 1))
rho2[subject] = 1
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
return x, mu, rho2, trace_xtx | [
"def",
"_init_structures",
"(",
"self",
",",
"data",
",",
"subjects",
")",
":",
"x",
"=",
"[",
"]",
"mu",
"=",
"[",
"]",
"rho2",
"=",
"np",
".",
"zeros",
"(",
"subjects",
")",
"trace_xtx",
"=",
"np",
".",
"zeros",
"(",
"subjects",
")",
"for",
"su... | Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`. | [
"Initializes",
"data",
"structures",
"for",
"SRM",
"and",
"preprocess",
"the",
"data",
"."
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_externals/srm.py#L232-L270 | train | 214,835 |
ContextLab/hypertools | hypertools/_externals/srm.py | SRM._likelihood | def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples):
"""Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
wt_invpsi_x : array, shape=[features, samples]
samples : int
The total number of samples in the data.
Returns
-------
loglikehood : float
The log-likelihood value.
"""
log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
+ np.log(np.diag(chol_sigma_s) ** 2).sum())
loglikehood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
loglikehood += 0.5 * np.trace(
wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
# + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
return loglikehood | python | def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples):
"""Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
wt_invpsi_x : array, shape=[features, samples]
samples : int
The total number of samples in the data.
Returns
-------
loglikehood : float
The log-likelihood value.
"""
log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
+ np.log(np.diag(chol_sigma_s) ** 2).sum())
loglikehood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
loglikehood += 0.5 * np.trace(
wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
# + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
return loglikehood | [
"def",
"_likelihood",
"(",
"self",
",",
"chol_sigma_s_rhos",
",",
"log_det_psi",
",",
"chol_sigma_s",
",",
"trace_xt_invsigma2_x",
",",
"inv_sigma_s_rhos",
",",
"wt_invpsi_x",
",",
"samples",
")",
":",
"log_det",
"=",
"(",
"np",
".",
"log",
"(",
"np",
".",
"... | Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
wt_invpsi_x : array, shape=[features, samples]
samples : int
The total number of samples in the data.
Returns
-------
loglikehood : float
The log-likelihood value. | [
"Calculate",
"the",
"log",
"-",
"likelihood",
"function"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_externals/srm.py#L272-L317 | train | 214,836 |
ContextLab/hypertools | hypertools/_externals/srm.py | DetSRM.fit | def fit(self, X, y=None):
"""Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Deterministic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.w_, self.s_ = self._srm(X)
return self | python | def fit(self, X, y=None):
"""Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Deterministic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.w_, self.s_ = self._srm(X)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Starting Deterministic SRM'",
")",
"# Check the number of subjects",
"if",
"len",
"(",
"X",
")",
"<=",
"1",
":",
"raise",
"ValueError",
"(",
"\"There are no... | Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used | [
"Compute",
"the",
"Deterministic",
"Shared",
"Response",
"Model"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_externals/srm.py#L488-L523 | train | 214,837 |
ContextLab/hypertools | hypertools/_externals/srm.py | DetSRM._objective_function | def _objective_function(self, data, w, s):
"""Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value.
"""
subjects = len(data)
objective = 0.0
for m in range(subjects):
objective += \
np.linalg.norm(data[m] - w[m].dot(s), 'fro')**2
return objective * 0.5 / data[0].shape[1] | python | def _objective_function(self, data, w, s):
"""Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value.
"""
subjects = len(data)
objective = 0.0
for m in range(subjects):
objective += \
np.linalg.norm(data[m] - w[m].dot(s), 'fro')**2
return objective * 0.5 / data[0].shape[1] | [
"def",
"_objective_function",
"(",
"self",
",",
"data",
",",
"w",
",",
"s",
")",
":",
"subjects",
"=",
"len",
"(",
"data",
")",
"objective",
"=",
"0.0",
"for",
"m",
"in",
"range",
"(",
"subjects",
")",
":",
"objective",
"+=",
"np",
".",
"linalg",
"... | Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value. | [
"Calculate",
"the",
"objective",
"function"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_externals/srm.py#L557-L584 | train | 214,838 |
ContextLab/hypertools | hypertools/tools/text2mat.py | text2mat | def text2mat(data, vectorizer='CountVectorizer',
semantic='LatentDirichletAllocation', corpus='wiki'):
"""
Turns a list of text samples into a matrix using a vectorizer and a text model
Parameters
----------
data : list (or list of lists) of text samples
The text data to transform
vectorizer : str, dict, class or class instance
The vectorizer to use. Built-in options are 'CountVectorizer' or
'TfidfVectorizer'. To change default parameters, set to a dictionary
e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
for details. You can also specify your own vectorizer model as a class,
or class instance. With either option, the class must have a
fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to vectorizer_params. If
a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
Text model to use to transform text data. Built-in options are
'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
{'n_components' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
for details on the two model options. You can also specify your own
text model as a class, or class instance. With either option, the class
must have a fit_transform method (see here:
http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to text_params. If
a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
or 'sotus' and the default semantic and vectorizer models are used, a
pretrained model will be loaded which can save a lot of time.
Returns
----------
transformed data : list of numpy arrays
The transformed text data
"""
if semantic is None:
semantic = 'LatentDirichletAllocation'
if vectorizer is None:
vectorizer = 'CountVectorizer'
model_is_fit=False
if corpus is not None:
if corpus in ('wiki', 'nips', 'sotus',):
if semantic == 'LatentDirichletAllocation' and vectorizer == 'CountVectorizer':
semantic = load(corpus + '_model')
vectorizer = None
model_is_fit = True
else:
corpus = np.array(load(corpus).get_data())
else:
corpus = np.array([corpus])
vtype = _check_mtype(vectorizer)
if vtype == 'str':
vectorizer_params = default_params(vectorizer)
elif vtype == 'dict':
vectorizer_params = default_params(vectorizer['model'], vectorizer['params'])
vectorizer = vectorizer['model']
elif vtype in ('class', 'class_instance'):
if hasattr(vectorizer, 'fit_transform'):
vectorizer_models.update({'user_model' : vectorizer})
vectorizer = 'user_model'
else:
raise RuntimeError('Error: Vectorizer model must have fit_transform '
'method following the scikit-learn API. See here '
'for more details: '
'http://scikit-learn.org/stable/data_transforms.html')
ttype = _check_mtype(semantic)
if ttype == 'str':
text_params = default_params(semantic)
elif ttype == 'dict':
text_params = default_params(semantic['model'], semantic['params'])
semantic = semantic['model']
elif ttype in ('class', 'class_instance'):
if hasattr(semantic, 'fit_transform'):
texts.update({'user_model' : semantic})
semantic = 'user_model'
else:
raise RuntimeError('Text model must have fit_transform '
'method following the scikit-learn API. See here '
'for more details: '
'http://scikit-learn.org/stable/data_transforms.html')
if vectorizer:
if vtype in ('str', 'dict'):
vmodel = vectorizer_models[vectorizer](**vectorizer_params)
elif vtype == 'class':
vmodel = vectorizer_models[vectorizer]()
elif vtype == 'class_instance':
vmodel = vectorizer_models[vectorizer]
else:
vmodel = None
if semantic:
if ttype in ('str', 'dict'):
tmodel = texts[semantic](**text_params)
elif ttype == 'class':
tmodel = texts[semantic]()
elif ttype == 'class_instance':
tmodel = texts[semantic]
else:
tmodel = None
if not isinstance(data, list):
data = [data]
if corpus is None:
_fit_models(vmodel, tmodel, data, model_is_fit)
else:
_fit_models(vmodel, tmodel, corpus, model_is_fit)
return _transform(vmodel, tmodel, data) | python | def text2mat(data, vectorizer='CountVectorizer',
semantic='LatentDirichletAllocation', corpus='wiki'):
"""
Turns a list of text samples into a matrix using a vectorizer and a text model
Parameters
----------
data : list (or list of lists) of text samples
The text data to transform
vectorizer : str, dict, class or class instance
The vectorizer to use. Built-in options are 'CountVectorizer' or
'TfidfVectorizer'. To change default parameters, set to a dictionary
e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
for details. You can also specify your own vectorizer model as a class,
or class instance. With either option, the class must have a
fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to vectorizer_params. If
a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
Text model to use to transform text data. Built-in options are
'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
{'n_components' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
for details on the two model options. You can also specify your own
text model as a class, or class instance. With either option, the class
must have a fit_transform method (see here:
http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to text_params. If
a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
or 'sotus' and the default semantic and vectorizer models are used, a
pretrained model will be loaded which can save a lot of time.
Returns
----------
transformed data : list of numpy arrays
The transformed text data
"""
if semantic is None:
semantic = 'LatentDirichletAllocation'
if vectorizer is None:
vectorizer = 'CountVectorizer'
model_is_fit=False
if corpus is not None:
if corpus in ('wiki', 'nips', 'sotus',):
if semantic == 'LatentDirichletAllocation' and vectorizer == 'CountVectorizer':
semantic = load(corpus + '_model')
vectorizer = None
model_is_fit = True
else:
corpus = np.array(load(corpus).get_data())
else:
corpus = np.array([corpus])
vtype = _check_mtype(vectorizer)
if vtype == 'str':
vectorizer_params = default_params(vectorizer)
elif vtype == 'dict':
vectorizer_params = default_params(vectorizer['model'], vectorizer['params'])
vectorizer = vectorizer['model']
elif vtype in ('class', 'class_instance'):
if hasattr(vectorizer, 'fit_transform'):
vectorizer_models.update({'user_model' : vectorizer})
vectorizer = 'user_model'
else:
raise RuntimeError('Error: Vectorizer model must have fit_transform '
'method following the scikit-learn API. See here '
'for more details: '
'http://scikit-learn.org/stable/data_transforms.html')
ttype = _check_mtype(semantic)
if ttype == 'str':
text_params = default_params(semantic)
elif ttype == 'dict':
text_params = default_params(semantic['model'], semantic['params'])
semantic = semantic['model']
elif ttype in ('class', 'class_instance'):
if hasattr(semantic, 'fit_transform'):
texts.update({'user_model' : semantic})
semantic = 'user_model'
else:
raise RuntimeError('Text model must have fit_transform '
'method following the scikit-learn API. See here '
'for more details: '
'http://scikit-learn.org/stable/data_transforms.html')
if vectorizer:
if vtype in ('str', 'dict'):
vmodel = vectorizer_models[vectorizer](**vectorizer_params)
elif vtype == 'class':
vmodel = vectorizer_models[vectorizer]()
elif vtype == 'class_instance':
vmodel = vectorizer_models[vectorizer]
else:
vmodel = None
if semantic:
if ttype in ('str', 'dict'):
tmodel = texts[semantic](**text_params)
elif ttype == 'class':
tmodel = texts[semantic]()
elif ttype == 'class_instance':
tmodel = texts[semantic]
else:
tmodel = None
if not isinstance(data, list):
data = [data]
if corpus is None:
_fit_models(vmodel, tmodel, data, model_is_fit)
else:
_fit_models(vmodel, tmodel, corpus, model_is_fit)
return _transform(vmodel, tmodel, data) | [
"def",
"text2mat",
"(",
"data",
",",
"vectorizer",
"=",
"'CountVectorizer'",
",",
"semantic",
"=",
"'LatentDirichletAllocation'",
",",
"corpus",
"=",
"'wiki'",
")",
":",
"if",
"semantic",
"is",
"None",
":",
"semantic",
"=",
"'LatentDirichletAllocation'",
"if",
"... | Turns a list of text samples into a matrix using a vectorizer and a text model
Parameters
----------
data : list (or list of lists) of text samples
The text data to transform
vectorizer : str, dict, class or class instance
The vectorizer to use. Built-in options are 'CountVectorizer' or
'TfidfVectorizer'. To change default parameters, set to a dictionary
e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
for details. You can also specify your own vectorizer model as a class,
or class instance. With either option, the class must have a
fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to vectorizer_params. If
a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
Text model to use to transform text data. Built-in options are
'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
{'n_components' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
for details on the two model options. You can also specify your own
text model as a class, or class instance. With either option, the class
must have a fit_transform method (see here:
http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to text_params. If
a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
or 'sotus' and the default semantic and vectorizer models are used, a
pretrained model will be loaded which can save a lot of time.
Returns
----------
transformed data : list of numpy arrays
The transformed text data | [
"Turns",
"a",
"list",
"of",
"text",
"samples",
"into",
"a",
"matrix",
"using",
"a",
"vectorizer",
"and",
"a",
"text",
"model"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/text2mat.py#L28-L148 | train | 214,839 |
ContextLab/hypertools | hypertools/_shared/helpers.py | patch_lines | def patch_lines(x):
"""
Draw lines between groups
"""
for idx in range(len(x)-1):
x[idx] = np.vstack([x[idx], x[idx+1][0,:]])
return x | python | def patch_lines(x):
"""
Draw lines between groups
"""
for idx in range(len(x)-1):
x[idx] = np.vstack([x[idx], x[idx+1][0,:]])
return x | [
"def",
"patch_lines",
"(",
"x",
")",
":",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"x",
")",
"-",
"1",
")",
":",
"x",
"[",
"idx",
"]",
"=",
"np",
".",
"vstack",
"(",
"[",
"x",
"[",
"idx",
"]",
",",
"x",
"[",
"idx",
"+",
"1",
"]",
"... | Draw lines between groups | [
"Draw",
"lines",
"between",
"groups"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_shared/helpers.py#L161-L167 | train | 214,840 |
ContextLab/hypertools | hypertools/_shared/helpers.py | check_geo | def check_geo(geo):
""" Checks a geo and makes sure the text fields are not binary """
geo = copy.copy(geo)
def fix_item(item):
if isinstance(item, six.binary_type):
return item.decode()
return item
def fix_list(lst):
return [fix_item(i) for i in lst]
if isinstance(geo.reduce, six.binary_type):
geo.reduce = geo.reduce.decode()
for key in geo.kwargs.keys():
if geo.kwargs[key] is not None:
if isinstance(geo.kwargs[key], (list, np.ndarray)):
geo.kwargs[key] = fix_list(geo.kwargs[key])
elif isinstance(geo.kwargs[key], six.binary_type):
geo.kwargs[key] = fix_item(geo.kwargs[key])
return geo | python | def check_geo(geo):
""" Checks a geo and makes sure the text fields are not binary """
geo = copy.copy(geo)
def fix_item(item):
if isinstance(item, six.binary_type):
return item.decode()
return item
def fix_list(lst):
return [fix_item(i) for i in lst]
if isinstance(geo.reduce, six.binary_type):
geo.reduce = geo.reduce.decode()
for key in geo.kwargs.keys():
if geo.kwargs[key] is not None:
if isinstance(geo.kwargs[key], (list, np.ndarray)):
geo.kwargs[key] = fix_list(geo.kwargs[key])
elif isinstance(geo.kwargs[key], six.binary_type):
geo.kwargs[key] = fix_item(geo.kwargs[key])
return geo | [
"def",
"check_geo",
"(",
"geo",
")",
":",
"geo",
"=",
"copy",
".",
"copy",
"(",
"geo",
")",
"def",
"fix_item",
"(",
"item",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"item",
".",
"decode",
"(",
... | Checks a geo and makes sure the text fields are not binary | [
"Checks",
"a",
"geo",
"and",
"makes",
"sure",
"the",
"text",
"fields",
"are",
"not",
"binary"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_shared/helpers.py#L232-L251 | train | 214,841 |
ContextLab/hypertools | hypertools/tools/df2mat.py | df2mat | def df2mat(data, return_labels=False):
"""
Transforms a Pandas DataFrame into a Numpy array with binarized text columns
This function transforms single-level df to an array so it can be plotted
with HyperTools. Additionally, it uses the Pandas.Dataframe.get_dummies
function to transform text columns into binary vectors, or
'dummy variables'.
Parameters
----------
data : A single-level Pandas DataFrame
The df that you want to convert. Note that this currently only works
with single-level (not Multi-level indices).
Returns
----------
plot_data : Numpy array
A Numpy array where text columns are turned into binary vectors.
labels : list (optional)
A list of column labels for the numpy array. To return this, set
return_labels=True.
"""
df_str = data.select_dtypes(include=['object'])
df_num = data.select_dtypes(exclude=['object'])
for colname in df_str.columns:
df_num = df_num.join(pd.get_dummies(data[colname], prefix=colname))
plot_data = df_num.as_matrix()
labels=list(df_num.columns.values)
if return_labels:
return plot_data,labels
else:
return plot_data | python | def df2mat(data, return_labels=False):
"""
Transforms a Pandas DataFrame into a Numpy array with binarized text columns
This function transforms single-level df to an array so it can be plotted
with HyperTools. Additionally, it uses the Pandas.Dataframe.get_dummies
function to transform text columns into binary vectors, or
'dummy variables'.
Parameters
----------
data : A single-level Pandas DataFrame
The df that you want to convert. Note that this currently only works
with single-level (not Multi-level indices).
Returns
----------
plot_data : Numpy array
A Numpy array where text columns are turned into binary vectors.
labels : list (optional)
A list of column labels for the numpy array. To return this, set
return_labels=True.
"""
df_str = data.select_dtypes(include=['object'])
df_num = data.select_dtypes(exclude=['object'])
for colname in df_str.columns:
df_num = df_num.join(pd.get_dummies(data[colname], prefix=colname))
plot_data = df_num.as_matrix()
labels=list(df_num.columns.values)
if return_labels:
return plot_data,labels
else:
return plot_data | [
"def",
"df2mat",
"(",
"data",
",",
"return_labels",
"=",
"False",
")",
":",
"df_str",
"=",
"data",
".",
"select_dtypes",
"(",
"include",
"=",
"[",
"'object'",
"]",
")",
"df_num",
"=",
"data",
".",
"select_dtypes",
"(",
"exclude",
"=",
"[",
"'object'",
... | Transforms a Pandas DataFrame into a Numpy array with binarized text columns
This function transforms single-level df to an array so it can be plotted
with HyperTools. Additionally, it uses the Pandas.Dataframe.get_dummies
function to transform text columns into binary vectors, or
'dummy variables'.
Parameters
----------
data : A single-level Pandas DataFrame
The df that you want to convert. Note that this currently only works
with single-level (not Multi-level indices).
Returns
----------
plot_data : Numpy array
A Numpy array where text columns are turned into binary vectors.
labels : list (optional)
A list of column labels for the numpy array. To return this, set
return_labels=True. | [
"Transforms",
"a",
"Pandas",
"DataFrame",
"into",
"a",
"Numpy",
"array",
"with",
"binarized",
"text",
"columns"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/df2mat.py#L6-L45 | train | 214,842 |
ContextLab/hypertools | hypertools/tools/load.py | load | def load(dataset, reduce=None, ndims=None, align=None, normalize=None):
"""
Load a .geo file or example data
Parameters
----------
dataset : string
The name of the example dataset. Can be a `.geo` file, or one of a
number of example datasets listed below.
`weights` is list of 2 numpy arrays, each containing average brain
activity (fMRI) from 18 subjects listening to the same story, fit using
Hierarchical Topographic Factor Analysis (HTFA) with 100 nodes. The rows
are fMRI measurements and the columns are parameters of the model.
`weights_sample` is a sample of 3 subjects from that dataset.
`weights_avg` is the dataset split in half and averaged into two groups.
`spiral` is numpy array containing data for a 3D spiral, used to
highlight the `procrustes` function.
`mushrooms` is a numpy array comprised of features (columns) of a
collection of 8,124 mushroomm samples (rows).
`sotus` is a collection of State of the Union speeches from 1989-2018.
`wiki` is a collection of wikipedia pages used to fit wiki-model.
`wiki-model` is a sklearn Pipeline (CountVectorizer->LatentDirichletAllocation)
trained on a sample of wikipedia articles. It can be used to transform
text to topic vectors.
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
data : Numpy Array
Example data
"""
if dataset[-4:] == '.geo':
geo = dd.io.load(dataset)
if 'dtype' in geo:
if 'list' in geo['dtype']:
geo['data'] = list(geo['data'])
elif 'df' in geo['dtype']:
geo['data'] = pd.DataFrame(geo['data'])
geo['xform_data'] = list(geo['xform_data'])
data = DataGeometry(**geo)
elif dataset in datadict.keys():
data = _load_data(dataset, datadict[dataset])
else:
raise RuntimeError('No data loaded. Please specify a .geo file or '
'one of the following sample files: weights, '
'weights_avg, weights_sample, spiral, mushrooms, '
'wiki, nips or sotus.')
if data is not None:
if dataset in ('wiki_model', 'nips_model', 'sotus_model'):
return data
if isinstance(data, DataGeometry):
if any([reduce, ndims, align, normalize]):
from ..plot.plot import plot
if ndims:
if reduce is None:
reduce='IncrementalPCA'
d = analyze(data.get_data(), reduce=reduce, ndims=ndims, align=align, normalize=normalize)
return plot(d, show=False)
else:
return data
else:
return analyze(data, reduce=reduce, ndims=ndims, align=align, normalize=normalize) | python | def load(dataset, reduce=None, ndims=None, align=None, normalize=None):
"""
Load a .geo file or example data
Parameters
----------
dataset : string
The name of the example dataset. Can be a `.geo` file, or one of a
number of example datasets listed below.
`weights` is list of 2 numpy arrays, each containing average brain
activity (fMRI) from 18 subjects listening to the same story, fit using
Hierarchical Topographic Factor Analysis (HTFA) with 100 nodes. The rows
are fMRI measurements and the columns are parameters of the model.
`weights_sample` is a sample of 3 subjects from that dataset.
`weights_avg` is the dataset split in half and averaged into two groups.
`spiral` is numpy array containing data for a 3D spiral, used to
highlight the `procrustes` function.
`mushrooms` is a numpy array comprised of features (columns) of a
collection of 8,124 mushroomm samples (rows).
`sotus` is a collection of State of the Union speeches from 1989-2018.
`wiki` is a collection of wikipedia pages used to fit wiki-model.
`wiki-model` is a sklearn Pipeline (CountVectorizer->LatentDirichletAllocation)
trained on a sample of wikipedia articles. It can be used to transform
text to topic vectors.
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
data : Numpy Array
Example data
"""
if dataset[-4:] == '.geo':
geo = dd.io.load(dataset)
if 'dtype' in geo:
if 'list' in geo['dtype']:
geo['data'] = list(geo['data'])
elif 'df' in geo['dtype']:
geo['data'] = pd.DataFrame(geo['data'])
geo['xform_data'] = list(geo['xform_data'])
data = DataGeometry(**geo)
elif dataset in datadict.keys():
data = _load_data(dataset, datadict[dataset])
else:
raise RuntimeError('No data loaded. Please specify a .geo file or '
'one of the following sample files: weights, '
'weights_avg, weights_sample, spiral, mushrooms, '
'wiki, nips or sotus.')
if data is not None:
if dataset in ('wiki_model', 'nips_model', 'sotus_model'):
return data
if isinstance(data, DataGeometry):
if any([reduce, ndims, align, normalize]):
from ..plot.plot import plot
if ndims:
if reduce is None:
reduce='IncrementalPCA'
d = analyze(data.get_data(), reduce=reduce, ndims=ndims, align=align, normalize=normalize)
return plot(d, show=False)
else:
return data
else:
return analyze(data, reduce=reduce, ndims=ndims, align=align, normalize=normalize) | [
"def",
"load",
"(",
"dataset",
",",
"reduce",
"=",
"None",
",",
"ndims",
"=",
"None",
",",
"align",
"=",
"None",
",",
"normalize",
"=",
"None",
")",
":",
"if",
"dataset",
"[",
"-",
"4",
":",
"]",
"==",
"'.geo'",
":",
"geo",
"=",
"dd",
".",
"io"... | Load a .geo file or example data
Parameters
----------
dataset : string
The name of the example dataset. Can be a `.geo` file, or one of a
number of example datasets listed below.
`weights` is list of 2 numpy arrays, each containing average brain
activity (fMRI) from 18 subjects listening to the same story, fit using
Hierarchical Topographic Factor Analysis (HTFA) with 100 nodes. The rows
are fMRI measurements and the columns are parameters of the model.
`weights_sample` is a sample of 3 subjects from that dataset.
`weights_avg` is the dataset split in half and averaged into two groups.
`spiral` is numpy array containing data for a 3D spiral, used to
highlight the `procrustes` function.
`mushrooms` is a numpy array comprised of features (columns) of a
collection of 8,124 mushroom samples (rows).
`sotus` is a collection of State of the Union speeches from 1989-2018.
`wiki` is a collection of wikipedia pages used to fit wiki-model.
`wiki-model` is a sklearn Pipeline (CountVectorizer->LatentDirichletAllocation)
trained on a sample of wikipedia articles. It can be used to transform
text to topic vectors.
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
data : Numpy Array
Example data | [
"Load",
"a",
".",
"geo",
"file",
"or",
"example",
"data"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/load.py#L30-L129 | train | 214,843 |
ContextLab/hypertools | hypertools/datageometry.py | DataGeometry.transform | def transform(self, data=None):
"""
Return transformed data, or transform new data using the same model
parameters
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to transform. If no data is passed, the xform_data from
the DataGeometry object will be returned.
Returns
----------
xformed_data : list of numpy arrays
The transformed data
"""
# if no new data passed,
if data is None:
return self.xform_data
else:
formatted = format_data(
data,
semantic=self.semantic,
vectorizer=self.vectorizer,
corpus=self.corpus,
ppca=True)
norm = normalizer(formatted, normalize=self.normalize)
reduction = reducer(
norm,
reduce=self.reduce,
ndims=self.reduce['params']['n_components'])
return aligner(reduction, align=self.align) | python | def transform(self, data=None):
"""
Return transformed data, or transform new data using the same model
parameters
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to transform. If no data is passed, the xform_data from
the DataGeometry object will be returned.
Returns
----------
xformed_data : list of numpy arrays
The transformed data
"""
# if no new data passed,
if data is None:
return self.xform_data
else:
formatted = format_data(
data,
semantic=self.semantic,
vectorizer=self.vectorizer,
corpus=self.corpus,
ppca=True)
norm = normalizer(formatted, normalize=self.normalize)
reduction = reducer(
norm,
reduce=self.reduce,
ndims=self.reduce['params']['n_components'])
return aligner(reduction, align=self.align) | [
"def",
"transform",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"# if no new data passed,",
"if",
"data",
"is",
"None",
":",
"return",
"self",
".",
"xform_data",
"else",
":",
"formatted",
"=",
"format_data",
"(",
"data",
",",
"semantic",
"=",
"self",
... | Return transformed data, or transform new data using the same model
parameters
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to transform. If no data is passed, the xform_data from
the DataGeometry object will be returned.
Returns
----------
xformed_data : list of numpy arrays
The transformed data | [
"Return",
"transformed",
"data",
"or",
"transform",
"new",
"data",
"using",
"the",
"same",
"model",
"parameters"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/datageometry.py#L110-L142 | train | 214,844 |
def save(self, fname, compression='blosc'):
    """
    Save method for the data geometry object.

    The data will be saved as a 'geo' file, which is a dictionary containing
    the elements of a data geometry object saved in the hd5 format using
    `deepdish`.

    Parameters
    ----------
    fname : str
        A name for the file. If the file extension (.geo) is not specified,
        it will be appended.

    compression : str
        The kind of compression to use. See the deepdish documentation for
        options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
    """
    # Default to the raw data; convert list/df data into hdf5-friendly
    # structures when a dtype hint is available.
    # BUG FIX: previously `data` was only assigned inside the dtype
    # branches, so geos without a recognized `dtype` raised NameError here,
    # and `self.dtype` was read unconditionally below, raising
    # AttributeError for objects lacking the attribute.
    data = self.data
    if hasattr(self, 'dtype'):
        if 'list' in self.dtype:
            data = np.array(self.data)
        elif 'df' in self.dtype:
            # store the dataframe as a dict of string arrays
            data = {k: np.array(v).astype('str')
                    for k, v in self.data.to_dict('list').items()}

    # put geo vars into a dict
    geo = {
        'data': data,
        'xform_data': np.array(self.xform_data),
        'reduce': self.reduce,
        'align': self.align,
        'normalize': self.normalize,
        'semantic': self.semantic,
        'corpus': np.array(self.corpus) if isinstance(self.corpus, list) else self.corpus,
        'kwargs': self.kwargs,
        'version': self.version,
        'dtype': getattr(self, 'dtype', None)
    }

    # if extension wasn't included, add it
    if fname[-4:] != '.geo':
        fname += '.geo'

    # save
    dd.io.save(fname, geo, compression=compression)
"""
Save method for the data geometry object
The data will be saved as a 'geo' file, which is a dictionary containing
the elements of a data geometry object saved in the hd5 format using
`deepdish`.
Parameters
----------
fname : str
A name for the file. If the file extension (.geo) is not specified,
it will be appended.
compression : str
The kind of compression to use. See the deepdish documentation for
options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
"""
if hasattr(self, 'dtype'):
if 'list' in self.dtype:
data = np.array(self.data)
elif 'df' in self.dtype:
data = {k: np.array(v).astype('str') for k, v in self.data.to_dict('list').items()}
else:
data = self.data
# put geo vars into a dict
geo = {
'data' : data,
'xform_data' : np.array(self.xform_data),
'reduce' : self.reduce,
'align' : self.align,
'normalize' : self.normalize,
'semantic' : self.semantic,
'corpus' : np.array(self.corpus) if isinstance(self.corpus, list) else self.corpus,
'kwargs' : self.kwargs,
'version' : self.version,
'dtype' : self.dtype
}
# if extension wasn't included, add it
if fname[-4:]!='.geo':
fname+='.geo'
# save
dd.io.save(fname, geo, compression=compression) | [
"def",
"save",
"(",
"self",
",",
"fname",
",",
"compression",
"=",
"'blosc'",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'dtype'",
")",
":",
"if",
"'list'",
"in",
"self",
".",
"dtype",
":",
"data",
"=",
"np",
".",
"array",
"(",
"self",
".",
"d... | Save method for the data geometry object
The data will be saved as a 'geo' file, which is a dictionary containing
the elements of a data geometry object saved in the hd5 format using
`deepdish`.
Parameters
----------
fname : str
A name for the file. If the file extension (.geo) is not specified,
it will be appended.
compression : str
The kind of compression to use. See the deepdish documentation for
options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save | [
"Save",
"method",
"for",
"the",
"data",
"geometry",
"object"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/datageometry.py#L191-L238 | train | 214,845 |
def analyze(data, normalize=None, reduce=None, ndims=None, align=None, internal=False):
    """
    Wrapper function for normalize -> reduce -> align transformations.

    Parameters
    ----------
    data : numpy array, pandas df, or list of arrays/dfs
        The data to analyze

    normalize : str or False or None
        If set to 'across', the columns of the input data will be z-scored
        across lists (default). If set to 'within', the columns will be
        z-scored within each list that is passed. If set to 'row', each row
        of the input data will be z-scored. If set to False, the input data
        will be returned with no z-scoring.

    reduce : str or dict
        Decomposition/manifold learning model to use. Can be passed as a
        string, but for finer control of the model parameters, pass as a
        dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
        See scikit-learn specific model docs for details on parameters
        supported for each model.

    ndims : int
        Number of dimensions to reduce

    align : str or dict
        If str, either 'hyper' or 'SRM'. You can also pass a dictionary for
        finer control, with 'model' and 'params' keys (default : 'hyper').

    internal : bool
        Passed through to the normalize and reduce steps.

    Returns
    ----------
    analyzed_data : list of numpy arrays
        The processed data
    """
    # Apply each stage in sequence, feeding the output of one into the next:
    # normalize first, then reduce dimensionality, then align.
    normed = normalizer(data, normalize=normalize, internal=internal)
    reduced = reducer(normed, reduce=reduce, ndims=ndims, internal=internal)
    return aligner(reduced, align=align)
"""
Wrapper function for normalize -> reduce -> align transformations.
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
The data to analyze
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
analyzed_data : list of numpy arrays
The processed data
"""
# return processed data
return aligner(reducer(normalizer(data, normalize=normalize, internal=internal),
reduce=reduce, ndims=ndims, internal=internal), align=align) | [
"def",
"analyze",
"(",
"data",
",",
"normalize",
"=",
"None",
",",
"reduce",
"=",
"None",
",",
"ndims",
"=",
"None",
",",
"align",
"=",
"None",
",",
"internal",
"=",
"False",
")",
":",
"# return processed data",
"return",
"aligner",
"(",
"reducer",
"(",
... | Wrapper function for normalize -> reduce -> align transformations.
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
The data to analyze
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
analyzed_data : list of numpy arrays
The processed data | [
"Wrapper",
"function",
"for",
"normalize",
"-",
">",
"reduce",
"-",
">",
"align",
"transformations",
"."
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/analyze.py#L8-L54 | train | 214,846 |
def reduce(x, reduce='IncrementalPCA', ndims=None, normalize=None, align=None,
           model=None, model_params=None, internal=False, format_data=True):
    """
    Reduces dimensionality of an array, or list of arrays.

    Parameters
    ----------
    x : Numpy array or list of arrays
        Dimensionality reduction using PCA is performed on this array.

    reduce : str or dict
        Decomposition/manifold learning model to use. Models supported: PCA,
        IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
        FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
        TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, MDS and UMAP.
        Can be passed as a string, but for finer control of the model
        parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA',
        'params' : {'whiten' : True}}. See scikit-learn specific model docs
        for details on parameters supported for each model.

    ndims : int
        Number of dimensions to reduce

    format_data : bool
        Whether or not to first call the format_data function (default: True).

    model : None
        Deprecated argument. Please use reduce.

    model_params : None
        Deprecated argument. Please use reduce.

    align : None
        Deprecated argument. Please use new analyze function to perform
        combinations of transformations

    normalize : None
        Deprecated argument. Please use new analyze function to perform
        combinations of transformations

    Returns
    ----------
    x_reduced : Numpy array or list of arrays
        The reduced data with ndims dimensionality is returned. If the input
        is a list, a list is returned.
    """
    # Back-compat shim: fold the deprecated model/model_params arguments
    # into the newer dict form of `reduce`.
    # deprecated warning
    if (model is not None) or (model_params is not None):
        warnings.warn('Model and model params will be deprecated. Please use the \
            reduce keyword. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce')
        reduce = {}
        reduce['model'] = model
        reduce['params'] = model_params

    # if model is None, just return data
    if reduce is None:
        return x
    else:
        # common format: coerce input to a list of 2d arrays (with PPCA
        # interpolation of missing values)
        if format_data:
            x = formatter(x, ppca=True)

        # A single total row cannot be meaningfully reduced.
        # NOTE(review): if ndims is None here, np.zeros((1, ndims)) will
        # raise — confirm callers always pass ndims in this situation.
        if np.vstack([i for i in x]).shape[0]==1:
            warnings.warn('Cannot reduce the dimensionality of a single row of'
                          ' data. Return zeros length of ndims')
            return [np.zeros((1, ndims))]

        # Fewer total rows than requested dims: models will cap the output
        # at the number of rows, so warn but continue.
        if ndims:
            if np.vstack([i for i in x]).shape[0]<ndims:
                warnings.warn('The number of rows in your data is less than ndims.'
                              ' The data will be reduced to the number of rows.')

        # Deprecated pre-processing arguments: apply them here for
        # back-compat, but steer users toward analyze().
        # deprecation warnings
        if normalize is not None:
            warnings.warn('The normalize argument will be deprecated for this function. Please use the \
                analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')
            x = normalizer(x, normalize=normalize)

        if align is not None:
            warnings.warn('The align argument will be deprecated for this function. Please use the \
                analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')
            x = aligner(x, align=align)

        # if the shape of the data is already less than ndims, just return it
        if ndims is None:
            return x
        elif all([i.shape[1]<=ndims for i in x]):
            return x

        # if reduce is a string, find the corresponding model
        if type(reduce) in [str, np.string_]:
            model = models[reduce]
            model_params = {
                'n_components' : ndims
            }
        # if its a dict, use custom params
        elif type(reduce) is dict:
            # NOTE(review): if reduce['model'] is not a string, `model` is
            # left unbound and the call below raises NameError — confirm
            # whether pre-instantiated models were meant to be supported.
            if isinstance((reduce['model']), six.string_types):
                model = models[reduce['model']]
                if reduce['params'] is None:
                    model_params = {
                        'n_components' : ndims
                    }
                else:
                    model_params = reduce['params']
                # NOTE(review): when ndims is truthy this discards any
                # user-supplied params and keeps only n_components —
                # confirm this override is intended.
                if ndims:
                    model_params = {
                        'n_components' : ndims
                    }

        # initialize model
        model = model(**model_params)

        # reduce data: fit/transform each array in the list with the model
        x_reduced = reduce_list(x, model)

        # return data: unwrap a single-element list unless the caller is
        # internal machinery that expects a list
        if internal or len(x_reduced)>1:
            return x_reduced
        else:
            return x_reduced[0]
model=None, model_params=None, internal=False, format_data=True):
"""
Reduces dimensionality of an array, or list of arrays
Parameters
----------
x : Numpy array or list of arrays
Dimensionality reduction using PCA is performed on this array.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, MDS and UMAP.
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA',
'params' : {'whiten' : True}}. See scikit-learn specific model docs
for details on parameters supported for each model.
ndims : int
Number of dimensions to reduce
format_data : bool
Whether or not to first call the format_data function (default: True).
model : None
Deprecated argument. Please use reduce.
model_params : None
Deprecated argument. Please use reduce.
align : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
normalize : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
x_reduced : Numpy array or list of arrays
The reduced data with ndims dimensionality is returned. If the input
is a list, a list is returned.
"""
# deprecated warning
if (model is not None) or (model_params is not None):
warnings.warn('Model and model params will be deprecated. Please use the \
reduce keyword. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce')
reduce = {}
reduce['model'] = model
reduce['params'] = model_params
# if model is None, just return data
if reduce is None:
return x
else:
# common format
if format_data:
x = formatter(x, ppca=True)
if np.vstack([i for i in x]).shape[0]==1:
warnings.warn('Cannot reduce the dimensionality of a single row of'
' data. Return zeros length of ndims')
return [np.zeros((1, ndims))]
if ndims:
if np.vstack([i for i in x]).shape[0]<ndims:
warnings.warn('The number of rows in your data is less than ndims.'
' The data will be reduced to the number of rows.')
# deprecation warnings
if normalize is not None:
warnings.warn('The normalize argument will be deprecated for this function. Please use the \
analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')
x = normalizer(x, normalize=normalize)
if align is not None:
warnings.warn('The align argument will be deprecated for this function. Please use the \
analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')
x = aligner(x, align=align)
# if the shape of the data is already less than ndims, just return it
if ndims is None:
return x
elif all([i.shape[1]<=ndims for i in x]):
return x
# if reduce is a string, find the corresponding model
if type(reduce) in [str, np.string_]:
model = models[reduce]
model_params = {
'n_components' : ndims
}
# if its a dict, use custom params
elif type(reduce) is dict:
if isinstance((reduce['model']), six.string_types):
model = models[reduce['model']]
if reduce['params'] is None:
model_params = {
'n_components' : ndims
}
else:
model_params = reduce['params']
if ndims:
model_params = {
'n_components' : ndims
}
# initialize model
model = model(**model_params)
# reduce data
x_reduced = reduce_list(x, model)
# return data
if internal or len(x_reduced)>1:
return x_reduced
else:
return x_reduced[0] | [
"def",
"reduce",
"(",
"x",
",",
"reduce",
"=",
"'IncrementalPCA'",
",",
"ndims",
"=",
"None",
",",
"normalize",
"=",
"None",
",",
"align",
"=",
"None",
",",
"model",
"=",
"None",
",",
"model_params",
"=",
"None",
",",
"internal",
"=",
"False",
",",
"... | Reduces dimensionality of an array, or list of arrays
Parameters
----------
x : Numpy array or list of arrays
Dimensionality reduction using PCA is performed on this array.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, MDS and UMAP.
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA',
'params' : {'whiten' : True}}. See scikit-learn specific model docs
for details on parameters supported for each model.
ndims : int
Number of dimensions to reduce
format_data : bool
Whether or not to first call the format_data function (default: True).
model : None
Deprecated argument. Please use reduce.
model_params : None
Deprecated argument. Please use reduce.
align : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
normalize : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
x_reduced : Numpy array or list of arrays
The reduced data with ndims dimensionality is returned. If the input
is a list, a list is returned. | [
"Reduces",
"dimensionality",
"of",
"an",
"array",
"or",
"list",
"of",
"arrays"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/reduce.py#L36-L158 | train | 214,847 |
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
    """
    Performs clustering analysis and returns a list of cluster labels.

    Parameters
    ----------
    x : A Numpy array, Pandas Dataframe or list of arrays/dfs
        The data to be clustered. You can pass a single array/df or a list.
        If a list is passed, the arrays will be stacked and the clustering
        will be performed across all lists (i.e. not within each list).

    cluster : str or dict
        Model to use to discover clusters. Supported algorithms are: KMeans,
        MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
        SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
        string, but for finer control of the model parameters, pass as a
        dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
        See scikit-learn specific model docs for details on parameters
        supported for each model.

    n_clusters : int
        Number of clusters to discover. Not required for HDBSCAN.

    format_data : bool
        Whether or not to first call the format_data function (default: True).

    ndims : None
        Deprecated argument. Please use new analyze function to perform
        combinations of transformations.

    Returns
    ----------
    cluster_labels : list
        A list of cluster labels
    """
    # No model requested: pass the data straight through.
    if cluster is None:
        return x
    elif (isinstance(cluster, six.string_types) and cluster == 'HDBSCAN') or \
            (isinstance(cluster, dict) and cluster['model'] == 'HDBSCAN'):
        # HDBSCAN is an optional dependency; fail early with a clear message.
        if not _has_hdbscan:
            raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')

    if ndims is not None:
        warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')

    if format_data:
        x = formatter(x, ppca=True)

    # Resolve the model class and its parameters from the `cluster` argument.
    if isinstance(cluster, six.string_types):
        model = models[cluster]
        if cluster != 'HDBSCAN':
            model_params = {
                'n_clusters': n_clusters
            }
        else:
            # HDBSCAN discovers the number of clusters itself
            model_params = {}
    elif isinstance(cluster, dict):
        if isinstance(cluster['model'], six.string_types):
            model = models[cluster['model']]
            model_params = cluster['params']
        else:
            # BUG FIX: previously a non-string 'model' left `model` unbound
            # and crashed later with a confusing NameError.
            raise ValueError("cluster['model'] must be the name of a "
                             "supported clustering model")
    else:
        # BUG FIX: previously any other type left `model` unbound as well.
        raise TypeError('cluster must be None, a string, or a dict with '
                        "'model' and 'params' keys")

    # initialize model
    model = model(**model_params)

    # fit the model on the stacked data
    model.fit(np.vstack(x))

    # return the labels
    return list(model.labels_)
"""
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Support algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels
"""
if cluster == None:
return x
elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \
(isinstance(cluster, dict) and cluster['model']=='HDBSCAN'):
if not _has_hdbscan:
raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')
if ndims != None:
warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')
if format_data:
x = formatter(x, ppca=True)
# if reduce is a string, find the corresponding model
if isinstance(cluster, six.string_types):
model = models[cluster]
if cluster != 'HDBSCAN':
model_params = {
'n_clusters' : n_clusters
}
else:
model_params = {}
# if its a dict, use custom params
elif type(cluster) is dict:
if isinstance(cluster['model'], six.string_types):
model = models[cluster['model']]
model_params = cluster['params']
# initialize model
model = model(**model_params)
# fit the model
model.fit(np.vstack(x))
# return the labels
return list(model.labels_) | [
"def",
"cluster",
"(",
"x",
",",
"cluster",
"=",
"'KMeans'",
",",
"n_clusters",
"=",
"3",
",",
"ndims",
"=",
"None",
",",
"format_data",
"=",
"True",
")",
":",
"if",
"cluster",
"==",
"None",
":",
"return",
"x",
"elif",
"(",
"isinstance",
"(",
"cluste... | Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Support algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels
"Performs",
"clustering",
"analysis",
"and",
"returns",
"a",
"list",
"of",
"cluster",
"labels"
] | b76c7ac8061998b560e969ff8e4f4c915088e7a0 | https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/cluster.py#L28-L100 | train | 214,848 |
def build_grab_exception(ex, curl):
    """
    Build Grab exception from the pycurl exception

    Args:
        ex - the original pycurl exception
        curl - the Curl instance raised the exception
    """
    code, msg = ex.args[0], ex.args[1]

    # CURLE_WRITE_ERROR (23): raised when a write callback aborts the
    # transfer. Grab aborts deliberately (nohead/nobody/body_maxsize
    # options), marking the handle with `grab_callback_interrupted`; in
    # that case the "error" is expected and must be swallowed.
    # Code 23 is also what pycurl raises when a KeyboardInterrupt fires
    # inside a callback (WRITEFUNCTION, HEADERFUNCTION, etc.) — see
    # https://github.com/pycurl/pycurl/issues/413
    if code == 23:
        if getattr(curl, 'grab_callback_interrupted', None) is True:
            return None
        return error.GrabNetworkError(msg, ex)

    # Translate the remaining well-known libcurl codes to specific Grab
    # exceptions; anything unrecognized becomes a generic network error.
    specific = {
        28: error.GrabTimeoutError,              # CURLE_OPERATION_TIMEDOUT
        7: error.GrabConnectionError,            # CURLE_COULDNT_CONNECT
        67: error.GrabAuthError,                 # CURLE_LOGIN_DENIED
        47: error.GrabTooManyRedirectsError,     # CURLE_TOO_MANY_REDIRECTS
        6: error.GrabCouldNotResolveHostError,   # CURLE_COULDNT_RESOLVE_HOST
        3: error.GrabInvalidUrl,                 # CURLE_URL_MALFORMAT
    }
    exc_cls = specific.get(code, error.GrabNetworkError)
    return exc_cls(msg, ex)
"""
Build Grab exception from the pycurl exception
Args:
ex - the original pycurl exception
curl - the Curl instance raised the exception
"""
# CURLE_WRITE_ERROR (23)
# An error occurred when writing received data to a local file, or
# an error was returned to libcurl from a write callback.
# This exception should be ignored if grab_callback_interrupted
# flag # is enabled (this happens when nohead or nobody options
# enabled)
#
# Also this error is raised when curl receives KeyboardInterrupt
# while it is processing some callback function
# (WRITEFUNCTION, HEADERFUNCTIO, etc)
# If you think WTF then see details here:
# https://github.com/pycurl/pycurl/issues/413
if ex.args[0] == 23:
if getattr(curl, 'grab_callback_interrupted', None) is True:
# If the execution of body_process callback is
# interrupted (body_maxsize, nobody and other options)
# then the pycurl raised exception with code 23
# We should ignore it
return None
else:
return error.GrabNetworkError(ex.args[1], ex)
else:
if ex.args[0] == 28:
return error.GrabTimeoutError(ex.args[1], ex)
elif ex.args[0] == 7:
return error.GrabConnectionError(ex.args[1], ex)
elif ex.args[0] == 67:
return error.GrabAuthError(ex.args[1], ex)
elif ex.args[0] == 47:
return error.GrabTooManyRedirectsError(ex.args[1], ex)
elif ex.args[0] == 6:
return error.GrabCouldNotResolveHostError(ex.args[1], ex)
elif ex.args[0] == 3:
return error.GrabInvalidUrl(ex.args[1], ex)
else:
return error.GrabNetworkError(ex.args[1], ex) | [
"def",
"build_grab_exception",
"(",
"ex",
",",
"curl",
")",
":",
"# CURLE_WRITE_ERROR (23)",
"# An error occurred when writing received data to a local file, or",
"# an error was returned to libcurl from a write callback.",
"# This exception should be ignored if grab_callback_interrupted",
"... | Build Grab exception from the pycurl exception
Args:
ex - the original pycurl exception
curl - the Curl instance raised the exception | [
"Build",
"Grab",
"exception",
"from",
"the",
"pycurl",
"exception"
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/transport/curl.py#L587-L630 | train | 214,849 |
lorien/grab | grab/transport/curl.py | CurlTransport.body_processor | def body_processor(self, chunk):
"""
Process body of response.
"""
if self.config_nobody:
self.curl.grab_callback_interrupted = True
return 0
bytes_read = len(chunk)
self.response_body_bytes_read += bytes_read
if self.body_file:
self.body_file.write(chunk)
else:
self.response_body_chunks.append(chunk)
if self.config_body_maxsize is not None:
if self.response_body_bytes_read > self.config_body_maxsize:
logger.debug('Response body max size limit reached: %s',
self.config_body_maxsize)
self.curl.grab_callback_interrupted = True
return 0
# Returning None implies that all bytes were written
return None | python | def body_processor(self, chunk):
"""
Process body of response.
"""
if self.config_nobody:
self.curl.grab_callback_interrupted = True
return 0
bytes_read = len(chunk)
self.response_body_bytes_read += bytes_read
if self.body_file:
self.body_file.write(chunk)
else:
self.response_body_chunks.append(chunk)
if self.config_body_maxsize is not None:
if self.response_body_bytes_read > self.config_body_maxsize:
logger.debug('Response body max size limit reached: %s',
self.config_body_maxsize)
self.curl.grab_callback_interrupted = True
return 0
# Returning None implies that all bytes were written
return None | [
"def",
"body_processor",
"(",
"self",
",",
"chunk",
")",
":",
"if",
"self",
".",
"config_nobody",
":",
"self",
".",
"curl",
".",
"grab_callback_interrupted",
"=",
"True",
"return",
"0",
"bytes_read",
"=",
"len",
"(",
"chunk",
")",
"self",
".",
"response_bo... | Process body of response. | [
"Process",
"body",
"of",
"response",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/transport/curl.py#L134-L158 | train | 214,850 |
lorien/grab | grab/transport/curl.py | CurlTransport.debug_processor | def debug_processor(self, _type, text):
"""
Process request details.
0: CURLINFO_TEXT
1: CURLINFO_HEADER_IN
2: CURLINFO_HEADER_OUT
3: CURLINFO_DATA_IN
4: CURLINFO_DATA_OUT
5: CURLINFO_unrecognized_type
"""
if _type == pycurl.INFOTYPE_HEADER_OUT:
if isinstance(text, six.text_type):
text = text.encode('utf-8')
self.request_head += text
if _type == pycurl.INFOTYPE_DATA_OUT:
# Untill 7.19.5.2 version
# pycurl gives unicode in `text` variable
# WTF??? Probably that codes would fails
# or does unexpected things if you use
# pycurl<7.19.5.2
if isinstance(text, six.text_type):
text = text.encode('utf-8')
self.request_body += text
#if _type == pycurl.INFOTYPE_TEXT:
# if self.request_log is None:
# self.request_log = ''
# self.request_log += text
if self.verbose_logging:
if _type in (pycurl.INFOTYPE_TEXT, pycurl.INFOTYPE_HEADER_IN,
pycurl.INFOTYPE_HEADER_OUT):
marker_types = {
pycurl.INFOTYPE_TEXT: 'i',
pycurl.INFOTYPE_HEADER_IN: '<',
pycurl.INFOTYPE_HEADER_OUT: '>',
}
marker = marker_types[_type]
logger.debug('%s: %s', marker, text.rstrip()) | python | def debug_processor(self, _type, text):
"""
Process request details.
0: CURLINFO_TEXT
1: CURLINFO_HEADER_IN
2: CURLINFO_HEADER_OUT
3: CURLINFO_DATA_IN
4: CURLINFO_DATA_OUT
5: CURLINFO_unrecognized_type
"""
if _type == pycurl.INFOTYPE_HEADER_OUT:
if isinstance(text, six.text_type):
text = text.encode('utf-8')
self.request_head += text
if _type == pycurl.INFOTYPE_DATA_OUT:
# Untill 7.19.5.2 version
# pycurl gives unicode in `text` variable
# WTF??? Probably that codes would fails
# or does unexpected things if you use
# pycurl<7.19.5.2
if isinstance(text, six.text_type):
text = text.encode('utf-8')
self.request_body += text
#if _type == pycurl.INFOTYPE_TEXT:
# if self.request_log is None:
# self.request_log = ''
# self.request_log += text
if self.verbose_logging:
if _type in (pycurl.INFOTYPE_TEXT, pycurl.INFOTYPE_HEADER_IN,
pycurl.INFOTYPE_HEADER_OUT):
marker_types = {
pycurl.INFOTYPE_TEXT: 'i',
pycurl.INFOTYPE_HEADER_IN: '<',
pycurl.INFOTYPE_HEADER_OUT: '>',
}
marker = marker_types[_type]
logger.debug('%s: %s', marker, text.rstrip()) | [
"def",
"debug_processor",
"(",
"self",
",",
"_type",
",",
"text",
")",
":",
"if",
"_type",
"==",
"pycurl",
".",
"INFOTYPE_HEADER_OUT",
":",
"if",
"isinstance",
"(",
"text",
",",
"six",
".",
"text_type",
")",
":",
"text",
"=",
"text",
".",
"encode",
"("... | Process request details.
0: CURLINFO_TEXT
1: CURLINFO_HEADER_IN
2: CURLINFO_HEADER_OUT
3: CURLINFO_DATA_IN
4: CURLINFO_DATA_OUT
5: CURLINFO_unrecognized_type | [
"Process",
"request",
"details",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/transport/curl.py#L160-L200 | train | 214,851 |
lorien/grab | grab/transport/curl.py | CurlTransport.extract_cookiejar | def extract_cookiejar(self):
"""
Extract cookies that pycurl instance knows.
Returns `CookieJar` object.
"""
# Example of line:
# www.google.com\tFALSE\t/accounts/\tFALSE\t0'
# \tGoogleAccountsLocale_session\ten
# Fields:
# * domain
# * whether or not all machines under that domain can
# read the cookie's information.
# * path
# * Secure Flag: whether or not a secure connection (HTTPS)
# is required to read the cookie.
# * exp. timestamp
# * name
# * value
cookiejar = CookieJar()
for line in self.curl.getinfo(pycurl.INFO_COOKIELIST):
values = line.split('\t')
domain = values[0].lower()
if domain.startswith('#httponly_'):
domain = domain.replace('#httponly_', '')
httponly = True
else:
httponly = False
# old
# cookies[values[-2]] = values[-1]
# new
cookie = create_cookie(
name=values[5],
value=values[6],
domain=domain,
path=values[2],
secure=values[3] == "TRUE",
expires=int(values[4]) if values[4] else None,
httponly=httponly,
)
cookiejar.set_cookie(cookie)
return cookiejar | python | def extract_cookiejar(self):
"""
Extract cookies that pycurl instance knows.
Returns `CookieJar` object.
"""
# Example of line:
# www.google.com\tFALSE\t/accounts/\tFALSE\t0'
# \tGoogleAccountsLocale_session\ten
# Fields:
# * domain
# * whether or not all machines under that domain can
# read the cookie's information.
# * path
# * Secure Flag: whether or not a secure connection (HTTPS)
# is required to read the cookie.
# * exp. timestamp
# * name
# * value
cookiejar = CookieJar()
for line in self.curl.getinfo(pycurl.INFO_COOKIELIST):
values = line.split('\t')
domain = values[0].lower()
if domain.startswith('#httponly_'):
domain = domain.replace('#httponly_', '')
httponly = True
else:
httponly = False
# old
# cookies[values[-2]] = values[-1]
# new
cookie = create_cookie(
name=values[5],
value=values[6],
domain=domain,
path=values[2],
secure=values[3] == "TRUE",
expires=int(values[4]) if values[4] else None,
httponly=httponly,
)
cookiejar.set_cookie(cookie)
return cookiejar | [
"def",
"extract_cookiejar",
"(",
"self",
")",
":",
"# Example of line:",
"# www.google.com\\tFALSE\\t/accounts/\\tFALSE\\t0'",
"# \\tGoogleAccountsLocale_session\\ten",
"# Fields:",
"# * domain",
"# * whether or not all machines under that domain can",
"# read the cookie's information.",
"... | Extract cookies that pycurl instance knows.
Returns `CookieJar` object. | [
"Extract",
"cookies",
"that",
"pycurl",
"instance",
"knows",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/transport/curl.py#L525-L567 | train | 214,852 |
lorien/grab | grab/util/log.py | default_logging | def default_logging(grab_log=None, # '/tmp/grab.log',
network_log=None, # '/tmp/grab.network.log',
level=logging.DEBUG, mode='a',
propagate_network_logger=False):
"""
Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file.
"""
logging.basicConfig(level=level)
network_logger = logging.getLogger('grab.network')
network_logger.propagate = propagate_network_logger
if network_log:
hdl = logging.FileHandler(network_log, mode)
network_logger.addHandler(hdl)
network_logger.setLevel(level)
grab_logger = logging.getLogger('grab')
if grab_log:
hdl = logging.FileHandler(grab_log, mode)
grab_logger.addHandler(hdl)
grab_logger.setLevel(level) | python | def default_logging(grab_log=None, # '/tmp/grab.log',
network_log=None, # '/tmp/grab.network.log',
level=logging.DEBUG, mode='a',
propagate_network_logger=False):
"""
Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file.
"""
logging.basicConfig(level=level)
network_logger = logging.getLogger('grab.network')
network_logger.propagate = propagate_network_logger
if network_log:
hdl = logging.FileHandler(network_log, mode)
network_logger.addHandler(hdl)
network_logger.setLevel(level)
grab_logger = logging.getLogger('grab')
if grab_log:
hdl = logging.FileHandler(grab_log, mode)
grab_logger.addHandler(hdl)
grab_logger.setLevel(level) | [
"def",
"default_logging",
"(",
"grab_log",
"=",
"None",
",",
"# '/tmp/grab.log',",
"network_log",
"=",
"None",
",",
"# '/tmp/grab.network.log',",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"mode",
"=",
"'a'",
",",
"propagate_network_logger",
"=",
"False",
")",
... | Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file. | [
"Customize",
"logging",
"output",
"to",
"display",
"all",
"log",
"messages",
"except",
"grab",
"network",
"logs",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/util/log.py#L7-L31 | train | 214,853 |
lorien/grab | grab/script/crawl.py | save_list | def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
out.write(b'\n'.join(lines) + b'\n') | python | def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
out.write(b'\n'.join(lines) + b'\n') | [
"def",
"save_list",
"(",
"lst",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"out",
":",
"lines",
"=",
"[",
"]",
"for",
"item",
"in",
"lst",
":",
"if",
"isinstance",
"(",
"item",
",",
"(",
"six",
".",
"text_type",
... | Save items from list to the file. | [
"Save",
"items",
"from",
"list",
"to",
"the",
"file",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/script/crawl.py#L57-L69 | train | 214,854 |
lorien/grab | grab/proxylist.py | parse_proxy_line | def parse_proxy_line(line):
"""
Parse proxy details from the raw text line.
The text line could be in one of the following formats:
* host:port
* host:port:username:password
"""
line = line.strip()
match = RE_SIMPLE_PROXY.search(line)
if match:
return match.group(1), match.group(2), None, None
match = RE_AUTH_PROXY.search(line)
if match:
host, port, user, pwd = match.groups()
return host, port, user, pwd
raise InvalidProxyLine('Invalid proxy line: %s' % line) | python | def parse_proxy_line(line):
"""
Parse proxy details from the raw text line.
The text line could be in one of the following formats:
* host:port
* host:port:username:password
"""
line = line.strip()
match = RE_SIMPLE_PROXY.search(line)
if match:
return match.group(1), match.group(2), None, None
match = RE_AUTH_PROXY.search(line)
if match:
host, port, user, pwd = match.groups()
return host, port, user, pwd
raise InvalidProxyLine('Invalid proxy line: %s' % line) | [
"def",
"parse_proxy_line",
"(",
"line",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"match",
"=",
"RE_SIMPLE_PROXY",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"return",
"match",
".",
"group",
"(",
"1",
")",
",",
"match",
".",
... | Parse proxy details from the raw text line.
The text line could be in one of the following formats:
* host:port
* host:port:username:password | [
"Parse",
"proxy",
"details",
"from",
"the",
"raw",
"text",
"line",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/proxylist.py#L32-L51 | train | 214,855 |
lorien/grab | grab/proxylist.py | parse_raw_list_data | def parse_raw_list_data(data, proxy_type='http', proxy_userpwd=None):
"""Iterate over proxy servers found in the raw data"""
if not isinstance(data, six.text_type):
data = data.decode('utf-8')
for orig_line in data.splitlines():
line = orig_line.strip().replace(' ', '')
if line and not line.startswith('#'):
try:
host, port, username, password = parse_proxy_line(line)
except InvalidProxyLine as ex:
logger.error(ex)
else:
if username is None and proxy_userpwd is not None:
username, password = proxy_userpwd.split(':')
yield Proxy(host, port, username, password, proxy_type) | python | def parse_raw_list_data(data, proxy_type='http', proxy_userpwd=None):
"""Iterate over proxy servers found in the raw data"""
if not isinstance(data, six.text_type):
data = data.decode('utf-8')
for orig_line in data.splitlines():
line = orig_line.strip().replace(' ', '')
if line and not line.startswith('#'):
try:
host, port, username, password = parse_proxy_line(line)
except InvalidProxyLine as ex:
logger.error(ex)
else:
if username is None and proxy_userpwd is not None:
username, password = proxy_userpwd.split(':')
yield Proxy(host, port, username, password, proxy_type) | [
"def",
"parse_raw_list_data",
"(",
"data",
",",
"proxy_type",
"=",
"'http'",
",",
"proxy_userpwd",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"six",
".",
"text_type",
")",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
... | Iterate over proxy servers found in the raw data | [
"Iterate",
"over",
"proxy",
"servers",
"found",
"in",
"the",
"raw",
"data"
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/proxylist.py#L54-L68 | train | 214,856 |
lorien/grab | grab/proxylist.py | ProxyList.load | def load(self):
"""Load proxy list from configured proxy source"""
self._list = self._source.load()
self._list_iter = itertools.cycle(self._list) | python | def load(self):
"""Load proxy list from configured proxy source"""
self._list = self._source.load()
self._list_iter = itertools.cycle(self._list) | [
"def",
"load",
"(",
"self",
")",
":",
"self",
".",
"_list",
"=",
"self",
".",
"_source",
".",
"load",
"(",
")",
"self",
".",
"_list_iter",
"=",
"itertools",
".",
"cycle",
"(",
"self",
".",
"_list",
")"
] | Load proxy list from configured proxy source | [
"Load",
"proxy",
"list",
"from",
"configured",
"proxy",
"source"
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/proxylist.py#L156-L159 | train | 214,857 |
lorien/grab | grab/proxylist.py | ProxyList.get_random_proxy | def get_random_proxy(self):
"""Return random proxy"""
idx = randint(0, len(self._list) - 1)
return self._list[idx] | python | def get_random_proxy(self):
"""Return random proxy"""
idx = randint(0, len(self._list) - 1)
return self._list[idx] | [
"def",
"get_random_proxy",
"(",
"self",
")",
":",
"idx",
"=",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_list",
")",
"-",
"1",
")",
"return",
"self",
".",
"_list",
"[",
"idx",
"]"
] | Return random proxy | [
"Return",
"random",
"proxy"
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/proxylist.py#L161-L164 | train | 214,858 |
lorien/grab | grab/spider/task.py | Task.clone | def clone(self, **kwargs):
"""
Clone Task instance.
Reset network_try_count, increase task_try_count.
Reset priority attribute if it was not set explicitly.
"""
# First, create exact copy of the current Task object
attr_copy = self.__dict__.copy()
if attr_copy.get('grab_config') is not None:
del attr_copy['url']
if not attr_copy['priority_set_explicitly']:
attr_copy['priority'] = None
task = Task(**attr_copy)
# Reset some task properties if they have not
# been set explicitly in kwargs
if 'network_try_count' not in kwargs:
task.network_try_count = 0
if 'task_try_count' not in kwargs:
task.task_try_count = self.task_try_count + 1
if 'refresh_cache' not in kwargs:
task.refresh_cache = False
if 'disable_cache' not in kwargs:
task.disable_cache = False
if kwargs.get('url') is not None and kwargs.get('grab') is not None:
raise SpiderMisuseError('Options url and grab could not be '
'used together')
if (kwargs.get('url') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options url and grab_config could not '
'be used together')
if (kwargs.get('grab') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options grab and grab_config could not '
'be used together')
if kwargs.get('grab'):
task.setup_grab_config(kwargs['grab'].dump_config())
del kwargs['grab']
elif kwargs.get('grab_config'):
task.setup_grab_config(kwargs['grab_config'])
del kwargs['grab_config']
elif kwargs.get('url'):
task.url = kwargs['url']
if task.grab_config:
task.grab_config['url'] = kwargs['url']
del kwargs['url']
for key, value in kwargs.items():
setattr(task, key, value)
task.process_delay_option(None)
return task | python | def clone(self, **kwargs):
"""
Clone Task instance.
Reset network_try_count, increase task_try_count.
Reset priority attribute if it was not set explicitly.
"""
# First, create exact copy of the current Task object
attr_copy = self.__dict__.copy()
if attr_copy.get('grab_config') is not None:
del attr_copy['url']
if not attr_copy['priority_set_explicitly']:
attr_copy['priority'] = None
task = Task(**attr_copy)
# Reset some task properties if they have not
# been set explicitly in kwargs
if 'network_try_count' not in kwargs:
task.network_try_count = 0
if 'task_try_count' not in kwargs:
task.task_try_count = self.task_try_count + 1
if 'refresh_cache' not in kwargs:
task.refresh_cache = False
if 'disable_cache' not in kwargs:
task.disable_cache = False
if kwargs.get('url') is not None and kwargs.get('grab') is not None:
raise SpiderMisuseError('Options url and grab could not be '
'used together')
if (kwargs.get('url') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options url and grab_config could not '
'be used together')
if (kwargs.get('grab') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options grab and grab_config could not '
'be used together')
if kwargs.get('grab'):
task.setup_grab_config(kwargs['grab'].dump_config())
del kwargs['grab']
elif kwargs.get('grab_config'):
task.setup_grab_config(kwargs['grab_config'])
del kwargs['grab_config']
elif kwargs.get('url'):
task.url = kwargs['url']
if task.grab_config:
task.grab_config['url'] = kwargs['url']
del kwargs['url']
for key, value in kwargs.items():
setattr(task, key, value)
task.process_delay_option(None)
return task | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# First, create exact copy of the current Task object",
"attr_copy",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"if",
"attr_copy",
".",
"get",
"(",
"'grab_config'",
")",
"is",
"not",
... | Clone Task instance.
Reset network_try_count, increase task_try_count.
Reset priority attribute if it was not set explicitly. | [
"Clone",
"Task",
"instance",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/spider/task.py#L170-L228 | train | 214,859 |
lorien/grab | grab/base.py | copy_config | def copy_config(config, mutable_config_keys=MUTABLE_CONFIG_KEYS):
"""
Copy grab config with correct handling of mutable config values.
"""
cloned_config = copy(config)
# Apply ``copy`` function to mutable config values
for key in mutable_config_keys:
cloned_config[key] = copy(config[key])
return cloned_config | python | def copy_config(config, mutable_config_keys=MUTABLE_CONFIG_KEYS):
"""
Copy grab config with correct handling of mutable config values.
"""
cloned_config = copy(config)
# Apply ``copy`` function to mutable config values
for key in mutable_config_keys:
cloned_config[key] = copy(config[key])
return cloned_config | [
"def",
"copy_config",
"(",
"config",
",",
"mutable_config_keys",
"=",
"MUTABLE_CONFIG_KEYS",
")",
":",
"cloned_config",
"=",
"copy",
"(",
"config",
")",
"# Apply ``copy`` function to mutable config values",
"for",
"key",
"in",
"mutable_config_keys",
":",
"cloned_config",
... | Copy grab config with correct handling of mutable config values. | [
"Copy",
"grab",
"config",
"with",
"correct",
"handling",
"of",
"mutable",
"config",
"values",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L60-L69 | train | 214,860 |
lorien/grab | grab/base.py | Grab.reset | def reset(self):
"""
Reset all attributes which could be modified during previous request
or which is not initialized yet if this is the new Grab instance.
This methods is automatically called before each network request.
"""
self.request_head = None
#self.request_log = None
self.request_body = None
self.request_method = None
self.request_counter = None
self.exception = None
if self.transport:
self.transport.reset() | python | def reset(self):
"""
Reset all attributes which could be modified during previous request
or which is not initialized yet if this is the new Grab instance.
This methods is automatically called before each network request.
"""
self.request_head = None
#self.request_log = None
self.request_body = None
self.request_method = None
self.request_counter = None
self.exception = None
if self.transport:
self.transport.reset() | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"request_head",
"=",
"None",
"#self.request_log = None",
"self",
".",
"request_body",
"=",
"None",
"self",
".",
"request_method",
"=",
"None",
"self",
".",
"request_counter",
"=",
"None",
"self",
".",
"exce... | Reset all attributes which could be modified during previous request
or which is not initialized yet if this is the new Grab instance.
This methods is automatically called before each network request. | [
"Reset",
"all",
"attributes",
"which",
"could",
"be",
"modified",
"during",
"previous",
"request",
"or",
"which",
"is",
"not",
"initialized",
"yet",
"if",
"this",
"is",
"the",
"new",
"Grab",
"instance",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L280-L295 | train | 214,861 |
lorien/grab | grab/base.py | Grab.clone | def clone(self, **kwargs):
"""
Create clone of Grab instance.
Cloned instance will have the same state: cookies, referrer, response
document data
:param **kwargs: overrides settings of cloned grab instance
"""
grab = Grab(transport=self.transport_param)
grab.config = self.dump_config()
grab.doc = self.doc.copy()
#grab.doc.grab = weakref.proxy(grab)
for key in self.clonable_attributes:
setattr(grab, key, getattr(self, key))
grab.cookies = deepcopy(self.cookies)
if kwargs:
grab.setup(**kwargs)
return grab | python | def clone(self, **kwargs):
"""
Create clone of Grab instance.
Cloned instance will have the same state: cookies, referrer, response
document data
:param **kwargs: overrides settings of cloned grab instance
"""
grab = Grab(transport=self.transport_param)
grab.config = self.dump_config()
grab.doc = self.doc.copy()
#grab.doc.grab = weakref.proxy(grab)
for key in self.clonable_attributes:
setattr(grab, key, getattr(self, key))
grab.cookies = deepcopy(self.cookies)
if kwargs:
grab.setup(**kwargs)
return grab | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"grab",
"=",
"Grab",
"(",
"transport",
"=",
"self",
".",
"transport_param",
")",
"grab",
".",
"config",
"=",
"self",
".",
"dump_config",
"(",
")",
"grab",
".",
"doc",
"=",
"self",
".",... | Create clone of Grab instance.
Cloned instance will have the same state: cookies, referrer, response
document data
:param **kwargs: overrides settings of cloned grab instance | [
"Create",
"clone",
"of",
"Grab",
"instance",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L297-L320 | train | 214,862 |
lorien/grab | grab/base.py | Grab.adopt | def adopt(self, grab):
"""
Copy the state of another `Grab` instance.
Use case: create backup of current state to the cloned instance and
then restore the state from it.
"""
self.load_config(grab.config)
self.doc = grab.doc.copy(new_grab=self)
for key in self.clonable_attributes:
setattr(self, key, getattr(grab, key))
self.cookies = deepcopy(grab.cookies) | python | def adopt(self, grab):
"""
Copy the state of another `Grab` instance.
Use case: create backup of current state to the cloned instance and
then restore the state from it.
"""
self.load_config(grab.config)
self.doc = grab.doc.copy(new_grab=self)
for key in self.clonable_attributes:
setattr(self, key, getattr(grab, key))
self.cookies = deepcopy(grab.cookies) | [
"def",
"adopt",
"(",
"self",
",",
"grab",
")",
":",
"self",
".",
"load_config",
"(",
"grab",
".",
"config",
")",
"self",
".",
"doc",
"=",
"grab",
".",
"doc",
".",
"copy",
"(",
"new_grab",
"=",
"self",
")",
"for",
"key",
"in",
"self",
".",
"clonab... | Copy the state of another `Grab` instance.
Use case: create backup of current state to the cloned instance and
then restore the state from it. | [
"Copy",
"the",
"state",
"of",
"another",
"Grab",
"instance",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L322-L336 | train | 214,863 |
lorien/grab | grab/base.py | Grab.dump_config | def dump_config(self):
"""
Make clone of current config.
"""
conf = copy_config(self.config, self.mutable_config_keys)
conf['state'] = {
'cookiejar_cookies': list(self.cookies.cookiejar),
}
return conf | python | def dump_config(self):
"""
Make clone of current config.
"""
conf = copy_config(self.config, self.mutable_config_keys)
conf['state'] = {
'cookiejar_cookies': list(self.cookies.cookiejar),
}
return conf | [
"def",
"dump_config",
"(",
"self",
")",
":",
"conf",
"=",
"copy_config",
"(",
"self",
".",
"config",
",",
"self",
".",
"mutable_config_keys",
")",
"conf",
"[",
"'state'",
"]",
"=",
"{",
"'cookiejar_cookies'",
":",
"list",
"(",
"self",
".",
"cookies",
"."... | Make clone of current config. | [
"Make",
"clone",
"of",
"current",
"config",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L338-L347 | train | 214,864 |
lorien/grab | grab/base.py | Grab.load_config | def load_config(self, config):
"""
Configure grab instance with external config object.
"""
self.config = copy_config(config, self.mutable_config_keys)
if 'cookiejar_cookies' in config['state']:
self.cookies = CookieManager.from_cookie_list(
config['state']['cookiejar_cookies']) | python | def load_config(self, config):
"""
Configure grab instance with external config object.
"""
self.config = copy_config(config, self.mutable_config_keys)
if 'cookiejar_cookies' in config['state']:
self.cookies = CookieManager.from_cookie_list(
config['state']['cookiejar_cookies']) | [
"def",
"load_config",
"(",
"self",
",",
"config",
")",
":",
"self",
".",
"config",
"=",
"copy_config",
"(",
"config",
",",
"self",
".",
"mutable_config_keys",
")",
"if",
"'cookiejar_cookies'",
"in",
"config",
"[",
"'state'",
"]",
":",
"self",
".",
"cookies... | Configure grab instance with external config object. | [
"Configure",
"grab",
"instance",
"with",
"external",
"config",
"object",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L349-L357 | train | 214,865 |
lorien/grab | grab/base.py | Grab.setup | def setup(self, **kwargs):
"""
Setting up Grab instance configuration.
"""
for key in kwargs:
if key not in self.config.keys():
raise error.GrabMisuseError('Unknown option: %s' % key)
if 'url' in kwargs:
if self.config.get('url'):
kwargs['url'] = self.make_url_absolute(kwargs['url'])
self.config.update(kwargs) | python | def setup(self, **kwargs):
"""
Setting up Grab instance configuration.
"""
for key in kwargs:
if key not in self.config.keys():
raise error.GrabMisuseError('Unknown option: %s' % key)
if 'url' in kwargs:
if self.config.get('url'):
kwargs['url'] = self.make_url_absolute(kwargs['url'])
self.config.update(kwargs) | [
"def",
"setup",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"key",
"in",
"kwargs",
":",
"if",
"key",
"not",
"in",
"self",
".",
"config",
".",
"keys",
"(",
")",
":",
"raise",
"error",
".",
"GrabMisuseError",
"(",
"'Unknown option: %s'",
"%"... | Setting up Grab instance configuration. | [
"Setting",
"up",
"Grab",
"instance",
"configuration",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L359-L371 | train | 214,866 |
lorien/grab | grab/base.py | Grab.download | def download(self, url, location, **kwargs):
"""
Fetch document located at ``url`` and save to to ``location``.
"""
doc = self.go(url, **kwargs)
with open(location, 'wb') as out:
out.write(doc.body)
return len(doc.body) | python | def download(self, url, location, **kwargs):
"""
Fetch document located at ``url`` and save to to ``location``.
"""
doc = self.go(url, **kwargs)
with open(location, 'wb') as out:
out.write(doc.body)
return len(doc.body) | [
"def",
"download",
"(",
"self",
",",
"url",
",",
"location",
",",
"*",
"*",
"kwargs",
")",
":",
"doc",
"=",
"self",
".",
"go",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"with",
"open",
"(",
"location",
",",
"'wb'",
")",
"as",
"out",
":",
"out",... | Fetch document located at ``url`` and save to to ``location``. | [
"Fetch",
"document",
"located",
"at",
"url",
"and",
"save",
"to",
"to",
"location",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L384-L392 | train | 214,867 |
lorien/grab | grab/base.py | Grab.prepare_request | def prepare_request(self, **kwargs):
"""
Configure all things to make real network request.
This method is called before doing real request via
transport extension.
"""
if self.transport is None:
self.setup_transport(self.transport_param)
self.reset()
self.request_counter = next(REQUEST_COUNTER)
if kwargs:
self.setup(**kwargs)
if self.proxylist.size() and self.config['proxy_auto_change']:
self.change_proxy()
self.request_method = self.detect_request_method()
self.transport.process_config(self) | python | def prepare_request(self, **kwargs):
"""
Configure all things to make real network request.
This method is called before doing real request via
transport extension.
"""
if self.transport is None:
self.setup_transport(self.transport_param)
self.reset()
self.request_counter = next(REQUEST_COUNTER)
if kwargs:
self.setup(**kwargs)
if self.proxylist.size() and self.config['proxy_auto_change']:
self.change_proxy()
self.request_method = self.detect_request_method()
self.transport.process_config(self) | [
"def",
"prepare_request",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"transport",
"is",
"None",
":",
"self",
".",
"setup_transport",
"(",
"self",
".",
"transport_param",
")",
"self",
".",
"reset",
"(",
")",
"self",
".",
"request... | Configure all things to make real network request.
This method is called before doing real request via
transport extension. | [
"Configure",
"all",
"things",
"to",
"make",
"real",
"network",
"request",
".",
"This",
"method",
"is",
"called",
"before",
"doing",
"real",
"request",
"via",
"transport",
"extension",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L394-L410 | train | 214,868 |
lorien/grab | grab/base.py | Grab.log_request | def log_request(self, extra=''):
"""
Send request details to logging system.
"""
# pylint: disable=no-member
thread_name = threading.currentThread().getName().lower()
# pylint: enable=no-member
if thread_name == 'mainthread':
thread_name = ''
else:
thread_name = '-%s' % thread_name
if self.config['proxy']:
if self.config['proxy_userpwd']:
auth = ' with authorization'
else:
auth = ''
proxy_info = ' via %s proxy of type %s%s' % (
self.config['proxy'], self.config['proxy_type'], auth)
else:
proxy_info = ''
if extra:
extra = '[%s] ' % extra
logger_network.debug(
'[%s%s] %s%s %s%s',
('%02d' % self.request_counter
if self.request_counter is not None else 'NA'),
thread_name,
extra, self.request_method or 'GET',
self.config['url'], proxy_info) | python | def log_request(self, extra=''):
"""
Send request details to logging system.
"""
# pylint: disable=no-member
thread_name = threading.currentThread().getName().lower()
# pylint: enable=no-member
if thread_name == 'mainthread':
thread_name = ''
else:
thread_name = '-%s' % thread_name
if self.config['proxy']:
if self.config['proxy_userpwd']:
auth = ' with authorization'
else:
auth = ''
proxy_info = ' via %s proxy of type %s%s' % (
self.config['proxy'], self.config['proxy_type'], auth)
else:
proxy_info = ''
if extra:
extra = '[%s] ' % extra
logger_network.debug(
'[%s%s] %s%s %s%s',
('%02d' % self.request_counter
if self.request_counter is not None else 'NA'),
thread_name,
extra, self.request_method or 'GET',
self.config['url'], proxy_info) | [
"def",
"log_request",
"(",
"self",
",",
"extra",
"=",
"''",
")",
":",
"# pylint: disable=no-member",
"thread_name",
"=",
"threading",
".",
"currentThread",
"(",
")",
".",
"getName",
"(",
")",
".",
"lower",
"(",
")",
"# pylint: enable=no-member",
"if",
"thread_... | Send request details to logging system. | [
"Send",
"request",
"details",
"to",
"logging",
"system",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L412-L442 | train | 214,869 |
lorien/grab | grab/base.py | Grab.request | def request(self, **kwargs):
"""
Perform network request.
You can specify grab settings in ``**kwargs``.
Any keyword argument will be passed to ``self.config``.
Returns: ``Document`` objects.
"""
self.prepare_request(**kwargs)
refresh_count = 0
while True:
self.log_request()
try:
self.transport.request()
except error.GrabError as ex:
self.exception = ex
self.reset_temporary_options()
if self.config['log_dir']:
self.save_failed_dump()
raise
else:
doc = self.process_request_result()
if self.config['follow_location']:
if doc.code in (301, 302, 303, 307, 308):
if doc.headers.get('Location'):
refresh_count += 1
if refresh_count > self.config['redirect_limit']:
raise error.GrabTooManyRedirectsError()
else:
url = doc.headers.get('Location')
self.prepare_request(
url=self.make_url_absolute(url),
referer=None)
continue
if self.config['follow_refresh']:
refresh_url = self.doc.get_meta_refresh_url()
if refresh_url is not None:
refresh_count += 1
if refresh_count > self.config['redirect_limit']:
raise error.GrabTooManyRedirectsError()
else:
self.prepare_request(
url=self.make_url_absolute(refresh_url),
referer=None)
continue
return doc | python | def request(self, **kwargs):
"""
Perform network request.
You can specify grab settings in ``**kwargs``.
Any keyword argument will be passed to ``self.config``.
Returns: ``Document`` objects.
"""
self.prepare_request(**kwargs)
refresh_count = 0
while True:
self.log_request()
try:
self.transport.request()
except error.GrabError as ex:
self.exception = ex
self.reset_temporary_options()
if self.config['log_dir']:
self.save_failed_dump()
raise
else:
doc = self.process_request_result()
if self.config['follow_location']:
if doc.code in (301, 302, 303, 307, 308):
if doc.headers.get('Location'):
refresh_count += 1
if refresh_count > self.config['redirect_limit']:
raise error.GrabTooManyRedirectsError()
else:
url = doc.headers.get('Location')
self.prepare_request(
url=self.make_url_absolute(url),
referer=None)
continue
if self.config['follow_refresh']:
refresh_url = self.doc.get_meta_refresh_url()
if refresh_url is not None:
refresh_count += 1
if refresh_count > self.config['redirect_limit']:
raise error.GrabTooManyRedirectsError()
else:
self.prepare_request(
url=self.make_url_absolute(refresh_url),
referer=None)
continue
return doc | [
"def",
"request",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"prepare_request",
"(",
"*",
"*",
"kwargs",
")",
"refresh_count",
"=",
"0",
"while",
"True",
":",
"self",
".",
"log_request",
"(",
")",
"try",
":",
"self",
".",
"transport... | Perform network request.
You can specify grab settings in ``**kwargs``.
Any keyword argument will be passed to ``self.config``.
Returns: ``Document`` objects. | [
"Perform",
"network",
"request",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L444-L495 | train | 214,870 |
lorien/grab | grab/base.py | Grab.submit | def submit(self, make_request=True, **kwargs):
"""
Submit current form.
:param make_request: if `False` then grab instance will be
configured with form post data but request will not be
performed
For details see `Document.submit()` method
Example::
# Assume that we going to some page with some form
g.go('some url')
# Fill some fields
g.doc.set_input('username', 'bob')
g.doc.set_input('pwd', '123')
# Submit the form
g.submit()
# or we can just fill the form
# and do manual submission
g.doc.set_input('foo', 'bar')
g.submit(make_request=False)
g.request()
# for multipart forms we can specify files
from grab import UploadFile
g.doc.set_input('img', UploadFile('/path/to/image.png'))
g.submit()
"""
result = self.doc.get_form_request(**kwargs)
if result['multipart_post']:
self.setup(multipart_post=result['multipart_post'])
if result['post']:
self.setup(post=result['post'])
if result['url']:
self.setup(url=result['url'])
if make_request:
return self.request()
else:
return None | python | def submit(self, make_request=True, **kwargs):
"""
Submit current form.
:param make_request: if `False` then grab instance will be
configured with form post data but request will not be
performed
For details see `Document.submit()` method
Example::
# Assume that we going to some page with some form
g.go('some url')
# Fill some fields
g.doc.set_input('username', 'bob')
g.doc.set_input('pwd', '123')
# Submit the form
g.submit()
# or we can just fill the form
# and do manual submission
g.doc.set_input('foo', 'bar')
g.submit(make_request=False)
g.request()
# for multipart forms we can specify files
from grab import UploadFile
g.doc.set_input('img', UploadFile('/path/to/image.png'))
g.submit()
"""
result = self.doc.get_form_request(**kwargs)
if result['multipart_post']:
self.setup(multipart_post=result['multipart_post'])
if result['post']:
self.setup(post=result['post'])
if result['url']:
self.setup(url=result['url'])
if make_request:
return self.request()
else:
return None | [
"def",
"submit",
"(",
"self",
",",
"make_request",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"doc",
".",
"get_form_request",
"(",
"*",
"*",
"kwargs",
")",
"if",
"result",
"[",
"'multipart_post'",
"]",
":",
"self",
".... | Submit current form.
:param make_request: if `False` then grab instance will be
configured with form post data but request will not be
performed
For details see `Document.submit()` method
Example::
# Assume that we going to some page with some form
g.go('some url')
# Fill some fields
g.doc.set_input('username', 'bob')
g.doc.set_input('pwd', '123')
# Submit the form
g.submit()
# or we can just fill the form
# and do manual submission
g.doc.set_input('foo', 'bar')
g.submit(make_request=False)
g.request()
# for multipart forms we can specify files
from grab import UploadFile
g.doc.set_input('img', UploadFile('/path/to/image.png'))
g.submit() | [
"Submit",
"current",
"form",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L497-L538 | train | 214,871 |
lorien/grab | grab/base.py | Grab.process_request_result | def process_request_result(self, prepare_response_func=None):
"""
Process result of real request performed via transport extension.
"""
now = datetime.utcnow()
# TODO: move into separate method
if self.config['debug_post']:
post = self.config['post'] or self.config['multipart_post']
if isinstance(post, dict):
post = list(post.items())
if post:
if isinstance(post, six.string_types):
post = make_str(post[:self.config['debug_post_limit']],
errors='ignore') + b'...'
else:
items = normalize_http_values(
post, charset=self.config['charset'])
new_items = []
for key, value in items:
if len(value) > self.config['debug_post_limit']:
value = value[
:self.config['debug_post_limit']] + b'...'
else:
value = value
new_items.append((key, value))
post = '\n'.join('%-25s: %s' % x for x in new_items)
if post:
logger_network.debug('[%02d] POST request:\n%s\n',
self.request_counter, post)
# It's important to delete old POST data after request is performed.
# If POST data is not cleared then next request will try to use them
# again!
self.reset_temporary_options()
if prepare_response_func:
self.doc = prepare_response_func(self.transport, self)
else:
self.doc = self.transport.prepare_response(self)
self.doc.process_grab(self)
if self.config['reuse_cookies']:
self.cookies.update(self.doc.cookies)
self.doc.timestamp = now
self.config['charset'] = self.doc.charset
if self.config['log_file']:
with open(self.config['log_file'], 'wb') as out:
out.write(self.doc.body)
if self.config['cookiefile']:
self.cookies.save_to_file(self.config['cookiefile'])
if self.config['reuse_referer']:
self.config['referer'] = self.doc.url
self.copy_request_data()
# Should be called after `copy_request_data`
if self.config['log_dir']:
self.save_dumps()
return self.doc | python | def process_request_result(self, prepare_response_func=None):
"""
Process result of real request performed via transport extension.
"""
now = datetime.utcnow()
# TODO: move into separate method
if self.config['debug_post']:
post = self.config['post'] or self.config['multipart_post']
if isinstance(post, dict):
post = list(post.items())
if post:
if isinstance(post, six.string_types):
post = make_str(post[:self.config['debug_post_limit']],
errors='ignore') + b'...'
else:
items = normalize_http_values(
post, charset=self.config['charset'])
new_items = []
for key, value in items:
if len(value) > self.config['debug_post_limit']:
value = value[
:self.config['debug_post_limit']] + b'...'
else:
value = value
new_items.append((key, value))
post = '\n'.join('%-25s: %s' % x for x in new_items)
if post:
logger_network.debug('[%02d] POST request:\n%s\n',
self.request_counter, post)
# It's important to delete old POST data after request is performed.
# If POST data is not cleared then next request will try to use them
# again!
self.reset_temporary_options()
if prepare_response_func:
self.doc = prepare_response_func(self.transport, self)
else:
self.doc = self.transport.prepare_response(self)
self.doc.process_grab(self)
if self.config['reuse_cookies']:
self.cookies.update(self.doc.cookies)
self.doc.timestamp = now
self.config['charset'] = self.doc.charset
if self.config['log_file']:
with open(self.config['log_file'], 'wb') as out:
out.write(self.doc.body)
if self.config['cookiefile']:
self.cookies.save_to_file(self.config['cookiefile'])
if self.config['reuse_referer']:
self.config['referer'] = self.doc.url
self.copy_request_data()
# Should be called after `copy_request_data`
if self.config['log_dir']:
self.save_dumps()
return self.doc | [
"def",
"process_request_result",
"(",
"self",
",",
"prepare_response_func",
"=",
"None",
")",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"# TODO: move into separate method",
"if",
"self",
".",
"config",
"[",
"'debug_post'",
"]",
":",
"post",
"=",
"s... | Process result of real request performed via transport extension. | [
"Process",
"result",
"of",
"real",
"request",
"performed",
"via",
"transport",
"extension",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L540-L606 | train | 214,872 |
lorien/grab | grab/base.py | Grab.save_failed_dump | def save_failed_dump(self):
"""
Save dump of failed request for debugging.
This method is called then fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure.
"""
# try/except for safety, to not break live spiders
try:
# FIXME
if (self.transport.__class__.__name__ == 'Urllib3Transport'
and not getattr(self.transport, '_response', None)):
self.doc = None
else:
self.doc = self.transport.prepare_response(self)
self.copy_request_data()
self.save_dumps()
except Exception as ex: # pylint: disable=broad-except
logger.error('', exc_info=ex) | python | def save_failed_dump(self):
"""
Save dump of failed request for debugging.
This method is called then fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure.
"""
# try/except for safety, to not break live spiders
try:
# FIXME
if (self.transport.__class__.__name__ == 'Urllib3Transport'
and not getattr(self.transport, '_response', None)):
self.doc = None
else:
self.doc = self.transport.prepare_response(self)
self.copy_request_data()
self.save_dumps()
except Exception as ex: # pylint: disable=broad-except
logger.error('', exc_info=ex) | [
"def",
"save_failed_dump",
"(",
"self",
")",
":",
"# try/except for safety, to not break live spiders",
"try",
":",
"# FIXME",
"if",
"(",
"self",
".",
"transport",
".",
"__class__",
".",
"__name__",
"==",
"'Urllib3Transport'",
"and",
"not",
"getattr",
"(",
"self",
... | Save dump of failed request for debugging.
This method is called then fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure. | [
"Save",
"dump",
"of",
"failed",
"request",
"for",
"debugging",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L614-L633 | train | 214,873 |
lorien/grab | grab/base.py | Grab.setup_document | def setup_document(self, content, **kwargs):
"""
Setup `response` object without real network requests.
Useful for testing and debuging.
All ``**kwargs`` will be passed to `Document` constructor.
"""
self.reset()
if isinstance(content, six.text_type):
raise error.GrabMisuseError('Method `setup_document` accepts only '
'byte string in `content` argument.')
# Configure Document instance
doc = Document(grab=self)
doc.body = content
doc.status = ''
doc.head = b'HTTP/1.1 200 OK\r\n\r\n'
doc.parse(charset=kwargs.get('document_charset'))
doc.code = 200
doc.total_time = 0
doc.connect_time = 0
doc.name_lookup_time = 0
doc.url = ''
for key, value in kwargs.items():
setattr(doc, key, value)
self.doc = doc | python | def setup_document(self, content, **kwargs):
"""
Setup `response` object without real network requests.
Useful for testing and debuging.
All ``**kwargs`` will be passed to `Document` constructor.
"""
self.reset()
if isinstance(content, six.text_type):
raise error.GrabMisuseError('Method `setup_document` accepts only '
'byte string in `content` argument.')
# Configure Document instance
doc = Document(grab=self)
doc.body = content
doc.status = ''
doc.head = b'HTTP/1.1 200 OK\r\n\r\n'
doc.parse(charset=kwargs.get('document_charset'))
doc.code = 200
doc.total_time = 0
doc.connect_time = 0
doc.name_lookup_time = 0
doc.url = ''
for key, value in kwargs.items():
setattr(doc, key, value)
self.doc = doc | [
"def",
"setup_document",
"(",
"self",
",",
"content",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"reset",
"(",
")",
"if",
"isinstance",
"(",
"content",
",",
"six",
".",
"text_type",
")",
":",
"raise",
"error",
".",
"GrabMisuseError",
"(",
"'Method... | Setup `response` object without real network requests.
Useful for testing and debuging.
All ``**kwargs`` will be passed to `Document` constructor. | [
"Setup",
"response",
"object",
"without",
"real",
"network",
"requests",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L641-L670 | train | 214,874 |
lorien/grab | grab/base.py | Grab.change_proxy | def change_proxy(self, random=True):
"""
Set random proxy from proxylist.
"""
if self.proxylist.size():
if random:
proxy = self.proxylist.get_random_proxy()
else:
proxy = self.proxylist.get_next_proxy()
self.setup(proxy=proxy.get_address(),
proxy_userpwd=proxy.get_userpwd(),
proxy_type=proxy.proxy_type)
else:
logger.debug('Proxy list is empty') | python | def change_proxy(self, random=True):
"""
Set random proxy from proxylist.
"""
if self.proxylist.size():
if random:
proxy = self.proxylist.get_random_proxy()
else:
proxy = self.proxylist.get_next_proxy()
self.setup(proxy=proxy.get_address(),
proxy_userpwd=proxy.get_userpwd(),
proxy_type=proxy.proxy_type)
else:
logger.debug('Proxy list is empty') | [
"def",
"change_proxy",
"(",
"self",
",",
"random",
"=",
"True",
")",
":",
"if",
"self",
".",
"proxylist",
".",
"size",
"(",
")",
":",
"if",
"random",
":",
"proxy",
"=",
"self",
".",
"proxylist",
".",
"get_random_proxy",
"(",
")",
"else",
":",
"proxy"... | Set random proxy from proxylist. | [
"Set",
"random",
"proxy",
"from",
"proxylist",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L672-L686 | train | 214,875 |
lorien/grab | grab/base.py | Grab.make_url_absolute | def make_url_absolute(self, url, resolve_base=False):
"""
Make url absolute using previous request url as base url.
"""
if self.config['url']:
if resolve_base:
ubody = self.doc.unicode_body()
base_url = find_base_url(ubody)
if base_url:
return urljoin(base_url, url)
return urljoin(self.config['url'], url)
else:
return url | python | def make_url_absolute(self, url, resolve_base=False):
"""
Make url absolute using previous request url as base url.
"""
if self.config['url']:
if resolve_base:
ubody = self.doc.unicode_body()
base_url = find_base_url(ubody)
if base_url:
return urljoin(base_url, url)
return urljoin(self.config['url'], url)
else:
return url | [
"def",
"make_url_absolute",
"(",
"self",
",",
"url",
",",
"resolve_base",
"=",
"False",
")",
":",
"if",
"self",
".",
"config",
"[",
"'url'",
"]",
":",
"if",
"resolve_base",
":",
"ubody",
"=",
"self",
".",
"doc",
".",
"unicode_body",
"(",
")",
"base_url... | Make url absolute using previous request url as base url. | [
"Make",
"url",
"absolute",
"using",
"previous",
"request",
"url",
"as",
"base",
"url",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L734-L747 | train | 214,876 |
lorien/grab | grab/base.py | Grab.detect_request_method | def detect_request_method(self):
"""
Analyze request config and find which
request method will be used.
Returns request method in upper case
This method needs simetime when `process_config` method
was not called yet.
"""
method = self.config['method']
if method:
method = method.upper()
else:
if self.config['post'] or self.config['multipart_post']:
method = 'POST'
else:
method = 'GET'
return method | python | def detect_request_method(self):
"""
Analyze request config and find which
request method will be used.
Returns request method in upper case
This method needs simetime when `process_config` method
was not called yet.
"""
method = self.config['method']
if method:
method = method.upper()
else:
if self.config['post'] or self.config['multipart_post']:
method = 'POST'
else:
method = 'GET'
return method | [
"def",
"detect_request_method",
"(",
"self",
")",
":",
"method",
"=",
"self",
".",
"config",
"[",
"'method'",
"]",
"if",
"method",
":",
"method",
"=",
"method",
".",
"upper",
"(",
")",
"else",
":",
"if",
"self",
".",
"config",
"[",
"'post'",
"]",
"or... | Analyze request config and find which
request method will be used.
Returns request method in upper case
This method needs simetime when `process_config` method
was not called yet. | [
"Analyze",
"request",
"config",
"and",
"find",
"which",
"request",
"method",
"will",
"be",
"used",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L749-L768 | train | 214,877 |
lorien/grab | grab/cookie.py | create_cookie | def create_cookie(name, value, domain, httponly=None, **kwargs):
"""Creates `cookielib.Cookie` instance"""
if domain == 'localhost':
domain = ''
config = dict(
name=name,
value=value,
version=0,
port=None,
domain=domain,
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rfc2109=False,
rest={'HttpOnly': httponly},
)
for key in kwargs:
if key not in config:
raise GrabMisuseError('Function `create_cookie` does not accept '
'`%s` argument' % key)
config.update(**kwargs)
config['rest']['HttpOnly'] = httponly
config['port_specified'] = bool(config['port'])
config['domain_specified'] = bool(config['domain'])
config['domain_initial_dot'] = (config['domain'] or '').startswith('.')
config['path_specified'] = bool(config['path'])
return Cookie(**config) | python | def create_cookie(name, value, domain, httponly=None, **kwargs):
"""Creates `cookielib.Cookie` instance"""
if domain == 'localhost':
domain = ''
config = dict(
name=name,
value=value,
version=0,
port=None,
domain=domain,
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rfc2109=False,
rest={'HttpOnly': httponly},
)
for key in kwargs:
if key not in config:
raise GrabMisuseError('Function `create_cookie` does not accept '
'`%s` argument' % key)
config.update(**kwargs)
config['rest']['HttpOnly'] = httponly
config['port_specified'] = bool(config['port'])
config['domain_specified'] = bool(config['domain'])
config['domain_initial_dot'] = (config['domain'] or '').startswith('.')
config['path_specified'] = bool(config['path'])
return Cookie(**config) | [
"def",
"create_cookie",
"(",
"name",
",",
"value",
",",
"domain",
",",
"httponly",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"domain",
"==",
"'localhost'",
":",
"domain",
"=",
"''",
"config",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
... | Creates `cookielib.Cookie` instance | [
"Creates",
"cookielib",
".",
"Cookie",
"instance"
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/cookie.py#L118-L152 | train | 214,878 |
lorien/grab | grab/cookie.py | CookieManager.set | def set(self, name, value, domain, **kwargs):
"""Add new cookie or replace existing cookie with same parameters.
:param name: name of cookie
:param value: value of cookie
:param kwargs: extra attributes of cookie
"""
if domain == 'localhost':
domain = ''
self.cookiejar.set_cookie(create_cookie(name, value, domain, **kwargs)) | python | def set(self, name, value, domain, **kwargs):
"""Add new cookie or replace existing cookie with same parameters.
:param name: name of cookie
:param value: value of cookie
:param kwargs: extra attributes of cookie
"""
if domain == 'localhost':
domain = ''
self.cookiejar.set_cookie(create_cookie(name, value, domain, **kwargs)) | [
"def",
"set",
"(",
"self",
",",
"name",
",",
"value",
",",
"domain",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"domain",
"==",
"'localhost'",
":",
"domain",
"=",
"''",
"self",
".",
"cookiejar",
".",
"set_cookie",
"(",
"create_cookie",
"(",
"name",
","... | Add new cookie or replace existing cookie with same parameters.
:param name: name of cookie
:param value: value of cookie
:param kwargs: extra attributes of cookie | [
"Add",
"new",
"cookie",
"or",
"replace",
"existing",
"cookie",
"with",
"same",
"parameters",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/cookie.py#L176-L187 | train | 214,879 |
lorien/grab | grab/cookie.py | CookieManager.load_from_file | def load_from_file(self, path):
"""
Load cookies from the file.
Content of file should be a JSON-serialized list of dicts.
"""
with open(path) as inf:
data = inf.read()
if data:
items = json.loads(data)
else:
items = {}
for item in items:
extra = dict((x, y) for x, y in item.items()
if x not in ['name', 'value', 'domain'])
self.set(item['name'], item['value'], item['domain'], **extra) | python | def load_from_file(self, path):
"""
Load cookies from the file.
Content of file should be a JSON-serialized list of dicts.
"""
with open(path) as inf:
data = inf.read()
if data:
items = json.loads(data)
else:
items = {}
for item in items:
extra = dict((x, y) for x, y in item.items()
if x not in ['name', 'value', 'domain'])
self.set(item['name'], item['value'], item['domain'], **extra) | [
"def",
"load_from_file",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"inf",
":",
"data",
"=",
"inf",
".",
"read",
"(",
")",
"if",
"data",
":",
"items",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"else",
":",
"i... | Load cookies from the file.
Content of file should be a JSON-serialized list of dicts. | [
"Load",
"cookies",
"from",
"the",
"file",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/cookie.py#L245-L261 | train | 214,880 |
lorien/grab | grab/cookie.py | CookieManager.save_to_file | def save_to_file(self, path):
"""
Dump all cookies to file.
Cookies are dumped as JSON-serialized dict of keys and values.
"""
with open(path, 'w') as out:
out.write(json.dumps(self.get_dict())) | python | def save_to_file(self, path):
"""
Dump all cookies to file.
Cookies are dumped as JSON-serialized dict of keys and values.
"""
with open(path, 'w') as out:
out.write(json.dumps(self.get_dict())) | [
"def",
"save_to_file",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"get_dict",
"(",
")",
")",
")"
] | Dump all cookies to file.
Cookies are dumped as JSON-serialized dict of keys and values. | [
"Dump",
"all",
"cookies",
"to",
"file",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/cookie.py#L269-L277 | train | 214,881 |
lorien/grab | grab/spider/task_dispatcher_service.py | TaskDispatcherService.process_service_result | def process_service_result(self, result, task, meta=None):
"""
Process result submitted from any service to task dispatcher service.
Result could be:
* Task
* None
* Task instance
* ResponseNotValid-based exception
* Arbitrary exception
* Network response:
{ok, ecode, emsg, error_abbr, exc, grab, grab_config_backup}
Exception can come only from parser_service and it always has
meta {"from": "parser", "exc_info": <...>}
"""
if meta is None:
meta = {}
if isinstance(result, Task):
if meta.get('source') == 'cache_reader':
self.spider.add_task(result, queue=self.spider.task_queue)
else:
self.spider.add_task(result)
elif result is None:
pass
elif isinstance(result, ResponseNotValid):
self.spider.add_task(task.clone(refresh_cache=True))
error_code = result.__class__.__name__.replace('_', '-')
self.spider.stat.inc('integrity:%s' % error_code)
elif isinstance(result, Exception):
if task:
handler = self.spider.find_task_handler(task)
handler_name = getattr(handler, '__name__', 'NONE')
else:
handler_name = 'NA'
self.spider.process_parser_error(
handler_name, task, meta['exc_info'],
)
if isinstance(result, FatalError):
self.spider.fatal_error_queue.put(meta['exc_info'])
elif isinstance(result, dict) and 'grab' in result:
if (self.spider.cache_writer_service
and not result.get('from_cache')
and result['ok']):
self.spider.cache_writer_service.input_queue.put(
(task, result['grab'])
)
# TODO: Move to network service
# starts
self.spider.log_network_result_stats(result, task)
# ends
is_valid = False
if task.get('raw'):
is_valid = True
elif result['ok']:
res_code = result['grab'].doc.code
is_valid = self.spider.is_valid_network_response_code(
res_code, task
)
if is_valid:
self.spider.parser_service.input_queue.put((result, task))
else:
self.spider.log_failed_network_result(result)
# Try to do network request one more time
# TODO:
# Implement valid_try_limit
# Use it if request failed not because of network error
# But because of content integrity check
if self.spider.network_try_limit > 0:
task.refresh_cache = True
task.setup_grab_config(
result['grab_config_backup'])
self.spider.add_task(task)
if result.get('from_cache'):
self.spider.stat.inc('spider:task-%s-cache'
% task.name)
self.spider.stat.inc('spider:request')
else:
raise SpiderError('Unknown result received from a service: %s'
% result) | python | def process_service_result(self, result, task, meta=None):
"""
Process result submitted from any service to task dispatcher service.
Result could be:
* Task
* None
* Task instance
* ResponseNotValid-based exception
* Arbitrary exception
* Network response:
{ok, ecode, emsg, error_abbr, exc, grab, grab_config_backup}
Exception can come only from parser_service and it always has
meta {"from": "parser", "exc_info": <...>}
"""
if meta is None:
meta = {}
if isinstance(result, Task):
if meta.get('source') == 'cache_reader':
self.spider.add_task(result, queue=self.spider.task_queue)
else:
self.spider.add_task(result)
elif result is None:
pass
elif isinstance(result, ResponseNotValid):
self.spider.add_task(task.clone(refresh_cache=True))
error_code = result.__class__.__name__.replace('_', '-')
self.spider.stat.inc('integrity:%s' % error_code)
elif isinstance(result, Exception):
if task:
handler = self.spider.find_task_handler(task)
handler_name = getattr(handler, '__name__', 'NONE')
else:
handler_name = 'NA'
self.spider.process_parser_error(
handler_name, task, meta['exc_info'],
)
if isinstance(result, FatalError):
self.spider.fatal_error_queue.put(meta['exc_info'])
elif isinstance(result, dict) and 'grab' in result:
if (self.spider.cache_writer_service
and not result.get('from_cache')
and result['ok']):
self.spider.cache_writer_service.input_queue.put(
(task, result['grab'])
)
# TODO: Move to network service
# starts
self.spider.log_network_result_stats(result, task)
# ends
is_valid = False
if task.get('raw'):
is_valid = True
elif result['ok']:
res_code = result['grab'].doc.code
is_valid = self.spider.is_valid_network_response_code(
res_code, task
)
if is_valid:
self.spider.parser_service.input_queue.put((result, task))
else:
self.spider.log_failed_network_result(result)
# Try to do network request one more time
# TODO:
# Implement valid_try_limit
# Use it if request failed not because of network error
# But because of content integrity check
if self.spider.network_try_limit > 0:
task.refresh_cache = True
task.setup_grab_config(
result['grab_config_backup'])
self.spider.add_task(task)
if result.get('from_cache'):
self.spider.stat.inc('spider:task-%s-cache'
% task.name)
self.spider.stat.inc('spider:request')
else:
raise SpiderError('Unknown result received from a service: %s'
% result) | [
"def",
"process_service_result",
"(",
"self",
",",
"result",
",",
"task",
",",
"meta",
"=",
"None",
")",
":",
"if",
"meta",
"is",
"None",
":",
"meta",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"result",
",",
"Task",
")",
":",
"if",
"meta",
".",
"get"... | Process result submitted from any service to task dispatcher service.
Result could be:
* Task
* None
* Task instance
* ResponseNotValid-based exception
* Arbitrary exception
* Network response:
{ok, ecode, emsg, error_abbr, exc, grab, grab_config_backup}
Exception can come only from parser_service and it always has
meta {"from": "parser", "exc_info": <...>} | [
"Process",
"result",
"submitted",
"from",
"any",
"service",
"to",
"task",
"dispatcher",
"service",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/spider/task_dispatcher_service.py#L29-L109 | train | 214,882 |
lorien/grab | grab/deprecated.py | DeprecatedThings.find_link | def find_link(self, href_pattern, make_absolute=True):
"""
Find link in response body which href value matches ``href_pattern``.
Returns found url or None.
"""
if make_absolute:
self.tree.make_links_absolute(self.doc.url)
if isinstance(href_pattern, six.text_type):
raise GrabMisuseError('Method `find_link` accepts only '
'byte-string argument')
href_pattern = make_unicode(href_pattern)
for elem, _, link, _ in self.tree.iterlinks():
if elem.tag == 'a' and href_pattern in link:
return link
return None | python | def find_link(self, href_pattern, make_absolute=True):
"""
Find link in response body which href value matches ``href_pattern``.
Returns found url or None.
"""
if make_absolute:
self.tree.make_links_absolute(self.doc.url)
if isinstance(href_pattern, six.text_type):
raise GrabMisuseError('Method `find_link` accepts only '
'byte-string argument')
href_pattern = make_unicode(href_pattern)
for elem, _, link, _ in self.tree.iterlinks():
if elem.tag == 'a' and href_pattern in link:
return link
return None | [
"def",
"find_link",
"(",
"self",
",",
"href_pattern",
",",
"make_absolute",
"=",
"True",
")",
":",
"if",
"make_absolute",
":",
"self",
".",
"tree",
".",
"make_links_absolute",
"(",
"self",
".",
"doc",
".",
"url",
")",
"if",
"isinstance",
"(",
"href_pattern... | Find link in response body which href value matches ``href_pattern``.
Returns found url or None. | [
"Find",
"link",
"in",
"response",
"body",
"which",
"href",
"value",
"matches",
"href_pattern",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L76-L93 | train | 214,883 |
lorien/grab | grab/deprecated.py | DeprecatedThings.find_link_rex | def find_link_rex(self, rex, make_absolute=True):
"""
Find link matched the given regular expression in response body.
Returns found url or None.
"""
if make_absolute:
self.tree.make_links_absolute(self.doc.url)
for elem, _, link, _ in self.tree.iterlinks():
if elem.tag == 'a':
match = rex.search(link)
if match:
# That does not work for string object
# link.match = match
return link
return None | python | def find_link_rex(self, rex, make_absolute=True):
"""
Find link matched the given regular expression in response body.
Returns found url or None.
"""
if make_absolute:
self.tree.make_links_absolute(self.doc.url)
for elem, _, link, _ in self.tree.iterlinks():
if elem.tag == 'a':
match = rex.search(link)
if match:
# That does not work for string object
# link.match = match
return link
return None | [
"def",
"find_link_rex",
"(",
"self",
",",
"rex",
",",
"make_absolute",
"=",
"True",
")",
":",
"if",
"make_absolute",
":",
"self",
".",
"tree",
".",
"make_links_absolute",
"(",
"self",
".",
"doc",
".",
"url",
")",
"for",
"elem",
",",
"_",
",",
"link",
... | Find link matched the given regular expression in response body.
Returns found url or None. | [
"Find",
"link",
"matched",
"the",
"given",
"regular",
"expression",
"in",
"response",
"body",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L96-L113 | train | 214,884 |
lorien/grab | grab/deprecated.py | DeprecatedThings.css_one | def css_one(self, path, default=NULL):
"""
Get first element which matches the given css path
or raise DataNotFound.
"""
try:
return self.css_list(path)[0]
except IndexError:
if default is NULL:
raise DataNotFound('CSS path not found: %s' % path)
else:
return default | python | def css_one(self, path, default=NULL):
"""
Get first element which matches the given css path
or raise DataNotFound.
"""
try:
return self.css_list(path)[0]
except IndexError:
if default is NULL:
raise DataNotFound('CSS path not found: %s' % path)
else:
return default | [
"def",
"css_one",
"(",
"self",
",",
"path",
",",
"default",
"=",
"NULL",
")",
":",
"try",
":",
"return",
"self",
".",
"css_list",
"(",
"path",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"if",
"default",
"is",
"NULL",
":",
"raise",
"DataNotFound... | Get first element which matches the given css path
or raise DataNotFound. | [
"Get",
"first",
"element",
"which",
"matches",
"the",
"given",
"css",
"path",
"or",
"raise",
"DataNotFound",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L150-L162 | train | 214,885 |
lorien/grab | grab/deprecated.py | DeprecatedThings.css_text | def css_text(self, path, default=NULL, smart=False, normalize_space=True):
"""
Get normalized text of node which matches the css path.
"""
try:
return get_node_text(self.css_one(path), smart=smart,
normalize_space=normalize_space)
except IndexError:
if default is NULL:
raise
else:
return default | python | def css_text(self, path, default=NULL, smart=False, normalize_space=True):
"""
Get normalized text of node which matches the css path.
"""
try:
return get_node_text(self.css_one(path), smart=smart,
normalize_space=normalize_space)
except IndexError:
if default is NULL:
raise
else:
return default | [
"def",
"css_text",
"(",
"self",
",",
"path",
",",
"default",
"=",
"NULL",
",",
"smart",
"=",
"False",
",",
"normalize_space",
"=",
"True",
")",
":",
"try",
":",
"return",
"get_node_text",
"(",
"self",
".",
"css_one",
"(",
"path",
")",
",",
"smart",
"... | Get normalized text of node which matches the css path. | [
"Get",
"normalized",
"text",
"of",
"node",
"which",
"matches",
"the",
"css",
"path",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L173-L185 | train | 214,886 |
lorien/grab | grab/deprecated.py | DeprecatedThings.css_number | def css_number(self, path, default=NULL, ignore_spaces=False, smart=False,
make_int=True):
"""
Find number in normalized text of node which
matches the given css path.
"""
try:
text = self.css_text(path, smart=smart)
return find_number(text, ignore_spaces=ignore_spaces,
make_int=make_int)
except IndexError:
if default is NULL:
raise
else:
return default | python | def css_number(self, path, default=NULL, ignore_spaces=False, smart=False,
make_int=True):
"""
Find number in normalized text of node which
matches the given css path.
"""
try:
text = self.css_text(path, smart=smart)
return find_number(text, ignore_spaces=ignore_spaces,
make_int=make_int)
except IndexError:
if default is NULL:
raise
else:
return default | [
"def",
"css_number",
"(",
"self",
",",
"path",
",",
"default",
"=",
"NULL",
",",
"ignore_spaces",
"=",
"False",
",",
"smart",
"=",
"False",
",",
"make_int",
"=",
"True",
")",
":",
"try",
":",
"text",
"=",
"self",
".",
"css_text",
"(",
"path",
",",
... | Find number in normalized text of node which
matches the given css path. | [
"Find",
"number",
"in",
"normalized",
"text",
"of",
"node",
"which",
"matches",
"the",
"given",
"css",
"path",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L188-L203 | train | 214,887 |
lorien/grab | grab/deprecated.py | DeprecatedThings.strip_tags | def strip_tags(self, content, smart=False):
"""
Strip tags from the HTML content.
"""
from lxml.html import fromstring
return get_node_text(fromstring(content), smart=smart) | python | def strip_tags(self, content, smart=False):
"""
Strip tags from the HTML content.
"""
from lxml.html import fromstring
return get_node_text(fromstring(content), smart=smart) | [
"def",
"strip_tags",
"(",
"self",
",",
"content",
",",
"smart",
"=",
"False",
")",
":",
"from",
"lxml",
".",
"html",
"import",
"fromstring",
"return",
"get_node_text",
"(",
"fromstring",
"(",
"content",
")",
",",
"smart",
"=",
"smart",
")"
] | Strip tags from the HTML content. | [
"Strip",
"tags",
"from",
"the",
"HTML",
"content",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L230-L236 | train | 214,888 |
lorien/grab | grab/util/misc.py | camel_case_to_underscore | def camel_case_to_underscore(name):
"""Converts camel_case into CamelCase"""
res = RE_TOKEN1.sub(r'\1_\2', name)
res = RE_TOKEN2.sub(r'\1_\2', res)
return res.lower() | python | def camel_case_to_underscore(name):
"""Converts camel_case into CamelCase"""
res = RE_TOKEN1.sub(r'\1_\2', name)
res = RE_TOKEN2.sub(r'\1_\2', res)
return res.lower() | [
"def",
"camel_case_to_underscore",
"(",
"name",
")",
":",
"res",
"=",
"RE_TOKEN1",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"name",
")",
"res",
"=",
"RE_TOKEN2",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"res",
")",
"return",
"res",
".",
"lower",
"(",
")"
] | Converts camel_case into CamelCase | [
"Converts",
"camel_case",
"into",
"CamelCase"
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/util/misc.py#L8-L12 | train | 214,889 |
lorien/grab | grab/document.py | read_bom | def read_bom(data):
"""Read the byte order mark in the text, if present, and
return the encoding represented by the BOM and the BOM.
If no BOM can be detected, (None, None) is returned.
"""
# common case is no BOM, so this is fast
if data and data[0] in _FIRST_CHARS:
for bom, encoding in _BOM_TABLE:
if data.startswith(bom):
return encoding, bom
return None, None | python | def read_bom(data):
"""Read the byte order mark in the text, if present, and
return the encoding represented by the BOM and the BOM.
If no BOM can be detected, (None, None) is returned.
"""
# common case is no BOM, so this is fast
if data and data[0] in _FIRST_CHARS:
for bom, encoding in _BOM_TABLE:
if data.startswith(bom):
return encoding, bom
return None, None | [
"def",
"read_bom",
"(",
"data",
")",
":",
"# common case is no BOM, so this is fast",
"if",
"data",
"and",
"data",
"[",
"0",
"]",
"in",
"_FIRST_CHARS",
":",
"for",
"bom",
",",
"encoding",
"in",
"_BOM_TABLE",
":",
"if",
"data",
".",
"startswith",
"(",
"bom",
... | Read the byte order mark in the text, if present, and
return the encoding represented by the BOM and the BOM.
If no BOM can be detected, (None, None) is returned. | [
"Read",
"the",
"byte",
"order",
"mark",
"in",
"the",
"text",
"if",
"present",
"and",
"return",
"the",
"encoding",
"represented",
"by",
"the",
"BOM",
"and",
"the",
"BOM",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L67-L78 | train | 214,890 |
lorien/grab | grab/document.py | Document.parse | def parse(self, charset=None, headers=None):
"""
Parse headers.
This method is called after Grab instance performs network request.
"""
if headers:
self.headers = headers
else:
# Parse headers only from last response
# There could be multiple responses in `self.head`
# in case of 301/302 redirect
# Separate responses
if self.head:
responses = self.head.rsplit(b'\nHTTP/', 1)
# Cut off the 'HTTP/*' line from the last response
_, response = responses[-1].split(b'\n', 1)
response = response.decode('utf-8', 'ignore')
else:
response = u''
if six.PY2:
# email_from_string does not work with unicode input
response = response.encode('utf-8')
self.headers = email.message_from_string(response)
if charset is None:
if isinstance(self.body, six.text_type):
self.charset = 'utf-8'
else:
self.detect_charset()
else:
self.charset = charset.lower()
self._unicode_body = None | python | def parse(self, charset=None, headers=None):
"""
Parse headers.
This method is called after Grab instance performs network request.
"""
if headers:
self.headers = headers
else:
# Parse headers only from last response
# There could be multiple responses in `self.head`
# in case of 301/302 redirect
# Separate responses
if self.head:
responses = self.head.rsplit(b'\nHTTP/', 1)
# Cut off the 'HTTP/*' line from the last response
_, response = responses[-1].split(b'\n', 1)
response = response.decode('utf-8', 'ignore')
else:
response = u''
if six.PY2:
# email_from_string does not work with unicode input
response = response.encode('utf-8')
self.headers = email.message_from_string(response)
if charset is None:
if isinstance(self.body, six.text_type):
self.charset = 'utf-8'
else:
self.detect_charset()
else:
self.charset = charset.lower()
self._unicode_body = None | [
"def",
"parse",
"(",
"self",
",",
"charset",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"if",
"headers",
":",
"self",
".",
"headers",
"=",
"headers",
"else",
":",
"# Parse headers only from last response",
"# There could be multiple responses in `self.head`... | Parse headers.
This method is called after Grab instance performs network request. | [
"Parse",
"headers",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L159-L193 | train | 214,891 |
lorien/grab | grab/document.py | Document.detect_charset | def detect_charset(self):
"""
Detect charset of the response.
Try following methods:
* meta[name="Http-Equiv"]
* XML declaration
* HTTP Content-Type header
Ignore unknown charsets.
Use utf-8 as fallback charset.
"""
charset = None
body_chunk = self.get_body_chunk()
if body_chunk:
# Try to extract charset from http-equiv meta tag
match_charset = RE_META_CHARSET.search(body_chunk)
if match_charset:
charset = match_charset.group(1)
else:
match_charset_html5 = RE_META_CHARSET_HTML5.search(body_chunk)
if match_charset_html5:
charset = match_charset_html5.group(1)
# TODO: <meta charset="utf-8" />
bom_enc, bom = read_bom(body_chunk)
if bom_enc:
charset = bom_enc
self.bom = bom
# Try to process XML declaration
if not charset:
if body_chunk.startswith(b'<?xml'):
match = RE_XML_DECLARATION.search(body_chunk)
if match:
enc_match = RE_DECLARATION_ENCODING.search(
match.group(0))
if enc_match:
charset = enc_match.group(1)
if not charset:
if 'Content-Type' in self.headers:
pos = self.headers['Content-Type'].find('charset=')
if pos > -1:
charset = self.headers['Content-Type'][(pos + 8):]
if charset:
charset = charset.lower()
if not isinstance(charset, str):
# Convert to unicode (py2.x) or string (py3.x)
charset = charset.decode('utf-8')
# Check that python knows such charset
try:
codecs.lookup(charset)
except LookupError:
logger.debug('Unknown charset found: %s.'
' Using utf-8 istead.', charset)
self.charset = 'utf-8'
else:
self.charset = charset | python | def detect_charset(self):
"""
Detect charset of the response.
Try following methods:
* meta[name="Http-Equiv"]
* XML declaration
* HTTP Content-Type header
Ignore unknown charsets.
Use utf-8 as fallback charset.
"""
charset = None
body_chunk = self.get_body_chunk()
if body_chunk:
# Try to extract charset from http-equiv meta tag
match_charset = RE_META_CHARSET.search(body_chunk)
if match_charset:
charset = match_charset.group(1)
else:
match_charset_html5 = RE_META_CHARSET_HTML5.search(body_chunk)
if match_charset_html5:
charset = match_charset_html5.group(1)
# TODO: <meta charset="utf-8" />
bom_enc, bom = read_bom(body_chunk)
if bom_enc:
charset = bom_enc
self.bom = bom
# Try to process XML declaration
if not charset:
if body_chunk.startswith(b'<?xml'):
match = RE_XML_DECLARATION.search(body_chunk)
if match:
enc_match = RE_DECLARATION_ENCODING.search(
match.group(0))
if enc_match:
charset = enc_match.group(1)
if not charset:
if 'Content-Type' in self.headers:
pos = self.headers['Content-Type'].find('charset=')
if pos > -1:
charset = self.headers['Content-Type'][(pos + 8):]
if charset:
charset = charset.lower()
if not isinstance(charset, str):
# Convert to unicode (py2.x) or string (py3.x)
charset = charset.decode('utf-8')
# Check that python knows such charset
try:
codecs.lookup(charset)
except LookupError:
logger.debug('Unknown charset found: %s.'
' Using utf-8 istead.', charset)
self.charset = 'utf-8'
else:
self.charset = charset | [
"def",
"detect_charset",
"(",
"self",
")",
":",
"charset",
"=",
"None",
"body_chunk",
"=",
"self",
".",
"get_body_chunk",
"(",
")",
"if",
"body_chunk",
":",
"# Try to extract charset from http-equiv meta tag",
"match_charset",
"=",
"RE_META_CHARSET",
".",
"search",
... | Detect charset of the response.
Try following methods:
* meta[name="Http-Equiv"]
* XML declaration
* HTTP Content-Type header
Ignore unknown charsets.
Use utf-8 as fallback charset. | [
"Detect",
"charset",
"of",
"the",
"response",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L195-L258 | train | 214,892 |
lorien/grab | grab/document.py | Document.copy | def copy(self, new_grab=None):
"""
Clone the Response object.
"""
obj = self.__class__()
obj.process_grab(new_grab if new_grab else self.grab)
copy_keys = ('status', 'code', 'head', 'body', 'total_time',
'connect_time', 'name_lookup_time',
'url', 'charset', '_unicode_body',
'_grab_config')
for key in copy_keys:
setattr(obj, key, getattr(self, key))
obj.headers = copy(self.headers)
# TODO: Maybe, deepcopy?
obj.cookies = copy(self.cookies)
return obj | python | def copy(self, new_grab=None):
"""
Clone the Response object.
"""
obj = self.__class__()
obj.process_grab(new_grab if new_grab else self.grab)
copy_keys = ('status', 'code', 'head', 'body', 'total_time',
'connect_time', 'name_lookup_time',
'url', 'charset', '_unicode_body',
'_grab_config')
for key in copy_keys:
setattr(obj, key, getattr(self, key))
obj.headers = copy(self.headers)
# TODO: Maybe, deepcopy?
obj.cookies = copy(self.cookies)
return obj | [
"def",
"copy",
"(",
"self",
",",
"new_grab",
"=",
"None",
")",
":",
"obj",
"=",
"self",
".",
"__class__",
"(",
")",
"obj",
".",
"process_grab",
"(",
"new_grab",
"if",
"new_grab",
"else",
"self",
".",
"grab",
")",
"copy_keys",
"=",
"(",
"'status'",
",... | Clone the Response object. | [
"Clone",
"the",
"Response",
"object",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L260-L279 | train | 214,893 |
lorien/grab | grab/document.py | Document.save | def save(self, path):
"""
Save response body to file.
"""
path_dir = os.path.split(path)[0]
if not os.path.exists(path_dir):
try:
os.makedirs(path_dir)
except OSError:
pass
with open(path, 'wb') as out:
out.write(self._bytes_body if self._bytes_body is not None
else b'') | python | def save(self, path):
"""
Save response body to file.
"""
path_dir = os.path.split(path)[0]
if not os.path.exists(path_dir):
try:
os.makedirs(path_dir)
except OSError:
pass
with open(path, 'wb') as out:
out.write(self._bytes_body if self._bytes_body is not None
else b'') | [
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"path_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"0",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path_dir",
")",
":",
"try",
":",
"os",
".",
"makedirs",
... | Save response body to file. | [
"Save",
"response",
"body",
"to",
"file",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L281-L295 | train | 214,894 |
lorien/grab | grab/document.py | Document.save_hash | def save_hash(self, location, basedir, ext=None):
"""
Save response body into file with special path
builded from hash. That allows to lower number of files
per directory.
:param location: URL of file or something else. It is
used to build the SHA1 hash.
:param basedir: base directory to save the file. Note that
file will not be saved directly to this directory but to
some sub-directory of `basedir`
:param ext: extension which should be appended to file name. The
dot is inserted automatically between filename and extension.
:returns: path to saved file relative to `basedir`
Example::
>>> url = 'http://yandex.ru/logo.png'
>>> g.go(url)
>>> g.response.save_hash(url, 'some_dir', ext='png')
'e8/dc/f2918108788296df1facadc975d32b361a6a.png'
# the file was saved to $PWD/some_dir/e8/dc/...
TODO: replace `basedir` with two options: root and save_to. And
returns save_to + path
"""
if isinstance(location, six.text_type):
location = location.encode('utf-8')
rel_path = hashed_path(location, ext=ext)
path = os.path.join(basedir, rel_path)
if not os.path.exists(path):
path_dir, _ = os.path.split(path)
try:
os.makedirs(path_dir)
except OSError:
pass
with open(path, 'wb') as out:
out.write(self._bytes_body)
return rel_path | python | def save_hash(self, location, basedir, ext=None):
"""
Save response body into file with special path
builded from hash. That allows to lower number of files
per directory.
:param location: URL of file or something else. It is
used to build the SHA1 hash.
:param basedir: base directory to save the file. Note that
file will not be saved directly to this directory but to
some sub-directory of `basedir`
:param ext: extension which should be appended to file name. The
dot is inserted automatically between filename and extension.
:returns: path to saved file relative to `basedir`
Example::
>>> url = 'http://yandex.ru/logo.png'
>>> g.go(url)
>>> g.response.save_hash(url, 'some_dir', ext='png')
'e8/dc/f2918108788296df1facadc975d32b361a6a.png'
# the file was saved to $PWD/some_dir/e8/dc/...
TODO: replace `basedir` with two options: root and save_to. And
returns save_to + path
"""
if isinstance(location, six.text_type):
location = location.encode('utf-8')
rel_path = hashed_path(location, ext=ext)
path = os.path.join(basedir, rel_path)
if not os.path.exists(path):
path_dir, _ = os.path.split(path)
try:
os.makedirs(path_dir)
except OSError:
pass
with open(path, 'wb') as out:
out.write(self._bytes_body)
return rel_path | [
"def",
"save_hash",
"(",
"self",
",",
"location",
",",
"basedir",
",",
"ext",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"location",
",",
"six",
".",
"text_type",
")",
":",
"location",
"=",
"location",
".",
"encode",
"(",
"'utf-8'",
")",
"rel_path... | Save response body into file with special path
builded from hash. That allows to lower number of files
per directory.
:param location: URL of file or something else. It is
used to build the SHA1 hash.
:param basedir: base directory to save the file. Note that
file will not be saved directly to this directory but to
some sub-directory of `basedir`
:param ext: extension which should be appended to file name. The
dot is inserted automatically between filename and extension.
:returns: path to saved file relative to `basedir`
Example::
>>> url = 'http://yandex.ru/logo.png'
>>> g.go(url)
>>> g.response.save_hash(url, 'some_dir', ext='png')
'e8/dc/f2918108788296df1facadc975d32b361a6a.png'
# the file was saved to $PWD/some_dir/e8/dc/...
TODO: replace `basedir` with two options: root and save_to. And
returns save_to + path | [
"Save",
"response",
"body",
"into",
"file",
"with",
"special",
"path",
"builded",
"from",
"hash",
".",
"That",
"allows",
"to",
"lower",
"number",
"of",
"files",
"per",
"directory",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L297-L336 | train | 214,895 |
lorien/grab | grab/document.py | Document.json | def json(self):
"""
Return response body deserialized into JSON object.
"""
if six.PY3:
return json.loads(self.body.decode(self.charset))
else:
return json.loads(self.body) | python | def json(self):
"""
Return response body deserialized into JSON object.
"""
if six.PY3:
return json.loads(self.body.decode(self.charset))
else:
return json.loads(self.body) | [
"def",
"json",
"(",
"self",
")",
":",
"if",
"six",
".",
"PY3",
":",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"body",
".",
"decode",
"(",
"self",
".",
"charset",
")",
")",
"else",
":",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"... | Return response body deserialized into JSON object. | [
"Return",
"response",
"body",
"deserialized",
"into",
"JSON",
"object",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L339-L347 | train | 214,896 |
lorien/grab | grab/document.py | Document.browse | def browse(self):
"""
Save response in temporary file and open it in GUI browser.
"""
_, path = tempfile.mkstemp()
self.save(path)
webbrowser.open('file://' + path) | python | def browse(self):
"""
Save response in temporary file and open it in GUI browser.
"""
_, path = tempfile.mkstemp()
self.save(path)
webbrowser.open('file://' + path) | [
"def",
"browse",
"(",
"self",
")",
":",
"_",
",",
"path",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"self",
".",
"save",
"(",
"path",
")",
"webbrowser",
".",
"open",
"(",
"'file://'",
"+",
"path",
")"
] | Save response in temporary file and open it in GUI browser. | [
"Save",
"response",
"in",
"temporary",
"file",
"and",
"open",
"it",
"in",
"GUI",
"browser",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L363-L370 | train | 214,897 |
lorien/grab | grab/document.py | Document.text_search | def text_search(self, anchor, byte=False):
"""
Search the substring in response body.
:param anchor: string to search
:param byte: if False then `anchor` should be the
unicode string, and search will be performed in
`response.unicode_body()` else `anchor` should be the byte-string
and search will be performed in `response.body`
If substring is found return True else False.
"""
if isinstance(anchor, six.text_type):
if byte:
raise GrabMisuseError('The anchor should be bytes string in '
'byte mode')
else:
return anchor in self.unicode_body()
if not isinstance(anchor, six.text_type):
if byte:
# if six.PY3:
# return anchor in self.body_as_bytes()
return anchor in self.body
else:
raise GrabMisuseError('The anchor should be byte string in '
'non-byte mode') | python | def text_search(self, anchor, byte=False):
"""
Search the substring in response body.
:param anchor: string to search
:param byte: if False then `anchor` should be the
unicode string, and search will be performed in
`response.unicode_body()` else `anchor` should be the byte-string
and search will be performed in `response.body`
If substring is found return True else False.
"""
if isinstance(anchor, six.text_type):
if byte:
raise GrabMisuseError('The anchor should be bytes string in '
'byte mode')
else:
return anchor in self.unicode_body()
if not isinstance(anchor, six.text_type):
if byte:
# if six.PY3:
# return anchor in self.body_as_bytes()
return anchor in self.body
else:
raise GrabMisuseError('The anchor should be byte string in '
'non-byte mode') | [
"def",
"text_search",
"(",
"self",
",",
"anchor",
",",
"byte",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"anchor",
",",
"six",
".",
"text_type",
")",
":",
"if",
"byte",
":",
"raise",
"GrabMisuseError",
"(",
"'The anchor should be bytes string in '",
"... | Search the substring in response body.
:param anchor: string to search
:param byte: if False then `anchor` should be the
unicode string, and search will be performed in
`response.unicode_body()` else `anchor` should be the byte-string
and search will be performed in `response.body`
If substring is found return True else False. | [
"Search",
"the",
"substring",
"in",
"response",
"body",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L403-L430 | train | 214,898 |
lorien/grab | grab/document.py | Document.text_assert | def text_assert(self, anchor, byte=False):
"""
If `anchor` is not found then raise `DataNotFound` exception.
"""
if not self.text_search(anchor, byte=byte):
raise DataNotFound(u'Substring not found: %s' % anchor) | python | def text_assert(self, anchor, byte=False):
"""
If `anchor` is not found then raise `DataNotFound` exception.
"""
if not self.text_search(anchor, byte=byte):
raise DataNotFound(u'Substring not found: %s' % anchor) | [
"def",
"text_assert",
"(",
"self",
",",
"anchor",
",",
"byte",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"text_search",
"(",
"anchor",
",",
"byte",
"=",
"byte",
")",
":",
"raise",
"DataNotFound",
"(",
"u'Substring not found: %s'",
"%",
"anchor",
... | If `anchor` is not found then raise `DataNotFound` exception. | [
"If",
"anchor",
"is",
"not",
"found",
"then",
"raise",
"DataNotFound",
"exception",
"."
] | 8b301db2a08c830245b61c589e58af6234f4db79 | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/document.py#L432-L438 | train | 214,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.